# coding=utf-8 # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import tempfile import unittest from tempfile import TemporaryDirectory import numpy as np import pytest import requests from transformers.models.auto.processing_auto import processor_class_from_name from transformers.testing_utils import ( get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, require_vision, ) from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, CLIPImageProcessor, Kosmos2Processor, PreTrainedTokenizerFast, XLMRobertaTokenizer, XLMRobertaTokenizerFast, ) SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers @require_vision class Kosmos2ProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Kosmos2Processor def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = CLIPImageProcessor(do_center_crop=False) # We have a SentencePiece fixture for testing slow_tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB) fast_tokenizer = XLMRobertaTokenizerFast(__slow_tokenizer=slow_tokenizer) processor = Kosmos2Processor(image_processor, fast_tokenizer) processor.save_pretrained(self.tmpdirname) # We override this method to take the fast tokenizer or image processor by default def get_component(self, attribute, **kwargs): assert attribute in self.processor_class.attributes component_class_name = getattr(self.processor_class, f"{attribute}_class") if isinstance(component_class_name, tuple): component_class_name = component_class_name[-1] component_class = processor_class_from_name(component_class_name) component = component_class.from_pretrained(self.tmpdirname, **kwargs) # noqa if attribute == "tokenizer" and not component.pad_token: component.pad_token = "[TEST_PAD]" return component def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def test_image_procesor_load_save_reload(self): # make sure load from Hub repo. 
-> save -> reload locally work image_processor = CLIPImageProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") with TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir) reloaded_image_processor = CLIPImageProcessor.from_pretrained(tmp_dir) assert image_processor.to_dict() == reloaded_image_processor.to_dict() assert image_processor.to_json_string() == reloaded_image_processor.to_json_string() def test_save_load_pretrained_additional_features(self): processor = Kosmos2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = Kosmos2Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, CLIPImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_processor = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_processor.keys(): self.assertAlmostEqual(input_image_processor[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" encoded_processor = processor(text=input_str, add_eos_token=True) encoded_tok = tokenizer(input_str, return_token_type_ids=False) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual( list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask", "image_embeds_position_mask"] ) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" image_input = self.prepare_image_inputs() # both image and text inputs = processor(text=input_str, images=image_input) self.assertListEqual( 
list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask", "image_embeds_position_mask"] ) # only text inputs = processor(text=input_str) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"]) # only image inputs = processor(images=image_input) self.assertListEqual(list(inputs.keys()), ["pixel_values"]) @require_torch def test_full_processor(self): url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/two_dogs.jpg" processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224") # test with different input formats. # fmt: off texts = [ # no phrase "<grounding> Two puppies sit in a field of grass.", # 1 phrase "<grounding> <phrase> Two puppies </phrase> sit in a field of grass.", # 2 phrases "<grounding> <phrase> Two puppies </phrase> sit in a field of <phrase> grass </phrase>.", # 2 phrases: bboxes already specified for the 1st phrase "<grounding> <phrase> Two puppies </phrase> <object> <patch_index_0079> <patch_index_1016> </delimiter_of_multi_objects/> <patch_index_0135> <patch_index_1008> </object> sit in a field of <phrase> grass </phrase>.", ] # fmt: on image = Image.open(requests.get(url, stream=True).raw) # To match the official (microsoft) Kosmos-2 demo from which the expected values here are grabbed image_path = os.path.join(self.tmpdirname, "image.jpg") image.save(image_path) image = Image.open(image_path) # fmt: off bboxes = [ [None, []], [[None], [[]], [(79, 1016)], [[(79, 1016)]], [[(79, 1016), (135, 1008)]]], [[[(79, 1016), (135, 1008)], None], [[(79, 1016), (135, 1008)], []], [[(79, 1016), (135, 1008)], (480, 1023)], [[(79, 1016), (135, 1008)], [(480, 1023)]]], [[None, [(480, 1023)]]], ] # fmt: on batch_image = [image] * 4 batch_text = [texts[0], texts[1], texts[1], texts[2]] batch_bboxes = [ None, # no phrase [[]], # 1 phrase: no bbox [(79, 1016)], # 1 phrase: 1 bbox [[(79, 1016), (135, 1008)], (480, 1023)], # 2 phrase: 2 bboxes + 1 bbox ] # fmt: off expected_input_ids = [ [0, 64012, 1264, 17772, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 106, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 64009, 64493, 65036, 64010, 106, 4, 2], ] # fmt: on EXPECTED_PIXEL_VALUES_1 = np.array( [ [ [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], [-0.6243883967399597, -0.6243883967399597, -0.5951915383338928], ], [ [-0.20629698038101196, -0.19128920137882233, -0.19128920137882233], [-0.20629698038101196, -0.19128920137882233, -0.17628143727779388], [-0.2213047444820404, -0.20629698038101196, -0.16127367317676544], ], [ [-0.5843556523323059, -0.5701355338096619, -0.5701355338096619], [-0.5843556523323059, -0.5701355338096619, -0.5559154152870178], [-0.5843556523323059, -0.5559154152870178, -0.5416953563690186], ], ] ) EXPECTED_PIXEL_VALUES_2 = np.array( [ [ [-0.4346088469028473, -0.47840413451194763, -0.7849710583686829], [-0.5221993923187256, -0.5076009631156921, -0.755774199962616], [-0.5221993923187256, -0.5076009631156921, 
-0.7411757707595825], ], [ [-0.2813358008861542, -0.2963435649871826, -0.431413471698761], [-0.26632803678512573, -0.2963435649871826, -0.4764367938041687], [-0.2213047444820404, -0.2813358008861542, -0.49144455790519714], ], [ [-0.5701355338096619, -0.641235888004303, -0.7549964189529419], [-0.5843556523323059, -0.641235888004303, -0.7834365367889404], [-0.5559154152870178, -0.641235888004303, -0.7834365367889404], ], ] ) def check(texts, bboxes, expected_input_ids): outputs = processor(images=None, text=texts, bboxes=bboxes, add_eos_token=True) self.assertListEqual(outputs.input_ids, expected_input_ids) # no phrase check(texts[0], bboxes[0][0], expected_input_ids[0]) # no phrase check(texts[0], bboxes[0][1], expected_input_ids[0]) # 1 phrase: no bbox check(texts[1], bboxes[1][0], expected_input_ids[1]) # 1 phrase: no bbox check(texts[1], bboxes[1][1], expected_input_ids[1]) # 1 phrase: 1 bbox check(texts[1], bboxes[1][2], expected_input_ids[2]) # 1 phrase: 1 bbox check(texts[1], bboxes[1][3], expected_input_ids[2]) # 1 phrase: 2 bboxes check(texts[1], bboxes[1][4], expected_input_ids[3]) # could not contain `[None]` with pytest.raises(ValueError): _ = processor.preprocess_examples(images=None, texts=texts[1], bboxes=[[None]]) # 2 phrase: 2 bboxes + no bbox check(texts[2], bboxes[2][0], expected_input_ids[4]) # 2 phrase: 2 bboxes + no bbox check(texts[2], bboxes[2][1], expected_input_ids[4]) # 2 phrase: 2 bboxes + 1 bbox check(texts[2], bboxes[2][2], expected_input_ids[5]) # 2 phrase: 2 bboxes + 1 bbox check(texts[2], bboxes[2][3], expected_input_ids[5]) # 2 phrase: no box (as already specified in the text) + 1 bbox check(texts[3], bboxes[3][0], expected_input_ids[5]) # could not contain `[None]` with pytest.raises(ValueError): _ = processor.preprocess_examples(images=None, texts=texts[2], bboxes=[[(79, 1016), (135, 1008)], [None]]) # test batch outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, add_eos_token=True, ) self.assertListEqual( outputs.input_ids, [expected_input_ids[0], expected_input_ids[1], expected_input_ids[2], expected_input_ids[5]], ) # test batch with padding (without `return_tensors`) outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, padding=True, add_eos_token=True, ) # padding on the right self.assertListEqual( outputs.input_ids[0], expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual( outputs.attention_mask[0], [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) # no padding for the longest sequence self.assertListEqual(outputs.input_ids[-1], expected_input_ids[5]) self.assertListEqual(outputs.attention_mask[-1], [1] * len(expected_input_ids[5])) # test batch with padding (with `return_tensors`) outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) # padding on the right self.assertListEqual( outputs.input_ids.numpy().tolist()[0], expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual( outputs.attention_mask.numpy().tolist()[0], [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) # no padding for the longest sequence self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], expected_input_ids[5]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], [1] * len(expected_input_ids[5])) # test with image num_image_tokens = 64 
outputs = processor(images=image, text=texts[0], bboxes=None, add_eos_token=True) self.assertTupleEqual(outputs.pixel_values[0].shape, (3, 224, 224)) self.assertListEqual( outputs.input_ids, [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:], ) self.assertListEqual( outputs.image_embeds_position_mask, [0] * 2 + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[0]) - 1), ) np.testing.assert_allclose(outputs.pixel_values[0][:3, :3, :3], EXPECTED_PIXEL_VALUES_1, atol=1e-9) np.testing.assert_allclose(outputs.pixel_values[0][:3, -3:, -3:], EXPECTED_PIXEL_VALUES_2, atol=1e-9) # test with image in batch (right padding) outputs = processor( images=batch_image, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) self.assertTupleEqual(outputs.pixel_values.shape, (4, 3, 224, 224)) np.testing.assert_allclose( outputs.pixel_values[:, :3, :3, :3].numpy(), [EXPECTED_PIXEL_VALUES_1] * len(batch_image), atol=1e-9 ) np.testing.assert_allclose( outputs.pixel_values[:, :3, -3:, -3:].numpy(), [EXPECTED_PIXEL_VALUES_2] * len(batch_image), atol=1e-9 ) # padding on the right: the `[1:]` below is because the part for `BOS` is already added in the beginning of each (dynamically computed) expected value # noqa # fmt: off EXPECTED_IDS_BATCH_RIGHT_PADDING = [ [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], ] EXPECTED_MASK_BATCH_RIGHT_PADDING = [ [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), [1] * (2 + num_image_tokens + len(expected_input_ids[5])), ] # fmt: on self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH_RIGHT_PADDING[0]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH_RIGHT_PADDING[0]) self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH_RIGHT_PADDING[-1]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH_RIGHT_PADDING[-1]) self.assertListEqual( outputs.image_embeds_position_mask.numpy().tolist(), [[0, 0] + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1)] * len(batch_image), ) processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224", padding_side="left") # test with image in batch (left padding) outputs = processor( images=batch_image, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) # padding on the left: the `[1:]` below is because the part for `BOS` is already added in the beginning of each (dynamically computed) expected value # noqa # fmt: off EXPECTED_IDS_BATCH = [ [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:], [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], ] EXPECTED_MASK_BATCH =[ [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]), [1] * (2 + num_image_tokens + len(expected_input_ids[5])), ] EXPECTED_IMG_POS_MASK_BATCH = [ [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 0] + [1] * num_image_tokens + [0] + [0] * len(expected_input_ids[0][1:]), [0, 0] + [1] 
* num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1), ] # fmt: on self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH[0]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH[0]) self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[0], EXPECTED_IMG_POS_MASK_BATCH[0]) # no padding for the longest sequence self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH[-1]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH[-1]) self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[-1], EXPECTED_IMG_POS_MASK_BATCH[-1]) # Rewrite as Kosmos-2 supports custom padding only when image is None. @require_vision @require_torch def test_kwargs_overrides_default_tokenizer_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer", max_length=117) processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() # set image input to None image_input = None inputs = processor( text=input_str, images=image_input, return_tensors="pt", max_length=112, padding="max_length", ) self.assertEqual(len(inputs["input_ids"][0]), 112) # Rewrite to test only image_processor kwargs @require_torch @require_vision def test_structured_kwargs_nested(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, "images_kwargs": {"size": {"height": 214, "width": 214}}, } inputs = processor(text=input_str, images=image_input, **all_kwargs) self.skip_processor_without_typed_kwargs(processor) self.assertEqual(inputs["pixel_values"].shape[2], 214) # Rewrite to test only image_processor kwargs @require_torch @require_vision def test_structured_kwargs_nested_from_dict(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, "images_kwargs": {"size": {"height": 214, "width": 214}}, } inputs = processor(text=input_str, images=image_input, **all_kwargs) self.assertEqual(inputs["pixel_values"].shape[2], 214) # Rewrite as Kosmos-2 supports custom padding only when image is None. 
@require_vision @require_torch def test_tokenizer_defaults_preserved_by_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() # set image input to None image_input = None inputs = processor(text=input_str, images=image_input, return_tensors="pt") self.assertEqual(len(inputs["input_ids"][0]), 117) # Rewrite as Kosmos-2 supports custom padding only when image is None. @require_torch @require_vision def test_unstructured_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() # set image input to None image_input = None inputs = processor( text=input_str, images=image_input, return_tensors="pt", padding="max_length", max_length=76, ) self.assertEqual(len(inputs["input_ids"][0]), 76) # Rewrite as Kosmos-2 supports custom padding only when image is None. @require_torch @require_vision def test_unstructured_kwargs_batched(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs(batch_size=2) # set image input to None image_input = None inputs = processor( text=input_str, images=image_input, return_tensors="pt", size={"height": 214, "width": 214}, padding="longest", max_length=76, ) self.assertEqual(len(inputs["input_ids"][0]), 10)
File: transformers/tests/models/kosmos2/test_processor_kosmos2.py (repo_id: transformers, token_count: 12419)
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import LongformerConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, ) from transformers.models.longformer.modeling_longformer import LongformerSelfAttention class LongformerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, attention_window=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.attention_window = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but LongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window + 1` locations # (assuming no token with global attention, otherwise the last dimension of attentions # is x + self.attention_window + 1, where x is the number of tokens with global attention) self.key_length = self.attention_window + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if 
self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return LongformerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, attention_window=self.attention_window, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def create_and_check_attention_mask_determinism( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) output_with_mask = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] output_without_mask = model(input_ids)["last_hidden_state"] self.parent.assertTrue(torch.allclose(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], atol=1e-4)) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_with_global_attention_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() global_attention_mask = input_mask.clone() global_attention_mask[:, input_mask.shape[-1] // 2] = 0 global_attention_mask = global_attention_mask.to(torch_device) result = model( input_ids, attention_mask=input_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, ) result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attention_mask) result = model(input_ids, global_attention_mask=global_attention_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, global_attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LongformerForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LongformerForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = LongformerForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, global_attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs global_attention_mask = torch.zeros_like(input_ids) global_attention_mask[:, -1] = 1 inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, "global_attention_mask": global_attention_mask, } return config, inputs_dict def prepare_config_and_inputs_for_question_answering(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs # Replace sep_token_id by some random id input_ids[input_ids == config.sep_token_id] = torch.randint(0, config.vocab_size, (1,)).item() # Make sure there are exactly three sep_token_id input_ids[:, -3:] = config.sep_token_id input_mask = 
torch.ones_like(input_ids) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels @require_torch class LongformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False # pruning is not supported test_torchscript = False all_model_classes = ( ( LongformerModel, LongformerForMaskedLM, LongformerForSequenceClassification, LongformerForQuestionAnswering, LongformerForTokenClassification, LongformerForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": LongformerModel, "fill-mask": LongformerForMaskedLM, "question-answering": LongformerForQuestionAnswering, "text-classification": LongformerForSequenceClassification, "token-classification": LongformerForTokenClassification, "zero-shot": LongformerForSequenceClassification, } if is_torch_available() else {} ) # Need to use `0.6` instead of `0.5` for `test_disk_offload` model_split_percents = [0.6, 0.7, 0.9] # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if ( pipeline_test_case_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = LongformerModelTester(self) self.config_tester = ConfigTester(self, config_class=LongformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_attention_mask_determinism(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_attention_mask_determinism(*config_and_inputs) def test_model_global_attention_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_global_attention_mask(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_question_answering() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) @unittest.skip(reason="Longformer cannot keep gradients in attention or hidden states") def test_retain_grad_hidden_states_attentions(self): return @unittest.skip(reason="LongFormer calculates global attn only when attn_mask has non-zero elements") def 
test_batching_equivalence(self): return @require_torch @require_sentencepiece @require_tokenizers class LongformerModelIntegrationTest(unittest.TestCase): def _get_hidden_states(self): return torch.tensor( [ [ [ 4.98332758e-01, 2.69175139e00, -7.08081422e-03, 1.04915401e00, -1.83476661e00, 7.67220476e-01, 2.98580543e-01, 2.84803992e-02, ], [ -7.58357372e-01, 4.20635998e-01, -4.04739919e-02, 1.59924145e-01, 2.05135748e00, -1.15997978e00, 5.37166397e-01, 2.62873606e-01, ], [ -1.69438001e00, 4.17574660e-01, -1.49196962e00, -1.76483717e00, -1.94566312e-01, -1.71183858e00, 7.72903565e-01, -1.11557056e00, ], [ 5.44028163e-01, 2.05466114e-01, -3.63045868e-01, 2.41865062e-01, 3.20348382e-01, -9.05611176e-01, -1.92690727e-01, -1.19917547e00, ], ] ], dtype=torch.float32, device=torch_device, ) def test_diagonalize(self): hidden_states = self._get_hidden_states() hidden_states = hidden_states.reshape((1, 8, 4)) # set seq length = 8, hidden dim = 4 chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) window_overlap_size = chunked_hidden_states.shape[2] self.assertTrue(window_overlap_size == 4) padded_hidden_states = LongformerSelfAttention._pad_and_diagonalize(chunked_hidden_states) self.assertTrue(padded_hidden_states.shape[-1] == chunked_hidden_states.shape[-1] + window_overlap_size - 1) # first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000] torch.testing.assert_close( padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], rtol=1e-3, atol=1e-3 ) self.assertTrue( torch.allclose( padded_hidden_states[0, 0, 0, 4:], torch.zeros((3,), device=torch_device, dtype=torch.float32), atol=1e-3, ) ) # last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629] torch.testing.assert_close( padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], rtol=1e-3, atol=1e-3 ) self.assertTrue( torch.allclose( padded_hidden_states[0, 0, -1, :3], torch.zeros((3,), device=torch_device, dtype=torch.float32), atol=1e-3, ) ) def test_pad_and_transpose_last_two_dims(self): hidden_states = self._get_hidden_states() self.assertEqual(hidden_states.shape, (1, 4, 8)) padding = (0, 0, 0, 1) padded_hidden_states = LongformerSelfAttention._pad_and_transpose_last_two_dims(hidden_states, padding) self.assertEqual(padded_hidden_states.shape, (1, 8, 5)) expected_added_dim = torch.zeros((5,), device=torch_device, dtype=torch.float32) torch.testing.assert_close(expected_added_dim, padded_hidden_states[0, -1, :], rtol=1e-6, atol=1e-6) torch.testing.assert_close( hidden_states[0, -1, :], padded_hidden_states.view(1, -1)[0, 24:32], rtol=1e-6, atol=1e-6 ) def test_chunk(self): hidden_states = self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size)) chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) # expected slices across chunk and seq length dim expected_slice_along_seq_length = torch.tensor( [0.4983, -0.7584, -1.6944], device=torch_device, dtype=torch.float32 ) expected_slice_along_chunk = torch.tensor( [0.4983, -1.8348, -0.7584, 2.0514], device=torch_device, dtype=torch.float32 ) torch.testing.assert_close( chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, rtol=1e-3, atol=1e-3 ) torch.testing.assert_close(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, rtol=1e-3, atol=1e-3) self.assertEqual(chunked_hidden_states.shape, (1, 3, 4, 4)) def test_mask_invalid_locations(self): hidden_states = 
self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size)) chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) hid_states_1 = chunked_hidden_states.clone() LongformerSelfAttention._mask_invalid_locations(hid_states_1, 1) self.assertTrue(torch.isinf(hid_states_1).sum().item() == 8) hid_states_2 = chunked_hidden_states.clone() LongformerSelfAttention._mask_invalid_locations(hid_states_2, 2) self.assertTrue(torch.isinf(hid_states_2).sum().item() == 24) hid_states_3 = chunked_hidden_states.clone()[:, :, :, :3] LongformerSelfAttention._mask_invalid_locations(hid_states_3, 2) self.assertTrue(torch.isinf(hid_states_3).sum().item() == 24) hid_states_4 = chunked_hidden_states.clone()[:, :, 2:, :] LongformerSelfAttention._mask_invalid_locations(hid_states_4, 2) self.assertTrue(torch.isinf(hid_states_4).sum().item() == 12) def test_layer_local_attn(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = self._get_hidden_states() batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) attention_mask[:, -2:] = -10000 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, )[0] self.assertEqual(output_hidden_states.shape, (1, 4, 8)) self.assertTrue( torch.allclose( output_hidden_states[0, 1], torch.tensor( [0.0019, 0.0122, -0.0171, -0.0256, -0.0300, 0.0173, -0.0115, 0.0048], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) def test_layer_global_attn(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0) batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) # create attn mask attention_mask[0, -2:] = 10000.0 attention_mask[0, -1:] = -10000.0 attention_mask[1, 1:] = 10000.0 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, )[0] self.assertEqual(output_hidden_states.shape, (2, 4, 8)) self.assertTrue( torch.allclose( output_hidden_states[0, 2], torch.tensor( [-0.0651, -0.0393, 0.0309, -0.0342, -0.0066, -0.0155, -0.0209, -0.0494], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( output_hidden_states[1, -2], torch.tensor( [-0.0405, -0.0384, 0.0396, -0.0374, -0.0341, 0.0136, 0.0014, -0.0571], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) def test_layer_attn_probs(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = 
torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0) batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) # create attn mask attention_mask[0, -2:] = 10000.0 attention_mask[0, -1:] = -10000.0 attention_mask[1, 1:] = 10000.0 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states, local_attentions, global_attentions = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=True, ) self.assertEqual(local_attentions.shape, (2, 4, 2, 8)) self.assertEqual(global_attentions.shape, (2, 2, 3, 4)) # All tokens with global attention have weight 0 in local attentions. self.assertTrue(torch.all(local_attentions[0, 2:4, :, :] == 0)) self.assertTrue(torch.all(local_attentions[1, 1:4, :, :] == 0)) # The weight of all tokens with local attention must sum to 1. self.assertTrue(torch.all(torch.abs(global_attentions[0, :, :2, :].sum(dim=-1) - 1) < 1e-6)) self.assertTrue(torch.all(torch.abs(global_attentions[1, :, :1, :].sum(dim=-1) - 1) < 1e-6)) self.assertTrue( torch.allclose( local_attentions[0, 0, 0, :], torch.tensor( [0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( local_attentions[1, 0, 0, :], torch.tensor( [0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) # All the global attention weights must sum to 1. self.assertTrue(torch.all(torch.abs(global_attentions.sum(dim=-1) - 1) < 1e-6)) self.assertTrue( torch.allclose( global_attentions[0, 0, 1, :], torch.tensor( [0.2500, 0.2500, 0.2500, 0.2500], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( global_attentions[1, 0, 0, :], torch.tensor( [0.2497, 0.2500, 0.2499, 0.2504], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) @slow def test_inference_no_head(self): model = LongformerModel.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) # 'Hello world!' input_ids = torch.tensor([[0, 20920, 232, 328, 1437, 2]], dtype=torch.long, device=torch_device) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) output = model(input_ids, attention_mask=attention_mask)[0] output_without_mask = model(input_ids)[0] expected_output_slice = torch.tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], device=torch_device) torch.testing.assert_close(output[0, 0, -5:], expected_output_slice, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output_without_mask[0, 0, -5:], expected_output_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_no_head_long(self): model = LongformerModel.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) # 'Hello world! 
' repeated 1000 times input_ids = torch.tensor( [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device ) # long input attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device) global_attention_mask[:, [1, 4, 21]] = 1 # Set global attention on a few random positions output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0] expected_output_sum = torch.tensor(74585.8594, device=torch_device) expected_output_mean = torch.tensor(0.0243, device=torch_device) torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4) @slow def test_inference_masked_lm_long(self): model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) # 'Hello world! ' repeated 1000 times input_ids = torch.tensor( [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device ) # long input input_ids = input_ids.to(torch_device) loss, prediction_scores = model(input_ids, labels=input_ids).to_tuple() expected_loss = torch.tensor(0.0074, device=torch_device) expected_prediction_scores_sum = torch.tensor(-6.1048e08, device=torch_device) expected_prediction_scores_mean = torch.tensor(-3.0348, device=torch_device) torch.testing.assert_close(loss, expected_loss, rtol=1e-4, atol=1e-4) torch.testing.assert_close(prediction_scores.sum(), expected_prediction_scores_sum, rtol=1e-4, atol=1e-4) torch.testing.assert_close(prediction_scores.mean(), expected_prediction_scores_mean, rtol=1e-4, atol=1e-4)
File: transformers/tests/models/longformer/test_modeling_longformer.py (repo_id: transformers, token_count: 15701)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Mask2Former model.""" import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import Mask2FormerConfig, is_torch_available, is_vision_available from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4 from transformers.testing_utils import ( require_timm, require_torch, require_torch_accelerator, require_torch_fp16, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel if is_vision_available(): from transformers import Mask2FormerImageProcessor if is_vision_available(): from PIL import Image class Mask2FormerModelTester: def __init__( self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, num_attention_heads=4, num_hidden_layers=2, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_auxiliary_loss = use_auxiliary_loss self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.num_labels = num_labels self.hidden_dim = hidden_dim self.mask_feature_size = hidden_dim self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( torch_device ) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) mask_labels = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5 ).float() class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long() config = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def get_config(self): config = Mask2FormerConfig( hidden_size=self.hidden_dim, num_attention_heads=self.num_attention_heads, num_hidden_layers=self.num_hidden_layers, encoder_feedforward_dim=16, dim_feedforward=32, num_queries=self.num_queries, num_labels=self.num_labels, decoder_layers=2, encoder_layers=2, feature_size=16, ) config.num_queries = self.num_queries config.num_labels = self.num_labels config.backbone_config.embed_dim = 16 config.backbone_config.depths = [1, 1, 1, 1] config.backbone_config.hidden_size = 16 config.backbone_config.num_channels = self.num_channels config.backbone_config.num_heads = [1, 1, 2, 2] config.backbone = None config.hidden_dim = self.hidden_dim config.mask_feature_size = self.hidden_dim 
config.feature_size = self.hidden_dim return config def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def check_output_hidden_state(self, output, config): encoder_hidden_states = output.encoder_hidden_states pixel_decoder_hidden_states = output.pixel_decoder_hidden_states transformer_decoder_hidden_states = output.transformer_decoder_hidden_states self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths)) self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths)) self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers) def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False): with torch.no_grad(): model = Mask2FormerModel(config=config) model.to(torch_device) model.eval() output = model(pixel_values=pixel_values, pixel_mask=pixel_mask) output = model(pixel_values, output_hidden_states=True) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(output, config) def create_and_check_mask2former_instance_segmentation_head_model( self, config, pixel_values, pixel_mask, mask_labels, class_labels ): model = Mask2FormerForUniversalSegmentation(config=config) model.to(torch_device) model.eval() def comm_check_on_output(result): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) comm_check_on_output(result) result = model( pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels ) comm_check_on_output(result) self.parent.assertTrue(result.loss is not None) self.parent.assertEqual(result.loss.shape, torch.Size([])) @require_torch class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else () pipeline_model_mapping = {"image-feature-extraction": Mask2FormerModel} if is_torch_available() else {} is_encoder_decoder = False test_pruning = False test_head_masking = False test_missing_keys = False def setUp(self): self.model_tester = Mask2FormerModelTester(self) self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_mask2former_model(self): config, inputs = 
self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False) def test_mask2former_instance_segmentation_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs) @unittest.skip(reason="Mask2Former does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Mask2Former is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Mask2Former does not use token embeddings") def test_resize_tokens_embeddings(self): pass @require_torch_multi_gpu @unittest.skip( reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def test_multi_gpu_data_parallel_forward(self): pass @slow def test_model_from_pretrained(self): for model_name in ["facebook/mask2former-swin-small-coco-instance"]: model = Mask2FormerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_model_with_labels(self): size = (self.model_tester.min_size,) * 2 inputs = { "pixel_values": torch.randn((2, 3, *size), device=torch_device), "mask_labels": torch.randn((2, 10, *size), device=torch_device), "class_labels": torch.zeros(2, 10, device=torch_device).long(), } config = self.model_tester.get_config() model = Mask2FormerForUniversalSegmentation(config).to(torch_device) outputs = model(**inputs) self.assertTrue(outputs.loss is not None) def test_hidden_states_output(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True) def test_attention_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) outputs = model(**inputs, output_attentions=True) self.assertTrue(outputs.attentions is not None) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") model_class = self.all_model_classes[1] config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs() model = model_class(config) model.to(torch_device) model.train() loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss loss.backward() def test_retain_grad_hidden_states_attentions(self): model_class = self.all_model_classes[1] config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs() config.output_hidden_states = True config.output_attentions = True model = model_class(config).to(torch_device) model.train() outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels) encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() attentions = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) 
self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) @require_timm def test_backbone_selection(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() config.backbone_config = None config.backbone_kwargs = {"out_indices": [1, 2, 3]} config.use_pretrained_backbone = True # Load a timm backbone # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices config.backbone = "resnet18" config.use_timm_backbone = True for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "Mask2FormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) # Load a HF backbone config.backbone = "microsoft/resnet-18" config.use_timm_backbone = False for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "Mask2FormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @slow class Mask2FormerModelIntegrationTest(unittest.TestCase): @cached_property def model_checkpoints(self): return "facebook/mask2former-swin-small-coco-instance" @cached_property def default_image_processor(self): return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def test_inference_no_head(self): model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 384, 384)) with torch.no_grad(): outputs = model(**inputs) expected_slice_hidden_state = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) def test_inference_universal_segmentation_head(self): model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval() image_processor = self.default_image_processor image = prepare_img() inputs = 
image_processor(image, return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 384, 384)) with torch.no_grad(): outputs = model(**inputs) # masks_queries_logits masks_queries_logits = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) expected_slice = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] expected_slice = torch.tensor(expected_slice).to(torch_device) torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) # class_queries_logits class_queries_logits = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1)) expected_slice = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ] ).to(torch_device) torch.testing.assert_close( outputs.class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE ) @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): model = ( Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints) .to(torch_device, dtype=torch.float16) .eval() ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device, dtype=torch.float16) with torch.no_grad(): _ = model(**inputs) def test_with_segmentation_maps_and_loss(self): model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval() image_processor = self.default_image_processor inputs = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", ) inputs["pixel_values"] = inputs["pixel_values"].to(torch_device) inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]] inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]] with torch.no_grad(): outputs = model(**inputs) self.assertTrue(outputs.loss is not None) def test_export(self): if not is_torch_greater_or_equal_than_2_4: self.skipTest(reason="This test requires torch >= 2.4 to run.") model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval() image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) exported_program = torch.export.export( model, args=(inputs["pixel_values"], inputs["pixel_mask"]), strict=True, ) with torch.no_grad(): eager_outputs = model(**inputs) exported_outputs = exported_program.module().forward(inputs["pixel_values"], inputs["pixel_mask"]) self.assertEqual(eager_outputs.masks_queries_logits.shape, exported_outputs.masks_queries_logits.shape) torch.testing.assert_close( eager_outputs.masks_queries_logits, exported_outputs.masks_queries_logits, rtol=TOLERANCE, atol=TOLERANCE ) self.assertEqual(eager_outputs.class_queries_logits.shape, exported_outputs.class_queries_logits.shape) torch.testing.assert_close( eager_outputs.class_queries_logits, exported_outputs.class_queries_logits, rtol=TOLERANCE, atol=TOLERANCE )
transformers/tests/models/mask2former/test_modeling_mask2former.py/0
{ "file_path": "transformers/tests/models/mask2former/test_modeling_mask2former.py", "repo_id": "transformers", "token_count": 9102 }
# coding=utf-8 # Copyright 2024 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import binascii import unittest from transformers import MyT5Tokenizer from transformers.utils import is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" def bytes_to_hex(bline: bytes, sep: str = " ") -> str: return str(binascii.hexlify(bline, sep), "utf-8") def str_to_hex(line: str, sep: str = " ") -> str: return bytes_to_hex(bytes(line, "utf-8"), sep) class TestByteRewriter(unittest.TestCase): def setUp(self) -> None: self.tokenizer = MyT5Tokenizer.from_pretrained("Tomlim/myt5-base") def test_simple_decompose(self): decompose_rewriter = self.tokenizer.decompose_rewriter # test rewriting in_str = "Hello WorlD" out_str = "hAello wAorldA" in_hex = str_to_hex(in_str).split(" ") out_hex = str_to_hex(out_str).split(" ") self.assertEqual(decompose_rewriter.rewrite_bytes(in_hex), out_hex) def test_simple_decompose_reversible(self): decompose_rewriter = self.tokenizer.decompose_rewriter in_str = "Hello WorlD" out_str = "Hello WorlD" in_hex = str_to_hex(in_str).split(" ") out_hex = str_to_hex(out_str).split(" ") self.assertEqual( decompose_rewriter.rewrite_bytes(decompose_rewriter.rewrite_bytes(in_hex), reverse=True), out_hex ) def test_simple_decompose_non_latin(self): decompose_rewriter = self.tokenizer.decompose_rewriter in_str = "你好世界 Hello WorlD" out_str = "你好世界 hAello wAorldA" in_hex = str_to_hex(in_str).split(" ") out_hex = str_to_hex(out_str).split(" ") self.assertEqual(decompose_rewriter.rewrite_bytes(in_hex), out_hex) def test_unrecognized_byte(self): decompose_rewriter = self.tokenizer.decompose_rewriter in_hex = ["00", "01", "xx", "03", "61"] out_hex = ["00", "01", "xx", "03", "61"] self.assertEqual(decompose_rewriter.rewrite_bytes(in_hex), out_hex) class MyT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = MyT5Tokenizer test_rust_tokenizer = False def setUp(self): super().setUp() def get_tokenizer(self, **kwargs) -> MyT5Tokenizer: return self.tokenizer_class.from_pretrained("Tomlim/myt5-base", **kwargs) @unittest.skip(reason="inputs cannot be pretokenized as ids depend on whole input string") def test_pretokenized_inputs(self): pass def test_convert_tokens_to_string_format(self): tokenizer = self.get_tokenizer() with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90", "</s>"] string = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(string, str) def test_simple_tokenize(self): tokenizer = self.get_tokenizer() in_str = "Hello World" out_tokens = ["52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90"] self.assertEqual(tokenizer.tokenize(in_str), out_tokens) in_pl_str = "Witaj świecie" out_tokens = ["77", "41", "69", "74", "61", "6a", "20", "4b", "a5", "97", "63", "69", "65"] self.assertEqual(tokenizer.tokenize(in_pl_str), out_tokens) in_jp_str = "こんにちは世界" 
out_tokens = ["58", "80", "91", "a1", "e4", "b8", "96", "e7", "95", "8c"] self.assertEqual(tokenizer.tokenize(in_jp_str), out_tokens) def test_batch_tokenize(self): tokenizer = self.get_tokenizer() in_batch = ["Hello World", "Witaj świecie", "こんにちは世界"] out_tokens = [ ["52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90", "</s>"], ["77", "41", "69", "74", "61", "6a", "20", "4b", "a5", "97", "63", "69", "65", "</s>"], ["58", "80", "91", "a1", "e4", "b8", "96", "e7", "95", "8c", "</s>"], ] self.assertListEqual( [tokenizer.convert_ids_to_tokens(ids) for ids in tokenizer(in_batch)["input_ids"]], out_tokens ) def test_special_bytes(self): tokenizer = self.get_tokenizer() in_str_special = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09" out_tokens = ["00", "01", "02", "03", "04", "05", "06", "07", "08", "09"] self.assertEqual(tokenizer.tokenize(in_str_special), out_tokens) in_str_mixed = "\x00Hello\x01 World\x02" out_tokens = ["00", "52", "85", "91", "9f", "6f", "01", "20", "52", "85", "9f", "90", "02"] self.assertEqual(tokenizer.tokenize(in_str_mixed), out_tokens) def test_special_tokens(self): tokenizer = self.get_tokenizer() in_str_special = "<unk></s><pad>" out_tokens = ["<unk>", "</s>", "<pad>"] self.assertEqual(tokenizer.tokenize(in_str_special), out_tokens) in_str_not_special = "<s>" out_tokens = ["3c", "73", "3e"] self.assertEqual(tokenizer.tokenize(in_str_not_special), out_tokens) in_str_mixed = "<s>Hello World</s>" out_tokens = ["3c", "73", "3e", "52", "85", "91", "9f", "6f", "20", "52", "85", "9f", "90", "</s>"] self.assertEqual(tokenizer.tokenize(in_str_mixed), out_tokens) def test_token_ids_conversion(self): tokenizer = self.get_tokenizer() tokens_range = [f"{x:02x}" for x in range(256)] indices_range = list(range(3, 256 + 3)) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens_range), indices_range) self.assertListEqual(tokenizer.convert_ids_to_tokens(indices_range), tokens_range) special_tokens = ["<pad>", "</s>", "<unk>"] special_indices = [0, 1, 2] self.assertListEqual(tokenizer.convert_tokens_to_ids(special_tokens), special_indices) self.assertListEqual(tokenizer.convert_ids_to_tokens(special_indices), special_tokens)
transformers/tests/models/myt5/test_tokenization_myt5.py/0
{ "file_path": "transformers/tests/models/myt5/test_tokenization_myt5.py", "repo_id": "transformers", "token_count": 3019 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class TFOPTModelTester: config_cls = OPTConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16, attn_implementation="eager", ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.word_embed_proj_dim = word_embed_proj_dim self.is_encoder_decoder = False self.attn_implementation = attn_implementation def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) config = self.config_cls( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, attn_implementation=self.attn_implementation, **self.config_updates, ) 
inputs_dict = prepare_opt_inputs_dict(config, input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFOPTModel(config=config) input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) @require_tf class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {} ) is_encoder_decoder = False test_pruning = False test_onnx = False onnx_min_opset = 10 def setUp(self): self.model_tester = TFOPTModelTester(self) self.config_tester = ConfigTester(self, config_class=OPTConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) def test_resize_token_embeddings(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(model, embedding_layer): if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build_in_name_scope() if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings model = model_class(config=config) old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) # reshape the embeddings model.resize_token_embeddings(size) new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) # check that the resized embeddings size matches the desired size. 
assert_size = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0], assert_size) # check that weights remain the same after resizing models_equal = True for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0], assert_size) models_equal = True for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) @require_tf class TFOPTHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2 input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1) batch_size = input_ids.shape[0] config = OPTConfig( vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size @require_sentencepiece @require_tf class OPTModelIntegrationTests(unittest.TestCase): @slow def test_inference_no_head(self): model = TFOPTModel.from_pretrained("facebook/opt-350m") input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = tf.not_equal(input_ids, model.config.pad_token_id) with tf.GradientTape(): output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state expected_shape = (1, 11, 512) self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] ) self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3)) xla_generate = tf.function(model, jit_compile=True) output = xla_generate(input_ids, attention_mask)[0] self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2)) @require_tf @slow class TFOPTEmbeddingsTest(unittest.TestCase): def setUp(self): super().setUp() self.path_model = "facebook/opt-350m" def test_logits(self): model = TFOPTForCausalLM.from_pretrained(self.path_model) tokenizer = GPT2Tokenizer.from_pretrained(self.path_model) prompts = [ "Today is a beautiful day and I want to", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False) logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1) logits_meta = tf.constant( [ [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670], [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822], [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703], [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477], ] ) self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4)) xla_generate = tf.function(model, jit_compile=True) logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1) self.assertTrue(np.allclose(logits, logits_meta, 
atol=1e-4)) @require_tf @slow class TFOPTGenerationTest(unittest.TestCase): @property def prompts(self): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def test_generation_pre_attn_layer_norm(self): model_id = "facebook/opt-125m" EXPECTED_OUTPUTS = [ "Today is a beautiful day and I want to", "In the city of New York, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] predicted_outputs = [] tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = TFOPTForCausalLM.from_pretrained(model_id) for prompt in self.prompts: input_ids = tokenizer(prompt, return_tensors="tf").input_ids generated_ids = model.generate(input_ids, max_length=10) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) def test_batch_generation(self): model_id = "facebook/opt-350m" tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = TFOPTForCausalLM.from_pretrained(model_id) tokenizer.padding_side = "left" # use different length sentences to test batching sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="tf", padding=True) input_ids = inputs["input_ids"] outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"]) inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs["attention_mask"][-1], tf.int64) ) inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit of a dork.\nI'm a little bit", "Today, I was in the middle of a conversation with a friend about the", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence]) def test_generation_post_attn_layer_norm(self): model_id = "facebook/opt-350m" EXPECTED_OUTPUTS = [ "Today is a beautiful day and I want to", "In the city of San Francisco, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] predicted_outputs = [] tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = TFOPTForCausalLM.from_pretrained(model_id) for prompt in self.prompts: input_ids = tokenizer(prompt, return_tensors="tf").input_ids generated_ids = model.generate(input_ids, max_length=10) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
transformers/tests/models/opt/test_modeling_tf_opt.py/0
{ "file_path": "transformers/tests/models/opt/test_modeling_tf_opt.py", "repo_id": "transformers", "token_count": 7560 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch PatchTST model.""" import inspect import random import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin TOLERANCE = 1e-4 if is_torch_available(): import torch from transformers import ( MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, PatchTSTConfig, PatchTSTForClassification, PatchTSTForPrediction, PatchTSTForPretraining, PatchTSTForRegression, PatchTSTModel, ) @require_torch class PatchTSTModelTester: def __init__( self, parent, batch_size=13, prediction_length=7, context_length=14, patch_length=5, patch_stride=5, num_input_channels=1, num_time_features=1, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, distil=False, seed=42, num_targets=2, mask_type="random", random_mask_ratio=0, ): self.parent = parent self.batch_size = batch_size self.prediction_length = prediction_length self.context_length = context_length self.patch_length = patch_length self.patch_stride = patch_stride self.num_input_channels = num_input_channels self.num_time_features = num_time_features self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.mask_type = mask_type self.random_mask_ratio = random_mask_ratio self.seed = seed self.num_targets = num_targets self.distil = distil self.num_patches = (max(self.context_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 # define seq_length so that it can pass the test_attention_outputs self.seq_length = self.num_patches def get_config(self): return PatchTSTConfig( prediction_length=self.prediction_length, patch_length=self.patch_length, patch_stride=self.patch_stride, num_input_channels=self.num_input_channels, d_model=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, context_length=self.context_length, activation_function=self.hidden_act, seed=self.seed, num_targets=self.num_targets, mask_type=self.mask_type, random_mask_ratio=self.random_mask_ratio, ) def prepare_patchtst_inputs_dict(self, config): _past_length = 
config.context_length # bs, num_input_channels, num_patch, patch_len # [bs x seq_len x num_input_channels] past_values = floats_tensor([self.batch_size, _past_length, self.num_input_channels]) future_values = floats_tensor([self.batch_size, config.prediction_length, self.num_input_channels]) inputs_dict = { "past_values": past_values, "future_values": future_values, } return inputs_dict def prepare_config_and_inputs(self): config = self.get_config() inputs_dict = self.prepare_patchtst_inputs_dict(config) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict @require_torch class PatchTSTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( PatchTSTModel, PatchTSTForPrediction, PatchTSTForPretraining, PatchTSTForClassification, PatchTSTForRegression, ) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": PatchTSTModel} if is_torch_available() else {} is_encoder_decoder = False test_pruning = False test_head_masking = False test_missing_keys = True test_torchscript = False test_inputs_embeds = False test_resize_embeddings = True test_resize_position_embeddings = False test_mismatched_shapes = True test_model_parallel = False has_attentions = True def setUp(self): self.model_tester = PatchTSTModelTester(self) self.config_tester = ConfigTester( self, config_class=PatchTSTConfig, has_text_modality=False, prediction_length=self.model_tester.prediction_length, ) def test_config(self): self.config_tester.run_common_tests() def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) # if PatchTSTForPretraining if model_class == PatchTSTForPretraining: inputs_dict.pop("future_values") # else if classification model: elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING): rng = random.Random(self.model_tester.seed) labels = ids_tensor([self.model_tester.batch_size], self.model_tester.num_targets, rng=rng) inputs_dict["target_values"] = labels inputs_dict.pop("future_values") elif model_class in get_values(MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING): rng = random.Random(self.model_tester.seed) target_values = floats_tensor([self.model_tester.batch_size, self.model_tester.num_targets], rng=rng) inputs_dict["target_values"] = target_values inputs_dict.pop("future_values") return inputs_dict def test_save_load_strict(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers ) self.assertEqual(len(hidden_states), expected_num_layers) num_patch = self.model_tester.num_patches self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patch, self.model_tester.hidden_size], ) config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="we have no tokens embeddings") def test_resize_tokens_embeddings(self): pass def test_model_main_input_name(self): model_signature = inspect.signature(getattr(PatchTSTModel, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(PatchTSTModel.main_input_name, observed_main_input_name) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model_class == PatchTSTForPretraining: expected_arg_names = [ "past_values", "past_observed_mask", ] elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING) or model_class in get_values( MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING ): expected_arg_names = ["past_values", "target_values", "past_observed_mask"] else: expected_arg_names = [ "past_values", "past_observed_mask", "future_values", ] expected_arg_names.extend( [ "output_hidden_states", "output_attentions", "return_dict", ] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @is_flaky() def test_retain_grad_hidden_states_attentions(self): super().test_retain_grad_hidden_states_attentions() @unittest.skip(reason="Model does not have input embeddings") def test_model_get_set_embeddings(self): pass def prepare_batch(repo_id="hf-internal-testing/etth1-hourly-batch", file="train-batch.pt"): file = hf_hub_download(repo_id=repo_id, filename=file, repo_type="dataset") batch = torch.load(file, map_location=torch_device) return batch # Note: Pretrained model is not yet downloadable. @require_torch @slow class PatchTSTModelIntegrationTests(unittest.TestCase): # Publishing of pretrained weights are under internal review. Pretrained model is not yet downloadable. def test_pretrain_head(self): model = PatchTSTForPretraining.from_pretrained("namctin/patchtst_etth1_pretrain").to(torch_device) batch = prepare_batch() torch.manual_seed(0) with torch.no_grad(): output = model(past_values=batch["past_values"].to(torch_device)).prediction_output num_patch = ( max(model.config.context_length, model.config.patch_length) - model.config.patch_length ) // model.config.patch_stride + 1 expected_shape = torch.Size([64, model.config.num_input_channels, num_patch, model.config.patch_length]) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0173]], [[-1.0379]], [[-0.1030]], [[0.3642]], [[0.1601]], [[-1.3136]], [[0.8780]]], device=torch_device, ) torch.testing.assert_close(output[0, :7, :1, :1], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) # Publishing of pretrained weights are under internal review. Pretrained model is not yet downloadable. 
def test_prediction_head(self): model = PatchTSTForPrediction.from_pretrained("namctin/patchtst_etth1_forecast").to(torch_device) batch = prepare_batch(file="test-batch.pt") torch.manual_seed(0) with torch.no_grad(): output = model( past_values=batch["past_values"].to(torch_device), future_values=batch["future_values"].to(torch_device), ).prediction_outputs expected_shape = torch.Size([64, model.config.prediction_length, model.config.num_input_channels]) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.5142, 0.6928, 0.6118, 0.5724, -0.3735, -0.1336, -0.7124]], device=torch_device, ) torch.testing.assert_close(output[0, :1, :7], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_prediction_generation(self): model = PatchTSTForPrediction.from_pretrained("namctin/patchtst_etth1_forecast").to(torch_device) batch = prepare_batch(file="test-batch.pt") torch.manual_seed(0) with torch.no_grad(): outputs = model.generate(past_values=batch["past_values"].to(torch_device)) expected_shape = torch.Size((64, 1, model.config.prediction_length, model.config.num_input_channels)) self.assertEqual(outputs.sequences.shape, expected_shape) expected_slice = torch.tensor( [[0.4075, 0.3716, 0.4786, 0.2842, -0.3107, -0.0569, -0.7489]], device=torch_device, ) mean_prediction = outputs.sequences.mean(dim=1) torch.testing.assert_close(mean_prediction[0, -1:], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_regression_generation(self): model = PatchTSTForRegression.from_pretrained("ibm/patchtst-etth1-regression-distribution").to(torch_device) batch = prepare_batch(repo_id="ibm/patchtst-etth1-test-data", file="regression_distribution_batch.pt") torch.manual_seed(0) model.eval() with torch.no_grad(): outputs = model.generate(past_values=batch["past_values"].to(torch_device)) expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.num_targets)) self.assertEqual(outputs.sequences.shape, expected_shape) expected_slice = torch.tensor( [[-0.08046409], [-0.06570087], [-0.28218266], [-0.20636195], [-0.11787311]], device=torch_device, ) mean_prediction = outputs.sequences.mean(dim=1) torch.testing.assert_close(mean_prediction[-5:], expected_slice, rtol=TOLERANCE)
transformers/tests/models/patchtst/test_modeling_patchtst.py/0
{ "file_path": "transformers/tests/models/patchtst/test_modeling_patchtst.py", "repo_id": "transformers", "token_count": 6859 }
# coding=utf-8 # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Phi-3 model.""" import unittest from typing import List from parameterized import parameterized from transformers import Phi3Config, StaticCache, is_torch_available, set_seed from transformers.testing_utils import ( require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, Phi3ForCausalLM, Phi3ForSequenceClassification, Phi3ForTokenClassification, Phi3Model, ) end_of_text_token = 32000 class Phi3MiniWithStaticCache(torch.nn.Module): def __init__(self, model: Phi3ForCausalLM, batch_size: int, max_seq_len: int): super().__init__() self.model = model self.cache = StaticCache( config=model.config, batch_size=batch_size, max_cache_len=max_seq_len, device=self.model.device, dtype=self.model.dtype, ) def forward( self, input_ids: torch.LongTensor = None, ) -> torch.FloatTensor: return self.model.forward( input_ids=input_ids, use_cache=True, return_dict=True, past_key_values=self.cache, ).logits @staticmethod def generate(model: Phi3ForCausalLM, prompt_tokens: torch.LongTensor, max_seq_len: int) -> List[int]: model = Phi3MiniWithStaticCache(model, 1, max_seq_len + prompt_tokens.shape[-1]) response_tokens = [] for input_pos in range(prompt_tokens.shape[-1]): result = model.forward( input_ids=prompt_tokens[:, input_pos : input_pos + 1], ) response_tokens.append(prompt_tokens[0][input_pos].item()) current_token = torch.argmax(result[:, -1, :], dim=-1).item() response_tokens.append(current_token) while current_token != end_of_text_token and len(response_tokens) < max_seq_len: result = model.forward( input_ids=torch.tensor([[current_token]], dtype=torch.long), ) current_token = torch.argmax(result[:, -1, :], dim=-1).item() response_tokens.append(current_token) return response_tokens class Phi3ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = 
intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return Phi3Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model with Llama->Phi3 def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Phi3Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model_as_decoder with Llama->Phi3 def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = Phi3Model(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_for_causal_lm with Llama->Phi3 def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = 
Phi3ForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_decoder_model_past_large_inputs with Llama->Phi3 def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = Phi3ForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class Phi3ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (Phi3Model, Phi3ForCausalLM, Phi3ForSequenceClassification, Phi3ForTokenClassification) if is_torch_available() else () ) all_generative_model_classes = (Phi3ForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Phi3Model, "text-classification": Phi3ForSequenceClassification, "text-generation": Phi3ForCausalLM, "token-classification": Phi3ForTokenClassification, "zero-shot": Phi3ForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False # TODO (ydshieh): Check this. 
See https://app.circleci.com/pipelines/github/huggingface/transformers/79292/workflows/fa2ba644-8953-44a6-8f67-ccd69ca6a476/jobs/1012905 def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.setUp with Llama->Phi3 def setUp(self): self.model_tester = Phi3ModelTester(self) self.config_tester = ConfigTester(self, config_class=Phi3Config, hidden_size=37) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_config def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model with Llama->Phi3,llama->phi3 def test_phi3_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = Phi3ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_single_label with Llama->Phi3,llama->phi3 def test_phi3_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = Phi3ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_multi_label with Llama->Phi3,llama->phi3 def test_phi3_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = Phi3ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) @parameterized.expand([("longrope",)]) def test_model_rope_scaling_from_config(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = 
ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = Phi3Model(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights n_factors = config.hidden_size // config.num_attention_heads // 2 config.rope_scaling = { "type": scaling_type, "short_factor": [5.0 for _ in range(n_factors)], "long_factor": [5.0 for _ in range(n_factors)], } scaled_model = Phi3Model(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Scaling changes the RoPE embeddings, both for the short and long outputs self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) @parameterized.expand([("longrope",)]) def test_model_rope_scaling_short_long_factor(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() n_factors = config.hidden_size // config.num_key_value_heads // 2 config.rope_scaling = { "type": scaling_type, "short_factor": [3.0 for _ in range(n_factors)], "long_factor": [5.0 for _ in range(n_factors)], } input_tensor = ids_tensor([1, 4090], config.vocab_size) # Make sure we don't have padding tokens. If this is the case, then the actual number of "true" tokens may be shorter # than `config.original_max_position_embeddings + 5`, invalidating this test input_tensor[input_tensor == config.pad_token_id] += 1 model = Phi3ForCausalLM(config) model.to(torch_device) model.eval() generation_args_short = { "max_length": config.original_max_position_embeddings, "temperature": 0.0, "use_cache": True, "do_sample": False, "return_dict_in_generate": True, } output_with_short_factor = model.generate(input_tensor, **generation_args_short) keys_with_short_factor = output_with_short_factor.past_key_values[0][0] generation_args_long = { "max_length": config.original_max_position_embeddings + 5, "temperature": 0.0, "use_cache": True, "do_sample": False, "return_dict_in_generate": True, "output_logits": True, } output_with_long_factor = model.generate(input_tensor, **generation_args_long) keys_with_long_factor = output_with_long_factor.past_key_values[0][0] last_token_logits = output_with_long_factor.logits[-1][-1] regenerated_last_token_logits = model(output_with_long_factor.sequences[:, :-1]).logits[0][-1] keys_with_long_factor = keys_with_long_factor[:, :, : config.original_max_position_embeddings - 1, :] # KV cache is re-computed after reaching the (`config.original_max_position_embeddings`+1)th token position self.assertFalse(torch.allclose(keys_with_short_factor, keys_with_long_factor, atol=1e-2, rtol=1e-2)) # Last token generated using long factor torch.testing.assert_close(last_token_logits, regenerated_last_token_logits, rtol=1e-2, atol=1e-2) @slow @require_torch class Phi3IntegrationTest(unittest.TestCase): def test_model_phi3_mini_4k_instruct_logits(self): input_ids = { "input_ids": torch.tensor( [[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device ) } model = 
Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct").to(torch_device) model.eval() output = model(**input_ids).logits EXPECTED_OUTPUT = torch.tensor([[ 0.9979, -1.9449, -2.5613, -2.2110, -0.9323, -2.2726, -3.2468, -2.0122,-1.0021, -1.2764, -1.0876, -1.2358, 3.9385, 6.2152, -0.3695, -2.3285,-1.2907, -1.8238, -1.9941, -2.2098, -0.6923, -1.6793, -1.1660, -2.0469,-0.7369, -1.4101, -1.4091, -3.1694, -1.8383, -1.1952],[ 3.0525, 1.9178, 3.7016, 0.9263, 0.3397, 1.9584, 2.1347, 0.3482, 1.3773, 0.2153, 0.2798, 0.8360, 9.0936, 11.4944, -0.3575, -0.9442,-0.1246, 1.3869, 0.9846, 1.7243, 0.9150, 1.0823, 0.4313, 1.5742, 0.2566, -0.1401, -1.3019, 0.4967, 0.6941, 0.7214]]).to(torch_device) # fmt: skip torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4) def test_phi3_mini_4k_instruct_generation(self): model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct") tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct") messages = [ { "role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.", }, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, ] inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt") outputs = model.generate(inputs, max_new_tokens=32) output_text = tokenizer.batch_decode(outputs) EXPECTED_OUTPUT = [ "<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious ways. Here are some ideas for incorporating these fruits into your" ] self.assertListEqual(output_text, EXPECTED_OUTPUT) def test_phi3_mini_4k_instruct_with_static_cache(self): model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct") tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct") messages = [ { "role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.", }, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, ] inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt") response_tokens = Phi3MiniWithStaticCache.generate(model, inputs, 64) output_text = tokenizer.batch_decode(torch.tensor([response_tokens], dtype=torch.long, device=torch_device)) EXPECTED_OUTPUT = [ "<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious ways. 
Here are some" ] self.assertListEqual(output_text, EXPECTED_OUTPUT) def test_model_phi3_mini_128k_instruct_logits(self): input_ids = { "input_ids": torch.tensor( [[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device ) } model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-128k-instruct").to(torch_device) model.eval() output = model(**input_ids).logits EXPECTED_OUTPUT = torch.tensor([[ 1.8478, -0.5709, -1.6792, -1.2133, -0.7809, -0.8817, -2.0969, -1.1191,-0.7731, -1.0483, -0.5961, -1.3067, 3.1325, 6.9442, -0.4803, -0.9154,-1.3085, -1.0822, -1.1433, -0.7660, -0.8531, -0.9150, -0.6179, -1.6153,-0.2239, -1.3207, -1.1187, -2.4795, -1.4733, -0.4931],[ 3.5839, 2.4722, 3.7130, 1.2032, 0.7356, 2.7777, 2.5256, 0.9157, 1.6431, 0.3533, 0.5100, 1.3512, 8.9873, 10.9815, 0.3530, 0.1473, 0.2051, 1.8553, 1.5988, 2.2268, 1.1897, 1.2829, 0.7894, 1.8895, 0.7666, 0.4122, -0.9316, 0.9936, 1.2722, 0.8263]]).to(torch_device) # fmt: skip torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4) def test_phi3_mini_128k_instruct_generation(self): model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-128k-instruct") tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-128k-instruct") messages = [ { "role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.", }, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, ] inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt") outputs = model.generate(inputs, max_new_tokens=32) output_text = tokenizer.batch_decode(outputs) EXPECTED_OUTPUT = [ "<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious and nutritious ways. Here are some creative and healthy" ] self.assertListEqual(output_text, EXPECTED_OUTPUT) def test_phi3_mini_128k_instruct_with_static_cache(self): model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-128k-instruct") tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-128k-instruct") messages = [ { "role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.", }, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, ] inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt") response_tokens = Phi3MiniWithStaticCache.generate(model, inputs, 64) output_text = tokenizer.batch_decode(torch.tensor([response_tokens], dtype=torch.long, device=torch_device)) EXPECTED_OUTPUT = [ "<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious and nutritious ways" ] self.assertListEqual(output_text, EXPECTED_OUTPUT) def test_phi3_mini_4k_sliding_window(self): """ This tests that Phi3 doesn't deteriorate in quality for long context generations. Since Phi3 has sliding window attention, the test is tailored so that (context + max_new_tokens > sliding_window). 
See #33586 for more """ model = Phi3ForCausalLM.from_pretrained( "microsoft/Phi-3-mini-4k-instruct", device_map=torch_device, torch_dtype=torch.bfloat16 ) tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct") input_text = """ <|user|> Tell me about Paris, France.<|end|> <|assistant|> Paris, the capital city of France, is renowned for its rich history, iconic landmarks, and vibrant culture. Known as "The City of Light," Paris is situated in the north-central part of the country along the Seine River. Here are some key aspects of Paris: 1. Landmarks: Paris is home to numerous famous landmarks, including the Eiffel Tower, the Louvre Museum, Notre-Dame Cathedral, and the Champs-Élysées. The Eiffel Tower, built in 1889, is an iconic symbol of Paris and attracts millions of tourists each year. The Louvre Museum, the world's largest art museum, houses thousands of works of art, including the Mona Lisa and the Venus de Milo. 2. History: Paris has a rich history dating back to the 3rd century BC, when it was founded by a Celtic tribe called the Parisii. Over the centuries, the city has been influenced by various cultures, including the Romans, the Franks, and the Normans. The French Revolution in the late 18th century marked a significant turning point in Paris's history, leading to the establishment of the modern French Republic. 3. Culture: Paris is a global center for art, fashion, gastronomy, and culture. The city is home to numerous museums, including the Centre Pompidou, Musée d'Orsay, and Musée Rodin. Paris is also known for its fashion industry, with many famous designers having their origins in the city. The city's cuisine is also highly regarded, with a focus on fresh ingredients, and a wide variety of dishes, including French classics like coq au vin, boeuf bourguignon, and crêpes. 4. Architecture: Parisian architecture is characterized by its diverse styles, ranging from Gothic and Romanesque to Art Nouveau and Art Deco. The city's famous Haussmannian buildings, designed by Baron Haussmann in the mid-19th century, are known for their uniform facades, wrought-iron balconies, and large windows. 5. Transportation: Paris has an extensive public transportation system, including the Paris Métro, RER (suburban trains), and buses. The city's iconic yellow taxis are also a popular mode of transportation. 6. Language: The official language of Paris is French, and the city's residents are known for their charm and politeness. 7. Festivals and Events: Paris hosts numerous festivals and events throughout the year, including the annual Bastille Day celebrations, the Paris Fashion Week, and the famous annual New Year's Eve fireworks on the Eiffel Tower. 8. Geography: Paris is located in the north-central part of France, with the Seine River running through the city. The city's geography is characterized by rolling hills and picturesque parks, such as the Bois de Boulogne and the Jardin des Tuileries. 9. Population: As of 2021, Paris has an estimated population of around 2.2 million residents, with the metropolitan area housing over 12 million people. In summary, Paris is a city steeped in history, culture, and art, with a unique blend of architectural styles and a vibrant atmosphere that continues to captivate millions of visitors each year.<|end|> <|user|> Please give me a list of 5 architectural landmarks in Paris, France.<|end|> <|assistant|> 1. Eiffel Tower: Designed by Gustave Eiffel and completed in 1889, the Eiffel Tower is an iconic symbol of Paris and France. 
Standing at 324 meters tall, it was the tallest man-made structure in the world until the completion of the Chrysler Building in New York in 1930. The Eiffel Tower is made of wrought iron and offers visitors stunning views of the city from its three levels. 2. Notre-Dame Cathedral: Located on the Île de la Cité, Notre-Dame Cathedral is a masterpiece of French Gothic architecture. Construction began in the 12th century and continued for over 200 years, with the cathedral's completion in the 14th century. The cathedral is famous for its intricate facade, stained-glass windows, and the iconic gargoyles and chimeras. 3. Louvre Museum: Originally built as a fortress in the 12th century, the Louvre Museum is now the world's largest art museum and a historic monument in Paris. The museum's most famous landmark is the iconic glass pyramid entrance, designed by architect I. M. Pei in the 1980s. The Louvre houses over 380,000 works of art, including the Mona Lisa and the Venus de Milo. 4. Sacré-Cœur Basilica: The Sacré-Cœur Basilica, also known as the Basilique du Sacré-Cœur, is a Roman Catholic church and minor basilica located at the summit of the butte Montmartre, the highest point in Paris. The basilica was designed by Paul Abadie and dedicated in 1914. Its white domes and lavender-colored travertine stone make it a distinctive landmark in the Paris skyline. 5. Arc de Triomphe: The Arc de Triomphe is a monumental structure located at the western end of the Champs-Élysées. Commissioned by Napoleon in 1806, the Arc was designed by Jean-François-Thérèse Chalgrin and completed in 1836. The monument honors those who fought and died for France during the French Revolutionary and Napoleonic Wars. The Arc features sculptural reliefs and inscriptions, and its façade is adorned with the names of 357 generals and 660 soldiers. These five architectural landmarks showcase the diverse styles and historical periods of Paris, from Gothic to Neoclassical, and from the 19th to the 20th centuries. Each landmark has its unique features and contributes to the city's rich architectural heritage.<|end|> <|user|> Please give me a list of 10 famous items displayed in the Louvre Museum. Thanks!<|end|> <|assistant|> 1. Mona Lisa: The Mona Lisa, painted by Leonardo da Vinci in the early 16th century, is arguably the most famous painting in the world. The portrait is known for its enigmatic smile and masterful use of sfumato, a technique that creates a soft, hazy effect. 2. Venus de Milo: This ancient Greek statue, believed to have been created around 130-100 BC, is a masterpiece of Hellenistic sculpture. The Venus de Milo is renowned for its graceful beauty and the mystery surrounding its missing arms. 3. Winged Victory of Samothrace: This Hellenistic sculpture, dating back to the 2nd century BC, depicts the Greek goddess Nike, the personification of victory. The sculpture is celebrated for its dynamic movement and intricate details. 4. Liberty Leading the People: This iconic painting by Eugène Delacroix, created in 1830, commemorates the July Revolution in France. The artwork depicts a woman personifying Liberty leading a group of revolutionaries over the bodies of the fallen. 5. The Wedding at Cana: A 1516 painting by Veronese, The Wedding at Cana is a large-scale work that depicts the biblical story of Jesus turning water into wine at a wedding feast. The painting is known for its vibrant colors and intricate details. 6. 
The Raft of the Medusa: This 1819 painting by Théodore Géricault is a powerful depiction of the aftermath of the shipwreck of the French frigate Méduse. The painting is famous for its dramatic composition and emotional intensity. 7. The Coronation of Napoleon: This 1805 painting by Jacques-Louis David portrays the coronation of Napoleon Bonaparte as Emperor of the French. The artwork is a masterpiece of neoclassical style and captures the pomp and ceremony of the event. 8. The Death of Socrates: A 1914 painting by Pablo Picasso, The Death of Socrates depicts the moment when the ancient Greek philosopher Socrates drinks the poison hemlock, as ordered by the Athenian government. The painting is a powerful expression of the existential themes associated with Socrates' death. 9. The Turkish Bath: A 1887 painting by Puvis de Chavannes, The Turkish Bath is a large-scale work that depicts a scene of relaxation and leisity in a traditional Turkish bath. The painting is known for its subdued color palette and serene atmosphere. 10. The Wedding at Cana: A 1901 painting by Giovanni Boldini, The Wedding at Cana is a lively depiction of a festive Italian wedding. The painting is characterized by Boldini's signature flamboyant style and his ability to capture the essence of the subjects with a sense of movement and energy. These 10 famous items in the Louvre Museum showcase a diverse range of artistic styles and periods, from ancient Greek and Roman sculptures to Renaissance and 19th-century paintings. The collection at the Louvre is one of the world's largest and most important museums, housing over 800,000 works of art, and these famous items are just a small selection of the museum's vast collection.<|end|> <|user|> Please give me a list of 10 famous French dishes.<|end|> <|assistant|> """ inputs = tokenizer(input_text, return_tensors="pt").to(device=torch_device) outputs = model.generate(**inputs, max_new_tokens=100) output_text = tokenizer.batch_decode(outputs[:, inputs.input_ids.shape[1] :], skip_special_tokens=True) EXPECTED_OUTPUT = [ '1. Coq au Vin: Coq au Vin is a classic French dish that translates to "rooster in wine." The dish consists of chicken braised with wine, lardons, mushrooms, and garlic. It is a hearty and flavorful dish that is often served with potatoes or rice.\n\n 2. Boeuf Bourguignon: Boeuf Bourguignon is a traditional French beef stew that' ] self.assertListEqual(output_text, EXPECTED_OUTPUT)
transformers/tests/models/phi3/test_modeling_phi3.py/0
{ "file_path": "transformers/tests/models/phi3/test_modeling_phi3.py", "repo_id": "transformers", "token_count": 15606 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import RemBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertModel, ) class TFRemBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, input_embedding_size=18, output_embedding_size=43, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.input_embedding_size = input_embedding_size self.output_embedding_size = output_embedding_size self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = RemBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, input_embedding_size=self.input_embedding_size, output_embedding_size=self.output_embedding_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRemBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_causal_lm_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFRemBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFRemBertModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) # Also check the case where encoder outputs are not passed result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_causal_lm_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFRemBertForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFRemBertForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, 
"encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) prediction_scores = result["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_past( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFRemBertForCausalLM(config=config) # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and attn_mask next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) output_from_no_past = model(next_input_ids, output_hidden_states=True).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, output_hidden_states=True ).hidden_states[0] # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_with_attn_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFRemBertForCausalLM(config=config) # create attention mask half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) # first forward pass outputs = model(input_ids, attention_mask=attn_mask, use_cache=True) # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) past_key_values = outputs.past_key_values # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat( [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)], axis=1, ) output_from_no_past = model( next_input_ids, attention_mask=attn_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_hidden_states=True ).hidden_states[0] # select random slice random_slice_idx = int(ids_tensor((1,), 
output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFRemBertForCausalLM(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=input_mask, use_cache=True) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ).hidden_states[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFRemBertForCausalLM(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] encoder_hidden_states = encoder_hidden_states[:1, :, :] encoder_attention_mask = encoder_attention_mask[:1, :] self.batch_size = 1 # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ).hidden_states[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] 
output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRemBertForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFRemBertForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFRemBertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFRemBertForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRemBertForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFRemBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFRemBertModel, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFRemBertModel, "fill-mask": 
TFRemBertForMaskedLM, "question-answering": TFRemBertForQuestionAnswering, "text-classification": TFRemBertForSequenceClassification, "text-generation": TFRemBertForCausalLM, "token-classification": TFRemBertForTokenClassification, "zero-shot": TFRemBertForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFRemBertModelTester(self) self.config_tester = ConfigTester(self, config_class=RemBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): """Test the base model""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_causal_lm_base_model(self): """Test the base model of the causal LM model is_decoder=True, no cross_attention, no encoder outputs """ config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_base_model(*config_and_inputs) def test_model_as_decoder(self): """Test the base model as a decoder (of an encoder-decoder architecture) is_decoder=True + cross_attention + pass encoder outputs """ config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): """Test the causal LM model""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model(*config_and_inputs) def test_causal_lm_model_as_decoder(self): """Test the causal LM model as a decoder""" config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_causal_lm_model_as_decoder(*config_and_inputs) def test_causal_lm_model_past(self): """Test causal LM model with `past_key_values`""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past(*config_and_inputs) def test_causal_lm_model_past_with_attn_mask(self): """Test the causal LM model with `past_key_values` and `attention_mask`""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs) def test_causal_lm_model_past_with_large_inputs(self): """Test the causal LM model with `past_key_values` and a longer decoder sequence length""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): """Similar to `test_causal_lm_model_past_with_large_inputs` but with cross-attention""" config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFRemBertModel.from_pretrained("google/rembert") self.assertIsNotNone(model) @require_tf class TFRemBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_model(self): model = TFRemBertModel.from_pretrained("google/rembert") input_ids = tf.constant([[312, 56498, 313, 2125, 313]]) segment_ids = tf.constant([[0, 0, 0, 1, 1]]) output = model(input_ids, token_type_ids=segment_ids, output_hidden_states=True) hidden_size = 1152 expected_shape = [1, 5, hidden_size] self.assertEqual(output["last_hidden_state"].shape, expected_shape) expected_implementation = tf.constant( [ [ [0.0754, -0.2022, 0.1904], [-0.3354, -0.3692, -0.4791], [-0.2314, -0.6729, -0.0749], [-0.0396, -0.3105, -0.4234], [-0.1571, -0.0525, 0.5353], ] ] ) tf.debugging.assert_near(output["last_hidden_state"][:, :, :3], expected_implementation, atol=1e-4) # Running the original TF implementation gives slightly different results here. # Not clear why this variation is present # TODO: Find reason for discrepancy # expected_original_implementation = [[ # [0.07630594074726105, -0.20146065950393677, 0.19107051193714142], # [-0.3405614495277405, -0.36971670389175415, -0.4808273911476135], # [-0.22587086260318756, -0.6656315922737122, -0.07844287157058716], # [-0.04145475849509239, -0.3077218234539032, -0.42316967248916626], # [-0.15887849032878876, -0.054529931396245956, 0.5356100797653198] # ]]
transformers/tests/models/rembert/test_modeling_tf_rembert.py/0
{ "file_path": "transformers/tests/models/rembert/test_modeling_tf_rembert.py", "repo_id": "transformers", "token_count": 12938 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch RoCBert model.""" import unittest from transformers import RoCBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertModel, ) class RoCBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, pronunciation_vocab_size=99, shape_vocab_size=99, pronunciation_embed_dim=32, shape_embed_dim=32, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.pronunciation_vocab_size = pronunciation_vocab_size self.shape_vocab_size = shape_vocab_size self.pronunciation_embed_dim = pronunciation_embed_dim self.shape_embed_dim = shape_embed_dim self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_shape_ids = ids_tensor([self.batch_size, self.seq_length], self.shape_vocab_size) input_pronunciation_ids = ids_tensor([self.batch_size, self.seq_length], self.pronunciation_vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None 
token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return ( config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def get_config(self): return RoCBertConfig( vocab_size=self.vocab_size, shape_vocab_size=self.shape_vocab_size, pronunciation_vocab_size=self.pronunciation_vocab_size, shape_embed_dim=self.shape_embed_dim, pronunciation_embed_dim=self.pronunciation_embed_dim, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = RoCBertModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, ) result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, token_type_ids=token_type_ids, ) result = model(input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = RoCBertModel(config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, ) 
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = RoCBertForCausalLM(config=config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_masked_lm( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = RoCBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = RoCBertForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_shape_tokens = ids_tensor((self.batch_size, 3), config.shape_vocab_size) next_pronunciation_tokens = ids_tensor((self.batch_size, 3), config.pronunciation_vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_input_shape_ids = torch.cat([input_shape_ids, next_shape_tokens], dim=-1) next_input_pronunciation_ids = torch.cat([input_pronunciation_ids, next_pronunciation_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, input_shape_ids=next_input_shape_ids, input_pronunciation_ids=next_input_pronunciation_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, input_shape_ids=next_shape_tokens, input_pronunciation_ids=next_pronunciation_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() 
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_question_answering( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = RoCBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_labels = self.num_labels model = RoCBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_labels = self.num_labels model = RoCBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_choices = self.num_choices model = RoCBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_inputs_shape_ids = input_shape_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_inputs_pronunciation_ids = ( input_pronunciation_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() ) multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, input_shape_ids=multiple_choice_inputs_shape_ids, input_pronunciation_ids=multiple_choice_inputs_pronunciation_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = 
config_and_inputs inputs_dict = { "input_ids": input_ids, "input_shape_ids": input_shape_ids, "input_pronunciation_ids": input_pronunciation_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict def create_and_check_for_pretraining( self, config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = RoCBertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, input_shape_ids, input_pronunciation_ids, attention_mask=input_mask, token_type_ids=token_type_ids, attack_input_ids=input_ids, attack_input_shape_ids=input_shape_ids, attack_input_pronunciation_ids=input_pronunciation_ids, attack_attention_mask=input_mask, attack_token_type_ids=token_type_ids, labels_input_ids=token_labels, labels_input_shape_ids=input_shape_ids, labels_input_pronunciation_ids=input_pronunciation_ids, labels_attention_mask=input_mask, labels_token_type_ids=token_type_ids, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) @require_torch class RoCBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( RoCBertModel, RoCBertForMaskedLM, RoCBertForCausalLM, RoCBertForMultipleChoice, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertForPreTraining, ) if is_torch_available() else () ) all_generative_model_classes = (RoCBertForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": RoCBertModel, "fill-mask": RoCBertForMaskedLM, "question-answering": RoCBertForQuestionAnswering, "text-classification": RoCBertForSequenceClassification, "text-generation": RoCBertForCausalLM, "token-classification": RoCBertForTokenClassification, "zero-shot": RoCBertForSequenceClassification, } if is_torch_available() else {} ) # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if pipeline_test_case_name in [ "FillMaskPipelineTests", "FeatureExtractionPipelineTests", "TextClassificationPipelineTests", "TokenClassificationPipelineTests", ]: # Get error: IndexError: index out of range in self. # `word_shape_file` and `word_pronunciation_file` should be shrunk during tiny model creation, # otherwise `IndexError` could occur in some embedding layers. Skip for now until this model has # more usage. 
return True return False # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels_input_ids"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["labels_input_shape_ids"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["labels_input_pronunciation_ids"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["attack_input_ids"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["attack_input_shape_ids"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["attack_input_pronunciation_ids"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = RoCBertModelTester(self) self.config_tester = ConfigTester(self, config_class=RoCBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, input_shape_ids, input_pronunciation_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) @slow def test_model_from_pretrained(self): model_name = "weiweishi/roc-bert-base-zh" model = RoCBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class RoCBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = RoCBertForMaskedLM.from_pretrained("weiweishi/roc-bert-base-zh") # input_text: ['[CLS]', 'b', 'a', '里', '系', '[MASK]', '国', '的', '首', '都', '[SEP]'] is the adversarial text # of ['[CLS]', '巴', '黎', '是', '[MASK]', '国', '的', '首', '都', '[SEP]'], means # "Paris is the [MASK] of France" in English input_ids = torch.tensor([[101, 144, 143, 7027, 5143, 103, 1744, 4638, 7674, 6963, 102]]) input_shape_ids = torch.tensor([[2, 20324, 23690, 8740, 706, 1, 10900, 23343, 20205, 5850, 2]]) input_pronunciation_ids = torch.tensor([[2, 718, 397, 52, 61, 1, 168, 273, 180, 243, 2]]) output = model(input_ids, input_shape_ids, input_pronunciation_ids) output_ids = torch.argmax(output.logits, dim=2) # convert to tokens is: ['[CLS]', '巴', '*', '黎', '是', '法', '国', '的', '首', '都', '[SEP]'] expected_output = torch.tensor([[101, 2349, 115, 7944, 3221, 3791, 1744, 4638, 7674, 6963, 102]]) assert torch.allclose(output_ids, expected_output)
transformers/tests/models/roc_bert/test_modeling_roc_bert.py/0
{ "file_path": "transformers/tests/models/roc_bert/test_modeling_roc_bert.py", "repo_id": "transformers", "token_count": 13607 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SAM model.""" import tempfile import unittest import requests from transformers import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig, pipeline from transformers.testing_utils import cleanup, require_torch, require_torch_sdpa, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SamModel, SamProcessor if is_vision_available(): from PIL import Image class SamPromptEncoderTester: def __init__( self, hidden_size=32, input_image_size=24, patch_size=2, mask_input_channels=4, num_point_embeddings=4, hidden_act="gelu", ): self.hidden_size = hidden_size self.input_image_size = input_image_size self.patch_size = patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act def get_config(self): return SamPromptEncoderConfig( image_size=self.input_image_size, patch_size=self.patch_size, mask_input_channels=self.mask_input_channels, hidden_size=self.hidden_size, num_point_embeddings=self.num_point_embeddings, hidden_act=self.hidden_act, ) def prepare_config_and_inputs(self): dummy_points = floats_tensor([self.batch_size, 3, 2]) config = self.get_config() return config, dummy_points class SamMaskDecoderTester: def __init__( self, hidden_size=32, hidden_act="relu", mlp_dim=64, num_hidden_layers=2, num_attention_heads=4, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=32, layer_norm_eps=1e-6, ): self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_dim = mlp_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_downsample_rate = attention_downsample_rate self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.layer_norm_eps = layer_norm_eps def get_config(self): return SamMaskDecoderConfig( hidden_size=self.hidden_size, hidden_act=self.hidden_act, mlp_dim=self.mlp_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, attention_downsample_rate=self.attention_downsample_rate, num_multimask_outputs=self.num_multimask_outputs, iou_head_depth=self.iou_head_depth, iou_head_hidden_dim=self.iou_head_hidden_dim, layer_norm_eps=self.layer_norm_eps, ) def prepare_config_and_inputs(self): config = self.get_config() dummy_inputs = { "image_embedding": floats_tensor([self.batch_size, self.hidden_size]), } return config, dummy_inputs class SamModelTester: def __init__( self, parent, hidden_size=36, intermediate_size=72, projection_dim=62, 
output_channels=32, num_hidden_layers=2, num_attention_heads=4, num_channels=3, image_size=24, patch_size=2, hidden_act="gelu", layer_norm_eps=1e-06, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, rel_pos_zero_init=False, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=16, mlp_dim=None, batch_size=2, ): self.parent = parent self.image_size = image_size self.patch_size = patch_size self.output_channels = output_channels self.num_channels = num_channels self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.use_abs_pos = use_abs_pos self.use_rel_pos = use_rel_pos self.rel_pos_zero_init = rel_pos_zero_init self.window_size = window_size self.global_attn_indexes = global_attn_indexes self.num_pos_feats = num_pos_feats self.mlp_dim = mlp_dim self.batch_size = batch_size # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 self.prompt_encoder_tester = SamPromptEncoderTester() self.mask_decoder_tester = SamMaskDecoderTester() def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): vision_config = SamVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, initializer_factor=self.initializer_factor, output_channels=self.output_channels, qkv_bias=self.qkv_bias, mlp_ratio=self.mlp_ratio, use_abs_pos=self.use_abs_pos, use_rel_pos=self.use_rel_pos, rel_pos_zero_init=self.rel_pos_zero_init, window_size=self.window_size, global_attn_indexes=self.global_attn_indexes, num_pos_feats=self.num_pos_feats, mlp_dim=self.mlp_dim, ) prompt_encoder_config = self.prompt_encoder_tester.get_config() mask_decoder_config = self.mask_decoder_tester.get_config() return SamConfig( vision_config=vision_config, prompt_encoder_config=prompt_encoder_config, mask_decoder_config=mask_decoder_config, ) def create_and_check_model(self, config, pixel_values): model = SamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3)) self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3)) def create_and_check_get_image_features(self, config, pixel_values): model = SamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model.get_image_embeddings(pixel_values) self.parent.assertEqual(result[0].shape, (self.output_channels, 12, 12)) def create_and_check_get_image_hidden_states(self, config, pixel_values): model = 
SamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model.vision_encoder( pixel_values, output_hidden_states=True, return_dict=True, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) with torch.no_grad(): result = model.vision_encoder( pixel_values, output_hidden_states=True, return_dict=False, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SamModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SamModel,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": SamModel, "mask-generation": SamModel} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False _is_composite = True # TODO: Fix me @Arthur: `run_batch_test` in `tests/test_pipeline_mixin.py` not working def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True def setUp(self): self.model_tester = SamModelTester(self) common_properties = ["initializer_range"] self.config_tester = ConfigTester( self, config_class=SamConfig, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_get_image_features(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_features(*config_and_inputs) def test_image_hidden_states(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_hidden_states(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True expected_vision_attention_shape = ( self.model_tester.batch_size * self.model_tester.num_attention_heads, 196, 196, ) expected_mask_decoder_attention_shape = (self.model_tester.batch_size, 1, 144, 32) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True 
inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) self.assertListEqual( list(vision_attentions[0].shape[-4:]), list(expected_vision_attention_shape), ) self.assertListEqual( list(mask_decoder_attentions[0].shape[-4:]), list(expected_mask_decoder_attention_shape), ) @unittest.skip(reason="SamModel does not support training") def test_training(self): pass @unittest.skip(reason="SamModel does not support training") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="SamModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="SamModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SamModel does not support training") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Hidden_states is tested in create_and_check_model tests") def test_hidden_states_output(self): pass def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # Use a slightly higher default tol to make the tests non-flaky super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes) @slow def test_model_from_pretrained(self): model_name = "facebook/sam-vit-huge" model = SamModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch_sdpa def test_sdpa_can_compile_dynamic(self): self.skipTest(reason="SAM model can't be compiled dynamic yet") @require_torch_sdpa def test_sdpa_can_dispatch_composite_models(self): """ Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model. This tests only by looking at layer names, as usually SDPA layers are calles "SDPAAttention". In contrast to the above test, this one checks if the "config._attn_implamentation" is a dict after the model is loaded, because we manually replicate requested attn implementation on each sub-config when loading. 
See https://github.com/huggingface/transformers/pull/32238 for more info The test tries to cover most general cases of composite models, VLMs with vision and text configs. Any model that has a different set of sub-configs has to overwrite this test. """ if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa") model_sdpa = model_sdpa.eval().to(torch_device) model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) # Root model determines SDPA support attn_impl = "sdpa" if model._supports_sdpa else "eager" # Check config propagation to submodels that support it self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model_sdpa.vision_encoder.config._attn_implementation == attn_impl) self.assertTrue(model_sdpa.mask_decoder.config._attn_implementation == attn_impl) self.assertTrue(model_eager.config._attn_implementation == "eager") self.assertTrue(model_eager.vision_encoder.config._attn_implementation == "eager") self.assertTrue(model_eager.mask_decoder.config._attn_implementation == "eager") # Verify SDPA/eager layer presence has_sdpa = False for name, submodule in model_sdpa.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: has_sdpa = True break if not has_sdpa and attn_impl == "sdpa": raise ValueError("The SDPA model should have SDPA attention layers") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") def prepare_image(): img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_dog_img(): img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image @slow class SamModelIntegrationTest(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device, gc_collect=True) def test_inference_mask_generation_no_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() inputs = processor(images=raw_image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() masks = outputs.pred_masks[0, 0, 0, 0, :3] torch.testing.assert_close(scores[-1], torch.tensor(0.4515), rtol=2e-4, atol=2e-4) torch.testing.assert_close( masks, torch.tensor([-4.1800, -3.4948, -3.4481]).to(torch_device), rtol=2e-4, atol=2e-4 ) def test_inference_mask_generation_one_point_one_bb(self): model = 
SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[650, 900, 1000, 1250]]] input_points = [[[820, 1080]]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() masks = outputs.pred_masks[0, 0, 0, 0, :3] torch.testing.assert_close(scores[-1], torch.tensor(0.9566), rtol=2e-4, atol=2e-4) torch.testing.assert_close( masks, torch.tensor([-12.7729, -12.3665, -12.6061]).to(torch_device), rtol=2e-4, atol=2e-4 ) def test_inference_mask_generation_batched_points_batched_images(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [ [[[820, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], [[[510, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], ] inputs = processor(images=[raw_image, raw_image], input_points=input_points, return_tensors="pt").to( torch_device ) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze().cpu() masks = outputs.pred_masks[0, 0, 0, 0, :3].cpu() EXPECTED_SCORES = torch.tensor( [ [ [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], [ [0.3317, 0.7264, 0.7646], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], ] ) EXPECTED_MASKS = torch.tensor([-2.8550, -2.7988, -2.9625]) torch.testing.assert_close(scores, EXPECTED_SCORES, rtol=1e-3, atol=1e-3) torch.testing.assert_close(masks, EXPECTED_MASKS, rtol=1e-3, atol=1e-3) def test_inference_mask_generation_one_point_one_bb_zero(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[620, 900, 1000, 1255]]] input_points = [[[820, 1080]]] labels = [[0]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, input_labels=labels, return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() torch.testing.assert_close(scores[-1], torch.tensor(0.7894), rtol=1e-4, atol=1e-4) def test_inference_mask_generation_one_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650]]] input_labels = [[1]] inputs = processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() torch.testing.assert_close(scores[-1], torch.tensor(0.9675), rtol=1e-4, atol=1e-4) # With no label input_points = [[[400, 650]]] inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() torch.testing.assert_close(scores[-1], torch.tensor(0.9675), rtol=1e-4, atol=1e-4) def test_inference_mask_generation_two_points(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = 
SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650], [800, 650]]] input_labels = [[1, 1]] inputs = processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() torch.testing.assert_close(scores[-1], torch.tensor(0.9762), rtol=1e-4, atol=1e-4) # no labels inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() torch.testing.assert_close(scores[-1], torch.tensor(0.9762), rtol=1e-4, atol=1e-4) def test_inference_mask_generation_two_points_batched(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650], [800, 650]], [[400, 650]]] input_labels = [[1, 1], [1]] inputs = processor( images=[raw_image, raw_image], input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() torch.testing.assert_close(scores[0][-1], torch.tensor(0.9762), rtol=1e-4, atol=1e-4) torch.testing.assert_close(scores[1][-1], torch.tensor(0.9637), rtol=1e-4, atol=1e-4) def test_inference_mask_generation_one_box(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[75, 275, 1725, 850]]] inputs = processor(images=raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() torch.testing.assert_close(scores[-1], torch.tensor(0.7937), rtol=1e-4, atol=1e-4) def test_inference_mask_generation_batched_image_one_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() raw_dog_image = prepare_dog_img() input_points = [[[820, 1080]], [[220, 470]]] inputs = processor(images=[raw_image, raw_dog_image], input_points=input_points, return_tensors="pt").to( torch_device ) with torch.no_grad(): outputs = model(**inputs) scores_batched = outputs.iou_scores.squeeze() input_points = [[[220, 470]]] inputs = processor(images=raw_dog_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores_single = outputs.iou_scores.squeeze() torch.testing.assert_close(scores_batched[1, :], scores_single, rtol=1e-4, atol=1e-4) def test_inference_mask_generation_two_points_point_batch(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = torch.Tensor([[[400, 650]], [[220, 470]]]).cpu() # fmt: skip input_points = input_points.unsqueeze(0) inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) iou_scores = outputs.iou_scores.cpu() self.assertTrue(iou_scores.shape == (1, 2, 3)) torch.testing.assert_close( iou_scores, 
torch.tensor([[[0.9105, 0.9825, 0.9675], [0.7646, 0.7943, 0.7774]]]), atol=1e-4, rtol=1e-4 ) def test_inference_mask_generation_three_boxes_point_batch(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() # fmt: off input_boxes = torch.Tensor([[[620, 900, 1000, 1255]], [[75, 275, 1725, 850]], [[75, 275, 1725, 850]]]).cpu() EXPECTED_IOU = torch.tensor([[[0.9773, 0.9881, 0.9522], [0.5996, 0.7661, 0.7937], [0.5996, 0.7661, 0.7937]]]) # fmt: on input_boxes = input_boxes.unsqueeze(0) inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) iou_scores = outputs.iou_scores.cpu() self.assertTrue(iou_scores.shape == (1, 3, 3)) torch.testing.assert_close(iou_scores, EXPECTED_IOU, rtol=1e-4, atol=1e-4) def test_dummy_pipeline_generation(self): generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=torch_device) raw_image = prepare_image() _ = generator(raw_image, points_per_batch=64)
transformers/tests/models/sam/test_modeling_sam.py/0
{ "file_path": "transformers/tests/models/sam/test_modeling_sam.py", "repo_id": "transformers", "token_count": 14432 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SegGpt model.""" import inspect import math import unittest from datasets import load_dataset from transformers import SegGptConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SegGptForImageSegmentation, SegGptModel from transformers.models.seggpt.modeling_seggpt import SegGptLoss if is_vision_available(): from transformers import SegGptImageProcessor class SegGptModelTester: def __init__( self, parent, batch_size=2, image_size=30, patch_size=2, num_channels=3, is_training=False, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, mlp_ratio=2.0, merge_index=0, intermediate_hidden_state_indices=[1], pretrain_image_size=10, decoder_hidden_size=10, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.mlp_ratio = mlp_ratio self.merge_index = merge_index self.intermediate_hidden_state_indices = intermediate_hidden_state_indices self.pretrain_image_size = pretrain_image_size self.decoder_hidden_size = decoder_hidden_size # in SegGpt, the seq length equals the number of patches (we don't use the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size]) prompt_pixel_values = floats_tensor( [self.batch_size, self.num_channels, self.image_size // 2, self.image_size] ) prompt_masks = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size]) labels = None if self.use_labels: labels = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size]) config = self.get_config() return config, pixel_values, prompt_pixel_values, prompt_masks, labels def get_config(self): return SegGptConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, 
num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, initializer_range=self.initializer_range, mlp_ratio=self.mlp_ratio, merge_index=self.merge_index, intermediate_hidden_state_indices=self.intermediate_hidden_state_indices, pretrain_image_size=self.pretrain_image_size, decoder_hidden_size=self.decoder_hidden_size, ) def create_and_check_model(self, config, pixel_values, prompt_pixel_values, prompt_masks, labels): model = SegGptModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values, prompt_pixel_values, prompt_masks) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.image_size // self.patch_size, self.image_size // self.patch_size, self.hidden_size, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, prompt_pixel_values, prompt_masks, labels, ) = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "prompt_pixel_values": prompt_pixel_values, "prompt_masks": prompt_masks, } return config, inputs_dict @require_torch class SegGptModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SegGpt does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SegGptModel, SegGptForImageSegmentation) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False pipeline_model_mapping = ( {"feature-extraction": SegGptModel, "mask-generation": SegGptModel} if is_torch_available() else {} ) def setUp(self): self.model_tester = SegGptModelTester(self) self.config_tester = ConfigTester(self, config_class=SegGptConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SegGpt does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values", "prompt_pixel_values", "prompt_masks"] self.assertListEqual(arg_names[:3], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) patch_height = patch_width = config.image_size // config.patch_size self.assertListEqual( 
list(hidden_states[0].shape[-3:]), [patch_height, patch_width, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_batching_equivalence(self): def recursive_check(batched_object, single_row_object, model_name, key): if isinstance(batched_object, (list, tuple)): for batched_object_value, single_row_object_value in zip(batched_object, single_row_object): recursive_check(batched_object_value, single_row_object_value, model_name, key) else: batched_row = batched_object[:1] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}" ) self.assertTrue( torch.max(torch.abs(batched_row - single_row_object)) <= 1e-03, msg=( f"Batched and Single row outputs are not equal in {model_name} for key={key}. " f"Difference={torch.max(torch.abs(batched_row - single_row_object))}." ), ) config, batched_input = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(config).to(torch_device).eval() batch_size = self.model_tester.batch_size single_row_input = {} for key, value in batched_input_prepared.items(): if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0: single_batch_shape = value.shape[0] // batch_size single_row_input[key] = value[:single_batch_shape] with torch.no_grad(): model_batched_output = model(**batched_input_prepared) model_row_output = model(**single_row_input) for key in model_batched_output: # the first hidden state in SegGPT has weird hack of adding first half of batch with second half if key == "hidden_states": model_batched_output[key] = model_batched_output[key][1:] model_row_output[key] = model_row_output[key][1:] recursive_check(model_batched_output[key], model_row_output[key], model_name, key) def test_seggpt_loss(self): torch.manual_seed(100) config = self.model_tester.get_config() prompt_masks = torch.rand(1, config.num_channels, config.image_size, config.image_size) label = torch.rand(1, config.num_channels, config.image_size, config.image_size) pred_masks = torch.rand(1, config.num_channels, config.image_size * 2, config.image_size) # seq_len x 2 because the loss concatenates prompt_masks and labels as pred_masks is concatenated bool_masked_pos = torch.rand(1, self.model_tester.seq_length * 2) > 0.5 loss = SegGptLoss(config) loss_value = loss(prompt_masks, pred_masks, label, bool_masked_pos) expected_loss_value = torch.tensor(0.3340) torch.testing.assert_close(loss_value, expected_loss_value, rtol=1e-4, atol=1e-4) @slow def test_model_from_pretrained(self): model_name = "BAAI/seggpt-vit-large" model = SegGptModel.from_pretrained(model_name) 
self.assertIsNotNone(model) def prepare_img(): ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"] images = [image.convert("RGB") for image in ds["image"]] masks = [image.convert("RGB") for image in ds["mask"]] return images, masks def prepare_bool_masked_pos(config: SegGptConfig): num_patches = math.prod([i // config.patch_size for i in config.image_size]) mask_ratio = 0.75 torch.manual_seed(2) num_masked_patches = int(num_patches * mask_ratio) shuffle_idx = torch.randperm(num_patches) bool_masked_pos = torch.FloatTensor([0] * (num_patches - num_masked_patches) + [1] * num_masked_patches)[ shuffle_idx ] bool_masked_pos = bool_masked_pos.unsqueeze(0).bool() return bool_masked_pos @require_torch @require_vision class SegGptModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return SegGptImageProcessor.from_pretrained("BAAI/seggpt-vit-large") if is_vision_available() else None @slow def test_one_shot_inference(self): model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").to(torch_device) image_processor = self.default_image_processor images, masks = prepare_img() input_image = images[1] prompt_image = images[0] prompt_mask = masks[0] inputs = image_processor( images=input_image, prompt_images=prompt_image, prompt_masks=prompt_mask, return_tensors="pt", do_convert_rgb=False, ) inputs = inputs.to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 3, 896, 448)) self.assertEqual(outputs.pred_masks.shape, expected_shape) expected_slice = torch.tensor( [ [[-2.1208, -2.1190, -2.1198], [-2.1237, -2.1228, -2.1227], [-2.1232, -2.1226, -2.1228]], [[-2.0405, -2.0396, -2.0403], [-2.0434, -2.0434, -2.0433], [-2.0428, -2.0432, -2.0434]], [[-1.8102, -1.8088, -1.8099], [-1.8131, -1.8126, -1.8129], [-1.8130, -1.8128, -1.8131]], ] ).to(torch_device) torch.testing.assert_close(outputs.pred_masks[0, :, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) result = image_processor.post_process_semantic_segmentation(outputs, [input_image.size[::-1]])[0] result_expected_shape = torch.Size((170, 297)) expected_area = 1082 area = (result > 0).sum().item() self.assertEqual(result.shape, result_expected_shape) self.assertEqual(area, expected_area) @slow def test_few_shot_inference(self): model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").to(torch_device) image_processor = self.default_image_processor images, masks = prepare_img() input_images = [images[1]] * 2 prompt_images = [images[0], images[2]] prompt_masks = [masks[0], masks[2]] inputs = image_processor( images=input_images, prompt_images=prompt_images, prompt_masks=prompt_masks, return_tensors="pt", do_convert_rgb=False, ) inputs = {k: v.to(torch_device) for k, v in inputs.items()} with torch.no_grad(): outputs = model(**inputs, feature_ensemble=True) expected_shape = torch.Size((2, 3, 896, 448)) expected_slice = torch.tensor( [ [[-2.1201, -2.1192, -2.1189], [-2.1217, -2.1210, -2.1204], [-2.1216, -2.1202, -2.1194]], [[-2.0393, -2.0390, -2.0387], [-2.0402, -2.0402, -2.0397], [-2.0400, -2.0394, -2.0388]], [[-1.8083, -1.8076, -1.8077], [-1.8105, -1.8102, -1.8099], [-1.8105, -1.8095, -1.8090]], ] ).to(torch_device) self.assertEqual(outputs.pred_masks.shape, expected_shape) torch.testing.assert_close(outputs.pred_masks[0, :, 448:451, :3], expected_slice, rtol=4e-4, atol=4e-4) @slow def test_one_shot_with_label(self): model = 
SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").to(torch_device) image_processor = self.default_image_processor images, masks = prepare_img() input_image = images[1] label = masks[1] prompt_image = images[0] prompt_mask = masks[0] inputs = image_processor( images=input_image, prompt_masks=prompt_mask, prompt_images=prompt_image, return_tensors="pt", do_convert_rgb=False, ).to(torch_device) labels = image_processor(images=None, prompt_masks=label, return_tensors="pt", do_convert_rgb=False)[ "prompt_masks" ].to(torch_device) bool_masked_pos = prepare_bool_masked_pos(model.config).to(torch_device) with torch.no_grad(): outputs = model(**inputs, labels=labels, bool_masked_pos=bool_masked_pos) expected_loss = torch.tensor(0.0074).to(torch_device) torch.testing.assert_close(outputs.loss, expected_loss, rtol=1e-4, atol=1e-4)
transformers/tests/models/seggpt/test_modeling_seggpt.py/0
{ "file_path": "transformers/tests/models/seggpt/test_modeling_seggpt.py", "repo_id": "transformers", "token_count": 8380 }
# coding=utf-8 # Copyright 2024 BigCode and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Starcoder2 model.""" import unittest import pytest from transformers import Starcoder2Config, is_torch_available from transformers.testing_utils import ( require_bitsandbytes, require_flash_attn, require_torch, require_torch_accelerator, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, Starcoder2ForCausalLM, Starcoder2ForSequenceClassification, Starcoder2ForTokenClassification, Starcoder2Model, ) # Copied from transformers.tests.models.mistral.test_modeling_mistral.Starcoder2ModelTester with Mistral->Starcoder2 class Starcoder2ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) 
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels # Ignore copy def get_config(self): return Starcoder2Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, eos_token_id=self.pad_token_id, bos_token_id=self.pad_token_id, ) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model with Llama->Starcoder2 def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Starcoder2Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model_as_decoder with Llama->Starcoder2 def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = Starcoder2Model(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_for_causal_lm with Llama->Starcoder2 def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = Starcoder2ForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_decoder_model_past_large_inputs with Llama->Starcoder2 def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = Starcoder2ForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = 
outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and next attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs_for_common
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
# Copied from transformers.tests.models.mistral.test_modeling_mistral.MistralModelTest with Mistral->Starcoder2
class Starcoder2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Starcoder2Model, Starcoder2ForCausalLM, Starcoder2ForSequenceClassification, Starcoder2ForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (Starcoder2ForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": Starcoder2Model,
            "text-classification": Starcoder2ForSequenceClassification,
            "token-classification": Starcoder2ForTokenClassification,
            "text-generation": Starcoder2ForCausalLM,
            "zero-shot": Starcoder2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    # TODO (ydshieh): Check this.
See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True def setUp(self): self.model_tester = Starcoder2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Starcoder2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_Starcoder2_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() print(config) config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = Starcoder2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Starcoder2_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = Starcoder2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_Starcoder2_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = Starcoder2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_token_classification_model with Llama->Starcoder2,llama->Starcoder2 def test_Starcoder2_token_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels) model = 
Starcoder2ForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=token_labels) self.assertEqual( result.logits.shape, (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels), ) @unittest.skip(reason="Starcoder2 buffers include complex numbers, which breaks this test") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="Starcoder2 uses GQA on all models so the KV cache is a non standard format") def test_past_key_values_format(self): pass @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence_right_padding(self): self.skipTest(reason="Starcoder2 flash attention does not support right padding") @slow @require_torch_accelerator class Starcoder2IntegrationTest(unittest.TestCase): def test_starcoder2_batched_generation_sdpa(self): EXPECTED_TEXT = [ "Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. I am interested in the field of Machine Learning and I am currently working on", "def hello_world():\n\treturn 'Hello World!'\n\[email protected]('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' % name\n\n@app", ] model_id = "bigcode/starcoder2-7b" model = Starcoder2ForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="sdpa" ) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token text = ["Hello my name is Younes and", "def hello_world():"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=40, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT, output_text) def test_starcoder2_batched_generation_eager(self): EXPECTED_TEXT = [ "Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. I am interested in the field of Machine Learning and I am currently working on", "def hello_world():\n\treturn 'Hello World!'\n\[email protected]('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' % name\n\n@app", ] model_id = "bigcode/starcoder2-7b" model = Starcoder2ForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="eager" ) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token text = ["Hello my name is Younes and", "def hello_world():"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=40, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT, output_text) @require_flash_attn @pytest.mark.flash_attn_test def test_starcoder2_batched_generation_fa2(self): EXPECTED_TEXT = [ "Hello my name is Younes and I am a student at the University of Liverpool. I am currently studying for my MSc in Computer Science. I am interested in the field of Machine Learning and I am currently working on", "def hello_world():\n\treturn 'Hello World!'\n\[email protected]('/hello/<name>')\ndef hello_name(name):\n\treturn 'Hello %s!' 
% name\n\n@app", ] model_id = "bigcode/starcoder2-7b" model = Starcoder2ForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="flash_attention_2" ) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token text = ["Hello my name is Younes and", "def hello_world():"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=40, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT, output_text) @require_bitsandbytes def test_starcoder2_batched_generation_4bit(self): EXPECTED_TEXT = [ 'Hello my name is Younes and I am a student at the University of Maryland. I am currently working on a project that is related to the topic of "How to make a game". I am currently working on a project', 'def hello_world():\n\treturn "Hello World"\n\n@app.route(\'/hello/<name>\')\ndef hello_name(name):\n\treturn "Hello " + name\n\n@app.route', ] model_id = "bigcode/starcoder2-7b" model = Starcoder2ForCausalLM.from_pretrained(model_id, load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token text = ["Hello my name is Younes and", "def hello_world():"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=40, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT, output_text)
transformers/tests/models/starcoder2/test_modeling_starcoder2.py/0
{ "file_path": "transformers/tests/models/starcoder2/test_modeling_starcoder2.py", "repo_id": "transformers", "token_count": 9344 }
# coding=utf-8 # Copyright 2023 The Intel Team Authors, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch TVP model.""" import unittest from transformers import ResNetConfig, TimmBackboneConfig, TvpConfig from transformers.testing_utils import require_timm, require_torch, require_vision, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import TvpForVideoGrounding, TvpModel if is_vision_available(): from PIL import Image from transformers import TvpImageProcessor # Copied from test.models.videomae.test_modeling_videomae.VideoMAEModelTester with VideoMAE->TVP class TVPModelTester: def __init__( self, parent, batch_size=1, seq_length=2, alpha=1.0, beta=0.1, visual_prompter_type="framepad", visual_prompter_apply="replace", num_frames=2, max_img_size=448, visual_prompt_size=96, vocab_size=100, hidden_size=32, intermediate_size=32, num_hidden_layers=2, num_attention_heads=4, max_position_embeddings=30, max_grid_col_position_embeddings=30, max_grid_row_position_embeddings=30, hidden_dropout_prob=0.1, hidden_act="gelu", layer_norm_eps=1e-12, initializer_range=0.02, pad_token_id=0, type_vocab_size=2, attention_probs_dropout_prob=0.1, ): self.parent = parent self.batch_size = batch_size self.input_id_length = seq_length self.seq_length = seq_length + 10 + 784 # include text prompt length and visual input length self.alpha = alpha self.beta = beta self.visual_prompter_type = visual_prompter_type self.visual_prompter_apply = visual_prompter_apply self.num_frames = num_frames self.max_img_size = max_img_size self.visual_prompt_size = visual_prompt_size self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.max_grid_col_position_embeddings = max_grid_col_position_embeddings self.max_grid_row_position_embeddings = max_grid_row_position_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.pad_token_id = pad_token_id self.type_vocab_size = type_vocab_size self.is_training = False self.num_channels = 3 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.input_id_length], self.vocab_size) attention_mask = random_attention_mask([self.batch_size, self.input_id_length]) pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.max_img_size, self.max_img_size] ) config = self.get_config() return (config, input_ids, pixel_values, 
attention_mask) def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=64, hidden_sizes=[64, 128], depths=[2, 2], hidden_act="relu", out_features=["stage2"], out_indices=[2], ) return TvpConfig( backbone_config=resnet_config, backbone=None, alpha=self.alpha, beta=self.beta, visual_prompter_type=self.visual_prompter_type, visual_prompter_apply=self.visual_prompter_apply, num_frames=self.num_frames, max_img_size=self.max_img_size, visual_prompt_size=self.visual_prompt_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_grid_col_position_embeddings=self.max_grid_col_position_embeddings, max_grid_row_position_embeddings=self.max_grid_row_position_embeddings, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, type_vocab_size=self.type_vocab_size, ) def create_and_check_model(self, config, input_ids, pixel_values, attention_mask): model = TvpModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, pixel_values, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "pixel_values": pixel_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class TVPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as TVP does not use, inputs_embeds. The seq_length in TVP contain textual and visual inputs, and prompt. """ all_model_classes = (TvpModel, TvpForVideoGrounding) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": TvpModel, "temporal-video-grounding": TvpForVideoGrounding} if is_torch_available() else {} ) # TODO: Enable this once this model gets more usage test_torchscript = False def setUp(self): self.model_tester = TVPModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="TVP does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="TVPModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass # override as the `logit_scale` parameter initilization is different for TVP def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # params are randomly initialized. 
self.assertAlmostEqual( param.data.mean().item(), 0.0, delta=1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @require_timm def test_backbone_selection(self): def _validate_backbone_init(): for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() # Confirm out_indices propogated to backbone if model.__class__.__name__ == "TvpModel": self.assertEqual(len(model.vision_model.backbone.out_indices), 2) elif model.__class__.__name__ == "TvpForVideoGrounding": self.assertEqual(len(model.model.vision_model.backbone.out_indices), 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Force load_backbone path config.is_hybrid = False # We load through configs, as the modeling file assumes config.backbone_config is always set config.use_pretrained_backbone = False config.backbone_kwargs = None # Load a timm backbone # We hack adding hidden_sizes to the config to test the backbone loading backbone_config = TimmBackboneConfig("resnet18", out_indices=[-2, -1], hidden_sizes=[64, 128]) config.backbone_config = backbone_config _validate_backbone_init() # Load a HF backbone backbone_config = ResNetConfig.from_pretrained("facebook/dinov2-small", out_indices=[-2, -1]) config.backbone_config = backbone_config _validate_backbone_init() # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @require_torch class TvpModelIntegrationTests(unittest.TestCase): @cached_property def default_image_processor(self): return TvpImageProcessor.from_pretrained("Jiqing/tiny-random-tvp") def test_inference_no_head(self): model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt") input_ids = torch.tensor([[1, 2]]) attention_mask = torch.tensor([[1, 1]]) encoding.update({"input_ids": input_ids, "attention_mask": attention_mask}) encoding.to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 796, 128)) assert outputs.last_hidden_state.shape == expected_shape expected_slice = torch.tensor( [[-0.4902, -0.4121, -1.7872], [-0.2184, 2.1211, -0.9371], [0.1180, 0.5003, -0.1727]] ).to(torch_device) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) def test_inference_with_head(self): model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt") input_ids = torch.tensor([[1, 2]]) attention_mask = torch.tensor([[1, 1]]) encoding.update({"input_ids": input_ids, "attention_mask": attention_mask}) encoding.to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 2)) assert outputs.logits.shape == expected_shape expected_slice = torch.tensor([[0.5061, 0.4988]]).to(torch_device) torch.testing.assert_close(outputs.logits, expected_slice, rtol=1e-4, atol=1e-4) def test_interpolate_inference_no_head(self): model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device) image_processor = self.default_image_processor image = prepare_img() # 480X640 encoding = image_processor( images=image, return_tensors="pt", do_resize=False, do_pad=False, do_center_crop=False ) 
input_ids = torch.tensor([[1, 2]]) attention_mask = torch.tensor([[1, 1]]) encoding.update({"input_ids": input_ids, "attention_mask": attention_mask}) encoding.to(torch_device) with torch.no_grad(): outputs = model(**encoding, interpolate_pos_encoding=True) expected_shape = torch.Size((1, 1212, 128)) assert outputs.last_hidden_state.shape == expected_shape def test_interpolate_inference_with_head(self): model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device) image_processor = self.default_image_processor image = prepare_img() # 480X640 encoding = image_processor( images=image, return_tensors="pt", do_resize=False, do_pad=False, do_center_crop=False ) input_ids = torch.tensor([[1, 2]]) attention_mask = torch.tensor([[1, 1]]) encoding.update({"input_ids": input_ids, "attention_mask": attention_mask}) encoding.to(torch_device) with torch.no_grad(): outputs = model(**encoding, interpolate_pos_encoding=True, output_hidden_states=True) expected_shape = torch.Size((1, 1212, 128)) assert outputs.hidden_states[-1].shape == expected_shape
transformers/tests/models/tvp/test_modeling_tvp.py/0
{ "file_path": "transformers/tests/models/tvp/test_modeling_tvp.py", "repo_id": "transformers", "token_count": 5921 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ViViT model.""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VivitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VivitForVideoClassification, VivitModel if is_vision_available(): from transformers import VivitImageProcessor class VivitModelTester: def __init__( self, parent, batch_size=2, is_training=True, use_labels=True, num_labels=10, image_size=10, num_frames=8, # decreased, because default 32 takes too much RAM at inference tubelet_size=[2, 4, 4], num_channels=3, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, scope=None, attn_implementation="eager", mask_ratio=0.5, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.num_labels = num_labels self.image_size = image_size self.num_frames = num_frames self.tubelet_size = tubelet_size self.num_channels = num_channels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.scope = scope self.attn_implementation = attn_implementation self.seq_length = ( (self.image_size // self.tubelet_size[2]) * (self.image_size // self.tubelet_size[1]) * (self.num_frames // self.tubelet_size[0]) ) + 1 # CLS token self.mask_ratio = mask_ratio self.num_masks = int(mask_ratio * self.seq_length) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): config = VivitConfig( num_frames=self.num_frames, image_size=self.image_size, tubelet_size=self.tubelet_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, 
num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, qkv_bias=self.qkv_bias, attn_implementation=self.attn_implementation, ) config.num_labels = self.num_labels return config def create_and_check_model(self, config, pixel_values, labels): model = VivitModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_video_classification(self, config, pixel_values, labels): model = VivitForVideoClassification(config) model.to(torch_device) model.eval() result = model(pixel_values) # verify the logits shape expected_shape = torch.Size((self.batch_size, self.num_labels)) self.parent.assertEqual(result.logits.shape, expected_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VivitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Vivit does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (VivitModel, VivitForVideoClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": VivitModel, "video-classification": VivitForVideoClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VivitModelTester(self) self.config_tester = ConfigTester(self, config_class=VivitConfig, has_text_modality=False, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING): inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Vivit does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values", "head_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_video_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_video_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "google/vivit-b-16x2-kinetics400" model = VivitModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: seq_len = self.model_tester.seq_length inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_torch @require_vision class VivitModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return VivitImageProcessor() if is_vision_available() else None @slow def test_inference_for_video_classification(self): model = 
VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400").to(torch_device) image_processor = self.default_image_processor video = prepare_video() inputs = image_processor(video, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 400)) self.assertEqual(outputs.logits.shape, expected_shape) # taken from original model expected_slice = torch.tensor([-0.9498, 2.7971, -1.4049, 0.1024, -1.8353]).to(torch_device) torch.testing.assert_close(outputs.logits[0, :5], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_interpolate_pos_encoding(self): # Vivit models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400").to(torch_device) image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400") video = prepare_video() inputs = image_processor( video, size={"shortest_edge": 480}, crop_size={"height": 232, "width": 232}, return_tensors="pt" ) pixel_values = inputs.pixel_values.to(torch_device) # forward pass with torch.no_grad(): outputs = model(pixel_values, interpolate_pos_encoding=True) # verify the logits shape expected_shape = torch.Size((1, 3137, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
transformers/tests/models/vivit/test_modeling_vivit.py/0
{ "file_path": "transformers/tests/models/vivit/test_modeling_vivit.py", "repo_id": "transformers", "token_count": 6340 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoFeatureExtractor, AutoProcessor from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput if is_torch_available(): from transformers import Wav2Vec2ForCTC @require_pyctcdecode class Wav2Vec2ProcessorWithLMTest(unittest.TestCase): def setUp(self): vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split() vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.add_kwargs_tokens_map = { "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } feature_extractor_map = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16000, "return_attention_mask": False, "do_normalize": True, } self.tmpdirname = tempfile.mkdtemp() self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.feature_extraction_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(feature_extractor_map) + "\n") # load decoder from hub self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder" def get_tokenizer(self, **kwargs_init): kwargs = self.add_kwargs_tokens_map.copy() kwargs.update(kwargs_init) return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_feature_extractor(self, **kwargs): return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) def get_decoder(self, **kwargs): return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) processor.save_pretrained(self.tmpdirname) processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname) # tokenizer 
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor) # decoder self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, ) self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC) def test_save_load_pretrained_additional_features(self): processor = Wav2Vec2ProcessorWithLM( tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname) # make sure that error is thrown when decoder alphabet doesn't match processor = Wav2Vec2ProcessorWithLM.from_pretrained( self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha, 5.0) self.assertEqual(processor.language_model.beta, 3.0) self.assertEqual(processor.language_model.score_boundary, -7.0) self.assertEqual(processor.language_model.unk_score_offset, 3) def test_load_decoder_tokenizer_mismatch_content(self): tokenizer = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["xx"]) with self.assertRaisesRegex(ValueError, "include"): Wav2Vec2ProcessorWithLM( tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_another_feature_extractor(self): feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/w2v-bert-2.0") tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", ) def test_wrong_feature_extractor_raises_error(self): feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-large-v3") tokenizer = self.get_tokenizer() decoder = self.get_decoder() with self.assertRaises(ValueError): Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) 
input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def _get_dummy_logits(self, shape=(2, 10, 16), seed=77): np.random.seed(seed) return np.random.rand(*shape) def test_decoder(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits(shape=(10, 16), seed=13) decoded_processor = processor.decode(logits) decoded_decoder = decoder.decode_beams(logits)[0] self.assertEqual(decoded_decoder[0], decoded_processor.text) self.assertEqual("</s> <s> </s>", decoded_processor.text) self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score) self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score) @parameterized.expand([[None], ["fork"], ["spawn"]]) def test_decoder_batch(self, pool_context): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: decoded_processor = processor.batch_decode(logits) else: with get_context(pool_context).Pool() as pool: decoded_processor = processor.batch_decode(logits, pool) logits_list = list(logits) with get_context("fork").Pool() as p: decoded_beams = decoder.decode_beams_batch(p, logits_list) texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0]) logit_scores_decoder.append(beams[0][-2]) lm_scores_decoder.append(beams[0][-1]) self.assertListEqual(texts_decoder, decoded_processor.text) self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text) self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score) self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score) def test_decoder_with_params(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits() beam_width = 15 beam_prune_logp = -20.0 token_min_logp = -4.0 decoded_processor_out = processor.batch_decode( logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, ) decoded_processor = decoded_processor_out.text logits_list = list(logits) with get_context("fork").Pool() as pool: decoded_decoder_out = decoder.decode_beams_batch( pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, ) decoded_decoder = [d[0][0] for d in decoded_decoder_out] logit_scores = [d[0][2] for d in decoded_decoder_out] lm_scores = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(decoded_decoder, decoded_processor) self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor) self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score)) self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, 
atol=1e-3)) self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score)) self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3)) def test_decoder_with_params_of_lm(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) logits = self._get_dummy_logits() alpha = 2.0 beta = 5.0 unk_score_offset = -20.0 lm_score_boundary = True decoded_processor_out = processor.batch_decode( logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, ) decoded_processor = decoded_processor_out.text logits_list = list(logits) decoder.reset_params( alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, ) with get_context("fork").Pool() as pool: decoded_decoder_out = decoder.decode_beams_batch( pool, logits_list, ) decoded_decoder = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(decoded_decoder, decoded_processor) self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor) lm_model = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha, 2.0) self.assertEqual(lm_model.beta, 5.0) self.assertEqual(lm_model.unk_score_offset, -20.0) self.assertEqual(lm_model.score_boundary, True) def test_decoder_download_ignores_files(self): processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") language_model = processor.decoder.model_container[processor.decoder._model_key] path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute() downloaded_decoder_files = os.listdir(path_to_cached_dir) expected_decoder_files = ["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(downloaded_decoder_files, expected_decoder_files) def test_decoder_local_files(self): local_dir = snapshot_download("hf-internal-testing/processor_with_lm") processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir) language_model = processor.decoder.model_container[processor.decoder._model_key] path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute() local_decoder_files = os.listdir(local_dir) expected_decoder_files = os.listdir(path_to_cached_dir) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(local_decoder_files, expected_decoder_files) def test_processor_from_auto_processor(self): processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm") raw_speech = floats_list((3, 1000)) input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np") input_auto = processor_auto(raw_speech, return_tensors="np") for key in input_wav2vec2.keys(): self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2) logits = self._get_dummy_logits() decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits) decoded_auto = processor_auto.batch_decode(logits) self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() decoder = self.get_decoder() processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", ) @staticmethod def get_from_offsets(offsets, key): retrieved_list = [d[key] for d in offsets] return retrieved_list def test_offsets_integration_fast(self): processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") logits = self._get_dummy_logits()[0] outputs = processor.decode(logits, output_word_offsets=True) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys()), 4) self.assertTrue("text" in outputs) self.assertTrue("word_offsets" in outputs) self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput)) self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5]) def test_offsets_integration_fast_batch(self): processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm") logits = self._get_dummy_logits() outputs = processor.batch_decode(logits, output_word_offsets=True) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys()), 4) self.assertTrue("text" in outputs) self.assertTrue("word_offsets" in outputs) self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput)) self.assertListEqual( [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"]) 
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4]) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5]) @slow @require_torch @require_torchaudio def test_word_time_stamp_integration(self): import torch ds = load_dataset( "mozilla-foundation/common_voice_11_0", "en", split="train", streaming=True, trust_remote_code=True ) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) ds_iter = iter(ds) sample = next(ds_iter) processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm") input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values).logits.cpu().numpy() output = processor.decode(logits[0], output_word_offsets=True) time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate word_time_stamps = [ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" EXPECTED_TEXT = "THE TRACK APPEARS ON THE COMPILATION ALBUM CRAFT FORKS" # output words self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT) self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text) # output times start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time")) end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time")) # fmt: off expected_start_tensor = torch.tensor([0.6800, 0.8800, 1.1800, 1.8600, 1.9600, 2.1000, 3.0000, 3.5600, 3.9800]) expected_end_tensor = torch.tensor([0.7800, 1.1000, 1.6600, 1.9200, 2.0400, 2.8000, 3.3000, 3.8800, 4.2800]) # fmt: on torch.testing.assert_close(start_times, expected_start_tensor, rtol=0.01, atol=0.01) torch.testing.assert_close(end_times, expected_end_tensor, rtol=0.01, atol=0.01)
transformers/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py/0
{ "file_path": "transformers/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py", "repo_id": "transformers", "token_count": 9350 }
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "facebook/xglm-564M" tokenizer_class = XGLMTokenizer rust_tokenizer_class = XGLMTokenizerFast test_rust_tokenizer = True test_sentencepiece = True def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "<pad>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<s>") self.assertEqual(vocab_keys[1], "<pad>") self.assertEqual(len(vocab_keys), 1_008) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_008) def test_full_tokenizer(self): tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def big_tokenizer(self): return XGLMTokenizer.from_pretrained("facebook/xglm-564M") def test_picklable_without_disk(self): with 
tempfile.NamedTemporaryFile() as f: shutil.copyfile(SAMPLE_VOCAB, f.name) tokenizer = XGLMTokenizer(f.name, keep_accents=True) pickled_tokenizer = pickle.dumps(tokenizer) pickle.loads(pickled_tokenizer) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) @slow def test_tokenization_base_easy_symbols(self): symbols = "Hello World!" original_tokenizer_encodings = [2, 31227, 4447, 35] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenization_base_hard_symbols(self): symbols = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] # fmt: skip self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenizer_integration(self): # fmt: off expected_encoding = { 'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False, )
transformers/tests/models/xglm/test_tokenization_xglm.py/0
{ "file_path": "transformers/tests/models/xglm/test_tokenization_xglm.py", "repo_id": "transformers", "token_count": 4238 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class OptimizationFTest(unittest.TestCase): def assertListAlmostEqual(self, list1, list2, tol): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol) def testGradientAccumulator(self): accumulator = GradientAccumulator() accumulator([tf.constant([1.0, 2.0])]) accumulator([tf.constant([-2.0, 1.0])]) accumulator([tf.constant([-1.0, 2.0])]) with self.assertRaises(ValueError): accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])]) self.assertEqual(accumulator.step, 3) self.assertEqual(len(accumulator.gradients), 1) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2) accumulator.reset() self.assertEqual(accumulator.step, 0) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2) def testGradientAccumulatorDistributionStrategy(self): context._context = None ops.enable_eager_execution_internal() physical_devices = tf.config.list_physical_devices("CPU") if len(physical_devices) == 1: tf.config.set_logical_device_configuration( physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) devices = tf.config.list_logical_devices(device_type="CPU") strategy = tf.distribute.MirroredStrategy(devices=devices[:2]) with strategy.scope(): accumulator = GradientAccumulator() variable = tf.Variable([4.0, 3.0]) optimizer, _ = create_optimizer(5e-5, 10, 5) gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False) def accumulate_on_replica(gradient): accumulator([gradient]) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients, [variable]))) @tf.function def accumulate(grad1, grad2): with strategy.scope(): local_variables = strategy.experimental_local_results(gradient_placeholder) local_variables[0].assign(grad1) local_variables[1].assign(grad2) strategy.run(accumulate_on_replica, args=(gradient_placeholder,)) @tf.function def apply_grad(): with strategy.scope(): strategy.run(apply_on_replica) def _check_local_values(grad1, grad2): values = strategy.experimental_local_results(accumulator._gradients[0]) self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2) self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2) accumulate([1.0, 2.0], [-1.0, 1.0]) accumulate([3.0, -1.0], [-1.0, -1.0]) accumulate([-2.0, 2.0], [3.0, -2.0]) self.assertEqual(accumulator.step, 3) _check_local_values([2.0, 3.0], [1.0, -2.0]) apply_grad() self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2) accumulator.reset() self.assertEqual(accumulator.step, 0) _check_local_values([0.0, 0.0], [0.0, 0.0])
transformers/tests/optimization/test_optimization_tf.py/0
{ "file_path": "transformers/tests/optimization/test_optimization_tf.py", "repo_id": "transformers", "token_count": 1782 }
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Dict import numpy as np from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] def mask_to_test_readable(mask: Image) -> Dict: npimg = np.array(mask) shape = npimg.shape return {"hash": hashimage(mask), "shape": shape} @is_pipeline_test @require_vision @require_torch class MaskGenerationPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) tf_model_mapping = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, torch_dtype="float32", ): image_segmenter = MaskGenerationPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, torch_dtype=torch_dtype, ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] @unittest.skip(reason="TODO @Arthur: Implement me") def run_pipeline_test(self, mask_generator, examples): pass @require_tf @unittest.skip(reason="Image segmentation not implemented in TF") def test_small_model_tf(self): pass @slow @require_torch def test_small_model_pt(self): image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge") outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256) # Shortening by hashing new_outupt = [] for i, o in enumerate(outputs["masks"]): new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(new_outupt, decimals=4), [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053}, {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967}, {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993}, {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909}, {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 
0.9879}, {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834}, {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716}, {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612}, {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599}, {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552}, {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532}, {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516}, {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499}, {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483}, {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464}, {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943}, {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943}, {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408}, {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335}, {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326}, {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262}, {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999}, {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986}, {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984}, {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873}, {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871} ], ) # fmt: on @require_torch @slow def test_threshold(self): model_id = "facebook/sam-vit-huge" image_segmenter = pipeline("mask-generation", model=model_id) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256 ) # Shortening by hashing new_outupt = [] for i, o in enumerate(outputs["masks"]): new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(new_outupt, decimals=4), [ {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444}, {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210}, {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167}, {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132}, {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053}, ], )
transformers/tests/pipelines/test_pipelines_mask_generation.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_mask_generation.py", "repo_id": "transformers", "token_count": 3521 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, ZeroShotObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_torch class ZeroShotObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, torch_dtype="float32", ): object_detector = ZeroShotObjectDetectionPipeline( model=model, processor=processor, tokenizer=tokenizer, image_processor=image_processor, torch_dtype=torch_dtype, ) examples = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def run_pipeline_test(self, object_detector, examples): outputs = object_detector(examples[0].get("image"), examples[0].get("candidate_labels"), threshold=0.0) n = len(outputs) self.assertGreater(n, 0) self.assertEqual( outputs, [ { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, } for i in range(n) ], ) @require_tf @unittest.skip(reason="Zero Shot Object Detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): object_detector = pipeline( "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" ) outputs = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png", candidate_labels=["cat", "remote", "couch"], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ], ) outputs = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": 
["cat", "remote", "couch"], } ], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] ], ) @require_torch @slow def test_large_model_pt(self): object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ) outputs = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ], ) @require_tf @unittest.skip(reason="Zero Shot Object Detection not implemented in TF") def test_large_model_tf(self): pass @require_torch @slow def test_threshold(self): threshold = 0.2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], threshold=threshold, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": 
"cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, ], ) @require_torch @slow def test_top_k(self): top_k = 2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], top_k=top_k, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, ], )
transformers/tests/pipelines/test_pipelines_zero_shot_object_detection.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_zero_shot_object_detection.py", "repo_id": "transformers", "token_count": 5255 }
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import tempfile
import unittest

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, EetqConfig, OPTForCausalLM
from transformers.testing_utils import (
    require_accelerate,
    require_eetq,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
    torch_device,
)
from transformers.utils import is_accelerate_available, is_torch_available


if is_torch_available():
    import torch

if is_accelerate_available():
    from accelerate import init_empty_weights


@require_torch_gpu
class EetqConfigTest(unittest.TestCase):
    def test_to_dict(self):
        """
        Simple test that checks that converting a config to a dict yields the same values as the config object.
        """
        quantization_config = EetqConfig()
        config_to_dict = quantization_config.to_dict()

        for key in config_to_dict:
            self.assertEqual(getattr(quantization_config, key), config_to_dict[key])

    def test_from_dict(self):
        """
        Simple test that checks that building a config object from a dict yields the same values as the dict.
        """
        dict = {"modules_to_not_convert": ["lm_head.weight"], "quant_method": "eetq", "weights": "int8"}
        quantization_config = EetqConfig.from_dict(dict)

        self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert)
        self.assertEqual(dict["quant_method"], quantization_config.quant_method)
        self.assertEqual(dict["weights"], quantization_config.weights)


@slow
@require_torch_gpu
@require_eetq
@require_accelerate
class EetqTest(unittest.TestCase):
    model_name = "facebook/opt-350m"

    input_text = "What are we having for dinner?"
    max_new_tokens = 9

    EXPECTED_OUTPUT = "What are we having for dinner?\nI'm having a steak and a salad"

    device_map = "cuda"

    # called only once for all tests in this class
    @classmethod
    def setUpClass(cls):
        """
        Setup quantized model
        """
        quantization_config = EetqConfig(weights="int8")
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.quantized_model = AutoModelForCausalLM.from_pretrained(
            cls.model_name, device_map=cls.device_map, quantization_config=quantization_config
        )

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()
        gc.collect()

    def test_quantized_model_conversion(self):
        """
        Simple test that checks if the quantized model has been converted properly
        """
        from eetq import EetqLinear

        from transformers.integrations import replace_with_eetq_linear

        model_id = "facebook/opt-350m"
        config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
        quantization_config = EetqConfig(weights="int8")

        with init_empty_weights():
            model = OPTForCausalLM(config)

        nb_linears = 0
        for module in model.modules():
            if isinstance(module, torch.nn.Linear):
                nb_linears += 1

        model = replace_with_eetq_linear(model, quantization_config=quantization_config)
        nb_eetq_linear = 0
        for module in model.modules():
            if isinstance(module, EetqLinear):
                nb_eetq_linear += 1

        self.assertEqual(nb_linears - 1, nb_eetq_linear)

        # Try with `modules_to_not_convert`
        with init_empty_weights():
            model = OPTForCausalLM(config)
        quantization_config = EetqConfig(modules_to_not_convert=["fc1"])
        model = replace_with_eetq_linear(model, quantization_config=quantization_config)
        nb_eetq_linear = 0
        for module in model.modules():
            if isinstance(module, EetqLinear):
                nb_eetq_linear += 1
        # 25 corresponds to the lm_head along with 24 fc1 layers.
        self.assertEqual(nb_linears - 25, nb_eetq_linear)

    def test_quantized_model(self):
        """
        Simple test that checks if the quantized model is working properly
        """
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)

        output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
        self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    def test_save_pretrained(self):
        """
        Simple test that checks if the quantized model is working properly after being saved and loaded
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.quantized_model.save_pretrained(tmpdirname)

            model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)

            input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)

            output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
            self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    @require_torch_multi_gpu
    def test_quantized_model_multi_gpu(self):
        """
        Simple test that checks if the quantized model is working properly with multiple GPUs
        set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs
        """
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
        quantization_config = EetqConfig()
        quantized_model = AutoModelForCausalLM.from_pretrained(
            self.model_name, device_map="auto", quantization_config=quantization_config
        )
        self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})

        output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
        self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
transformers/tests/quantization/eetq_integration/test_eetq.py/0
{ "file_path": "transformers/tests/quantization/eetq_integration/test_eetq.py", "repo_id": "transformers", "token_count": 2636 }
import os
import sys
import unittest


ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(ROOT_DIR, "utils"))

import create_dependency_mapping  # noqa: E402


# This is equivalent to `all` in the current library state (as of 09/01/2025)
MODEL_ROOT = os.path.join("src", "transformers", "models")
FILES_TO_PARSE = [
    os.path.join(MODEL_ROOT, "starcoder2", "modular_starcoder2.py"),
    os.path.join(MODEL_ROOT, "gemma", "modular_gemma.py"),
    os.path.join(MODEL_ROOT, "olmo2", "modular_olmo2.py"),
    os.path.join(MODEL_ROOT, "diffllama", "modular_diffllama.py"),
    os.path.join(MODEL_ROOT, "granite", "modular_granite.py"),
    os.path.join(MODEL_ROOT, "gemma2", "modular_gemma2.py"),
    os.path.join(MODEL_ROOT, "mixtral", "modular_mixtral.py"),
    os.path.join(MODEL_ROOT, "olmo", "modular_olmo.py"),
    os.path.join(MODEL_ROOT, "rt_detr", "modular_rt_detr.py"),
    os.path.join(MODEL_ROOT, "qwen2", "modular_qwen2.py"),
    os.path.join(MODEL_ROOT, "llava_next_video", "modular_llava_next_video.py"),
    os.path.join(MODEL_ROOT, "cohere2", "modular_cohere2.py"),
    os.path.join(MODEL_ROOT, "modernbert", "modular_modernbert.py"),
    os.path.join(MODEL_ROOT, "colpali", "modular_colpali.py"),
    os.path.join(MODEL_ROOT, "deformable_detr", "modular_deformable_detr.py"),
    os.path.join(MODEL_ROOT, "aria", "modular_aria.py"),
    os.path.join(MODEL_ROOT, "ijepa", "modular_ijepa.py"),
    os.path.join(MODEL_ROOT, "bamba", "modular_bamba.py"),
    os.path.join(MODEL_ROOT, "dinov2_with_registers", "modular_dinov2_with_registers.py"),
    os.path.join(MODEL_ROOT, "instructblipvideo", "modular_instructblipvideo.py"),
    os.path.join(MODEL_ROOT, "glm", "modular_glm.py"),
    os.path.join(MODEL_ROOT, "phi", "modular_phi.py"),
    os.path.join(MODEL_ROOT, "mistral", "modular_mistral.py"),
    os.path.join(MODEL_ROOT, "phi3", "modular_phi3.py"),
    os.path.join(MODEL_ROOT, "cohere", "modular_cohere.py"),
]


def appear_after(model1: str, model2: str, priority_list: list[str]) -> bool:
    """Return True if `model1` appears after `model2` in `priority_list`."""
    return priority_list.index(model1) > priority_list.index(model2)


class ConversionOrderTest(unittest.TestCase):
    def test_conversion_order(self):
        # Find the order
        priority_list = create_dependency_mapping.find_priority_list(FILES_TO_PARSE)
        # Extract just the model names
        model_priority_list = [file.rsplit("modular_")[-1].replace(".py", "") for file in priority_list]

        # These are based on what the current library order should be (as of 09/01/2025)
        self.assertTrue(appear_after("mixtral", "mistral", model_priority_list))
        self.assertTrue(appear_after("gemma2", "gemma", model_priority_list))
        self.assertTrue(appear_after("starcoder2", "mistral", model_priority_list))
        self.assertTrue(appear_after("olmo2", "olmo", model_priority_list))
        self.assertTrue(appear_after("diffllama", "mistral", model_priority_list))
        self.assertTrue(appear_after("cohere2", "gemma2", model_priority_list))
        self.assertTrue(appear_after("cohere2", "cohere", model_priority_list))
        self.assertTrue(appear_after("phi3", "mistral", model_priority_list))
transformers/tests/repo_utils/modular/test_conversion_order.py/0
{ "file_path": "transformers/tests/repo_utils/modular/test_conversion_order.py", "repo_id": "transformers", "token_count": 1422 }
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "FacebookAI/roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "FacebookAI/roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    # @parameterized.expand([(2,), (4,),])
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump test results into a json file to share in the PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
transformers/tests/sagemaker/test_multi_node_model_parallel.py/0
{ "file_path": "transformers/tests/sagemaker/test_multi_node_model_parallel.py", "repo_id": "transformers", "token_count": 2103 }
# coding=utf-8 # Copyright 2018 HuggingFace Inc.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ isort:skip_file """ import os import pickle import tempfile import unittest from typing import Callable, Optional import numpy as np from transformers import ( BatchEncoding, BertTokenizer, BertTokenizerFast, LlamaTokenizerFast, PreTrainedTokenizer, PreTrainedTokenizerFast, TensorType, TokenSpan, is_tokenizers_available, ) from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer from transformers.testing_utils import ( CaptureStderr, require_flax, require_sentencepiece, require_tf, require_tokenizers, require_torch, slow, ) if is_tokenizers_available(): import tokenizers from tokenizers import Tokenizer from tokenizers.models import WordPiece class TokenizerUtilsTest(unittest.TestCase): def check_tokenizer_from_pretrained(self, tokenizer_class): s3_models = list(tokenizer_class.max_model_input_sizes.keys()) for model_name in s3_models[:1]: tokenizer = tokenizer_class.from_pretrained(model_name) self.assertIsNotNone(tokenizer) self.assertIsInstance(tokenizer, tokenizer_class) self.assertIsInstance(tokenizer, PreTrainedTokenizer) for special_tok in tokenizer.all_special_tokens: self.assertIsInstance(special_tok, str) special_tok_id = tokenizer.convert_tokens_to_ids(special_tok) self.assertIsInstance(special_tok_id, int) def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable] = None): batch_encoding_str = pickle.dumps(be_original) self.assertIsNotNone(batch_encoding_str) be_restored = pickle.loads(batch_encoding_str) # Ensure is_fast is correctly restored self.assertEqual(be_restored.is_fast, be_original.is_fast) # Ensure encodings are potentially correctly restored if be_original.is_fast: self.assertIsNotNone(be_restored.encodings) else: self.assertIsNone(be_restored.encodings) # Ensure the keys are the same for original_v, restored_v in zip(be_original.values(), be_restored.values()): if equal_op: self.assertTrue(equal_op(restored_v, original_v)) else: self.assertEqual(restored_v, original_v) @slow def test_pretrained_tokenizers(self): self.check_tokenizer_from_pretrained(GPT2Tokenizer) def test_tensor_type_from_str(self): self.assertEqual(TensorType("tf"), TensorType.TENSORFLOW) self.assertEqual(TensorType("pt"), TensorType.PYTORCH) self.assertEqual(TensorType("np"), TensorType.NUMPY) @require_tokenizers def test_batch_encoding_pickle(self): import numpy as np tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased") # Python no tensor with self.subTest("BatchEncoding (Python, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_p("Small example to encode")) with self.subTest("BatchEncoding (Python, return_tensors=NUMPY)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) with self.subTest("BatchEncoding (Rust, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_r("Small 
example to encode")) with self.subTest("BatchEncoding (Rust, return_tensors=NUMPY)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) @require_tf @require_tokenizers def test_batch_encoding_pickle_tf(self): import tensorflow as tf def tf_array_equals(t1, t2): return tf.reduce_all(tf.equal(t1, t2)) tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) with self.subTest("BatchEncoding (Rust, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) @require_torch @require_tokenizers def test_batch_encoding_pickle_pt(self): import torch tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) with self.subTest("BatchEncoding (Rust, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) @require_tokenizers def test_batch_encoding_is_fast(self): tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased") with self.subTest("Python Tokenizer"): self.assertFalse(tokenizer_p("Small example to_encode").is_fast) with self.subTest("Rust Tokenizer"): self.assertTrue(tokenizer_r("Small example to_encode").is_fast) @require_tokenizers def test_batch_encoding_word_to_tokens(self): tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased") encoded = tokenizer_r(["Test", "\xad", "test"], is_split_into_words=True) self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2)) self.assertEqual(encoded.word_to_tokens(1), None) self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3)) def test_batch_encoding_with_labels(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="np", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_torch def test_batch_encoding_with_labels_pt(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertFalse(len(cs.err), 
msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="pt", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_tf def test_batch_encoding_with_labels_tf(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="tf", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_flax def test_batch_encoding_with_labels_jax(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="jax", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) def test_padding_accepts_tensors(self): features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="np") self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tokenizers def test_decoding_single_token(self): for tokenizer_class in [BertTokenizer, BertTokenizerFast]: with self.subTest(f"{tokenizer_class}"): tokenizer = tokenizer_class.from_pretrained("google-bert/bert-base-cased") token_id = 2300 decoded_flat = tokenizer.decode(token_id) decoded_list = tokenizer.decode([token_id]) self.assertEqual(decoded_flat, "Force") self.assertEqual(decoded_list, "Force") token_id = 0 decoded_flat = tokenizer.decode(token_id) decoded_list = tokenizer.decode([token_id]) self.assertEqual(decoded_flat, "[PAD]") self.assertEqual(decoded_list, "[PAD]") last_item_id = tokenizer.vocab_size - 1 decoded_flat = tokenizer.decode(last_item_id) decoded_list = tokenizer.decode([last_item_id]) self.assertEqual(decoded_flat, "##:") self.assertEqual(decoded_list, "##:") def test_extra_special_tokens_multimodal(self): special_tokens_list = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", "additional_special_tokens", ] llama_tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b") llama_tokenizer.extra_special_tokens = { "boi_token": 
"<image_start>", "eoi_token": "<image_end>", "image_token": "<image>", } self.assertListEqual(llama_tokenizer.SPECIAL_TOKENS_ATTRIBUTES, special_tokens_list) with tempfile.TemporaryDirectory() as tmpdirname: llama_tokenizer.save_pretrained(tmpdirname) # load back and check we have extra special tokens set loaded_tokenizer = LlamaTokenizerFast.from_pretrained(tmpdirname) multimodal_special_tokens_list = special_tokens_list + ["boi_token", "eoi_token", "image_token"] self.assertListEqual(loaded_tokenizer.SPECIAL_TOKENS_ATTRIBUTES, multimodal_special_tokens_list) # We set an image_token_id before, so we can get an "image_token" as str that matches the id self.assertTrue(loaded_tokenizer.image_token == "<image>") self.assertTrue(loaded_tokenizer.image_token_id == loaded_tokenizer.convert_tokens_to_ids("<image>")) # save one more time and make sure the image token can get loaded back with tempfile.TemporaryDirectory() as tmpdirname: loaded_tokenizer.save_pretrained(tmpdirname) loaded_tokenizer_with_extra_tokens = LlamaTokenizerFast.from_pretrained(tmpdirname) self.assertTrue(loaded_tokenizer_with_extra_tokens.image_token == "<image>") # test that we can also indicate extra tokens during load time extra_special_tokens = { "boi_token": "<image_start>", "eoi_token": "<image_end>", "image_token": "<image>", } tokenizer = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b", extra_special_tokens=extra_special_tokens ) self.assertTrue(tokenizer.image_token == "<image>") self.assertTrue(tokenizer.image_token_id == loaded_tokenizer.convert_tokens_to_ids("<image>")) @require_tokenizers def test_decoding_skip_special_tokens(self): for tokenizer_class in [BertTokenizer, BertTokenizerFast]: with self.subTest(f"{tokenizer_class}"): tokenizer = tokenizer_class.from_pretrained("google-bert/bert-base-cased") tokenizer.add_tokens(["ஐ"], special_tokens=True) # test special token with other tokens, skip the special tokens sentence = "This is a beautiful flower ஐ" ids = tokenizer(sentence)["input_ids"] decoded_sent = tokenizer.decode(ids, skip_special_tokens=True) self.assertEqual(decoded_sent, "This is a beautiful flower") # test special token with other tokens, do not skip the special tokens ids = tokenizer(sentence)["input_ids"] decoded_sent = tokenizer.decode(ids, skip_special_tokens=False) self.assertEqual(decoded_sent, "[CLS] This is a beautiful flower ஐ [SEP]") # test special token stand alone, skip the special tokens sentence = "ஐ" ids = tokenizer(sentence)["input_ids"] decoded_sent = tokenizer.decode(ids, skip_special_tokens=True) self.assertEqual(decoded_sent, "") # test special token stand alone, do not skip the special tokens ids = tokenizer(sentence)["input_ids"] decoded_sent = tokenizer.decode(ids, skip_special_tokens=False) self.assertEqual(decoded_sent, "[CLS] ஐ [SEP]") # test single special token alone, skip pad_id = 0 decoded_sent = tokenizer.decode(pad_id, skip_special_tokens=True) self.assertEqual(decoded_sent, "") # test single special token alone, do not skip decoded_sent = tokenizer.decode(pad_id, skip_special_tokens=False) self.assertEqual(decoded_sent, "[PAD]") @require_torch def test_padding_accepts_tensors_pt(self): import torch features = [{"input_ids": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], 
[0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="pt") self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tf def test_padding_accepts_tensors_tf(self): import tensorflow as tf features = [{"input_ids": tf.constant([0, 1, 2])}, {"input_ids": tf.constant([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="tf") self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tokenizers def test_instantiation_from_tokenizers(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) PreTrainedTokenizerFast(tokenizer_object=bert_tokenizer) @require_tokenizers def test_instantiation_from_tokenizers_json_file(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) with tempfile.TemporaryDirectory() as tmpdirname: bert_tokenizer.save(os.path.join(tmpdirname, "tokenizer.json")) PreTrainedTokenizerFast(tokenizer_file=os.path.join(tmpdirname, "tokenizer.json")) def test_len_tokenizer(self): for tokenizer_class in [BertTokenizer, BertTokenizerFast]: with self.subTest(f"{tokenizer_class}"): tokenizer = tokenizer_class.from_pretrained("bert-base-uncased") added_tokens_size = len(tokenizer.added_tokens_decoder) self.assertEqual(len(tokenizer), tokenizer.vocab_size) tokenizer.add_tokens(["<test_token>"]) self.assertEqual(len(tokenizer), tokenizer.vocab_size + 1) self.assertEqual(len(tokenizer.added_tokens_decoder), added_tokens_size + 1) self.assertEqual(len(tokenizer.added_tokens_encoder), added_tokens_size + 1) @require_sentencepiece def test_sentencepiece_cohabitation(self): from sentencepiece import sentencepiece_model_pb2 as _original_protobuf # noqa: F401 from transformers.convert_slow_tokenizer import import_protobuf # noqa: F401 # Now this will try to import sentencepiece_model_pb2_new.py. This should not fail even if the protobuf # was already imported. import_protobuf() def test_training_new_tokenizer_edge_cases(self): _tokenizer = Tokenizer(tokenizers.models.BPE(vocab={"a": 1, "b": 2, "ab": 3}, merges=[("a", "b")])) _tokenizer.pre_tokenizer = None tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer) toy_text_iterator = ("a" for _ in range(1000)) tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50) _tokenizer.normalizer = None tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer) toy_text_iterator = ("a" for _ in range(1000)) tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50) _tokenizer.post_processor = None tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer) toy_text_iterator = ("a" for _ in range(1000)) tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)
transformers/tests/tokenization/test_tokenization_utils.py/0
{ "file_path": "transformers/tests/tokenization/test_tokenization_utils.py", "repo_id": "transformers", "token_count": 9216 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class TestActivations(unittest.TestCase): def test_gelu_versions(self): x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100]) torch_builtin = get_activation("gelu") torch.testing.assert_close(gelu_python(x), torch_builtin(x)) self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x))) def test_gelu_10(self): x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100]) torch_builtin = get_activation("gelu") gelu10 = get_activation("gelu_10") y_gelu = torch_builtin(x) y_gelu_10 = gelu10(x) clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0) self.assertTrue(torch.max(y_gelu_10).item() == 10.0) torch.testing.assert_close(y_gelu * clipped_mask, y_gelu_10 * clipped_mask) def test_get_activation(self): get_activation("gelu") get_activation("gelu_10") get_activation("gelu_fast") get_activation("gelu_new") get_activation("gelu_python") get_activation("gelu_pytorch_tanh") get_activation("linear") get_activation("mish") get_activation("quick_gelu") get_activation("relu") get_activation("sigmoid") get_activation("silu") get_activation("swish") get_activation("tanh") with self.assertRaises(KeyError): get_activation("bogus") with self.assertRaises(KeyError): get_activation(None) def test_activations_are_distinct_objects(self): act1 = get_activation("gelu") act1.a = 1 act2 = get_activation("gelu") self.assertEqual(act1.a, 1) with self.assertRaises(AttributeError): _ = act2.a
transformers/tests/utils/test_activations.py/0
{ "file_path": "transformers/tests/utils/test_activations.py", "repo_id": "transformers", "token_count": 1055 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import Dict, List, Literal, Optional, Union, get_args, get_origin import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool from transformers.testing_utils import require_torch from transformers.training_args import _VALID_DICT_FIELDS # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 is_python_no_less_than_3_10 = sys.version_info >= (3, 10) def list_field(default=None, metadata=None): return field(default_factory=lambda: default, metadata=metadata) @dataclass class BasicExample: foo: int bar: float baz: str flag: bool @dataclass class WithDefaultExample: foo: int = 42 baz: str = field(default="toto", metadata={"help": "help message"}) @dataclass class WithDefaultBoolExample: foo: bool = False baz: bool = True opt: Optional[bool] = None class BasicEnum(Enum): titi = "titi" toto = "toto" class MixedTypeEnum(Enum): titi = "titi" toto = "toto" fourtytwo = 42 @dataclass class EnumExample: foo: BasicEnum = "toto" def __post_init__(self): self.foo = BasicEnum(self.foo) @dataclass class MixedTypeEnumExample: foo: MixedTypeEnum = "toto" def __post_init__(self): self.foo = MixedTypeEnum(self.foo) @dataclass class OptionalExample: foo: Optional[int] = None bar: Optional[float] = field(default=None, metadata={"help": "help message"}) baz: Optional[str] = None ces: Optional[List[str]] = list_field(default=[]) des: Optional[List[int]] = list_field(default=[]) @dataclass class ListExample: foo_int: List[int] = list_field(default=[]) bar_int: List[int] = list_field(default=[1, 2, 3]) foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"]) foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class RequiredExample: required_list: List[int] = field() required_str: str = field() required_enum: BasicEnum = field() def __post_init__(self): self.required_enum = BasicEnum(self.required_enum) @dataclass class StringLiteralAnnotationExample: foo: int required_enum: "BasicEnum" = field() opt: "Optional[bool]" = None baz: "str" = field(default="toto", metadata={"help": "help message"}) foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"]) if is_python_no_less_than_3_10: @dataclass class WithDefaultBoolExamplePep604: foo: bool = False baz: bool = True opt: bool | None = None @dataclass class OptionalExamplePep604: foo: int | None = None bar: float | None = field(default=None, metadata={"help": "help message"}) baz: str | None = None ces: list[str] | None = list_field(default=[]) des: list[int] | None = list_field(default=[]) class HfArgumentParserTest(unittest.TestCase): def argparsersEqual(self, a: 
argparse.ArgumentParser, b: argparse.ArgumentParser): """ Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances. """ self.assertEqual(len(a._actions), len(b._actions)) for x, y in zip(a._actions, b._actions): xx = {k: v for k, v in vars(x).items() if k != "container"} yy = {k: v for k, v in vars(y).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("choices", None) and yy.get("choices", None): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice)) del xx["type"], yy["type"] self.assertEqual(xx, yy) def test_basic(self): parser = HfArgumentParser(BasicExample) expected = argparse.ArgumentParser() expected.add_argument("--foo", type=int, required=True) expected.add_argument("--bar", type=float, required=True) expected.add_argument("--baz", type=str, required=True) expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?") self.argparsersEqual(parser, expected) args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False) self.assertFalse(example.flag) def test_with_default(self): parser = HfArgumentParser(WithDefaultExample) expected = argparse.ArgumentParser() expected.add_argument("--foo", default=42, type=int) expected.add_argument("--baz", default="toto", type=str, help="help message") self.argparsersEqual(parser, expected) def test_with_default_bool(self): expected = argparse.ArgumentParser() expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?") expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?") # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("--no_baz", "--no-baz", action="store_false", default=False, dest="baz") expected.add_argument("--opt", type=string_to_bool, default=None) dataclass_types = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(WithDefaultBoolExamplePep604) for dataclass_type in dataclass_types: parser = HfArgumentParser(dataclass_type) self.argparsersEqual(parser, expected) args = parser.parse_args([]) self.assertEqual(args, Namespace(foo=False, baz=True, opt=None)) args = parser.parse_args(["--foo", "--no_baz"]) self.assertEqual(args, Namespace(foo=True, baz=False, opt=None)) args = parser.parse_args(["--foo", "--no-baz"]) self.assertEqual(args, Namespace(foo=True, baz=False, opt=None)) args = parser.parse_args(["--foo", "--baz"]) self.assertEqual(args, Namespace(foo=True, baz=True, opt=None)) args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"]) self.assertEqual(args, Namespace(foo=True, baz=True, opt=True)) args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"]) self.assertEqual(args, Namespace(foo=False, baz=False, opt=False)) def test_with_enum(self): parser = HfArgumentParser(MixedTypeEnumExample) expected = argparse.ArgumentParser() expected.add_argument( "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]), ) self.argparsersEqual(parser, expected) args = parser.parse_args([]) self.assertEqual(args.foo, "toto") enum_ex = parser.parse_args_into_dataclasses([])[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.toto) args = parser.parse_args(["--foo", 
"titi"]) self.assertEqual(args.foo, "titi") enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.titi) args = parser.parse_args(["--foo", "42"]) self.assertEqual(args.foo, 42) enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo) def test_with_literal(self): @dataclass class LiteralExample: foo: Literal["titi", "toto", 42] = "toto" parser = HfArgumentParser(LiteralExample) expected = argparse.ArgumentParser() expected.add_argument( "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]), ) self.argparsersEqual(parser, expected) args = parser.parse_args([]) self.assertEqual(args.foo, "toto") args = parser.parse_args(["--foo", "titi"]) self.assertEqual(args.foo, "titi") args = parser.parse_args(["--foo", "42"]) self.assertEqual(args.foo, 42) def test_with_list(self): parser = HfArgumentParser(ListExample) expected = argparse.ArgumentParser() expected.add_argument("--foo_int", "--foo-int", nargs="+", default=[], type=int) expected.add_argument("--bar_int", "--bar-int", nargs="+", default=[1, 2, 3], type=int) expected.add_argument("--foo_str", "--foo-str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str) expected.add_argument("--foo_float", "--foo-float", nargs="+", default=[0.1, 0.2, 0.3], type=float) self.argparsersEqual(parser, expected) args = parser.parse_args([]) self.assertEqual( args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]), ) args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split()) self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7])) args = parser.parse_args("--foo-int 1 --bar-int 2 3 --foo-str a b c --foo-float 0.1 0.7".split()) self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7])) def test_with_optional(self): expected = argparse.ArgumentParser() expected.add_argument("--foo", default=None, type=int) expected.add_argument("--bar", default=None, type=float, help="help message") expected.add_argument("--baz", default=None, type=str) expected.add_argument("--ces", nargs="+", default=[], type=str) expected.add_argument("--des", nargs="+", default=[], type=int) dataclass_types = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(OptionalExamplePep604) for dataclass_type in dataclass_types: parser = HfArgumentParser(dataclass_type) self.argparsersEqual(parser, expected) args = parser.parse_args([]) self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[])) args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split()) self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3])) def test_with_required(self): parser = HfArgumentParser(RequiredExample) expected = argparse.ArgumentParser() expected.add_argument("--required_list", "--required-list", nargs="+", type=int, required=True) expected.add_argument("--required_str", "--required-str", type=str, required=True) expected.add_argument( "--required_enum", "--required-enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True, ) self.argparsersEqual(parser, expected) def test_with_string_literal_annotation(self): parser = HfArgumentParser(StringLiteralAnnotationExample) expected = 
argparse.ArgumentParser() expected.add_argument("--foo", type=int, required=True) expected.add_argument( "--required_enum", "--required-enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True, ) expected.add_argument("--opt", type=string_to_bool, default=None) expected.add_argument("--baz", default="toto", type=str, help="help message") expected.add_argument("--foo_str", "--foo-str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str) self.argparsersEqual(parser, expected) def test_parse_dict(self): parser = HfArgumentParser(BasicExample) args_dict = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } parsed_args = parser.parse_dict(args_dict)[0] args = BasicExample(**args_dict) self.assertEqual(parsed_args, args) def test_parse_dict_extra_key(self): parser = HfArgumentParser(BasicExample) args_dict = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, "extra": 42, } self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False) def test_parse_json(self): parser = HfArgumentParser(BasicExample) args_dict_for_json = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: temp_local_path = os.path.join(tmp_dir, "temp_json") os.mkdir(temp_local_path) with open(temp_local_path + ".json", "w+") as f: json.dump(args_dict_for_json, f) parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0] args = BasicExample(**args_dict_for_json) self.assertEqual(parsed_args, args) def test_parse_yaml(self): parser = HfArgumentParser(BasicExample) args_dict_for_yaml = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: temp_local_path = os.path.join(tmp_dir, "temp_yaml") os.mkdir(temp_local_path) with open(temp_local_path + ".yaml", "w+") as f: yaml.dump(args_dict_for_yaml, f) parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0] args = BasicExample(**args_dict_for_yaml) self.assertEqual(parsed_args, args) def test_integration_training_args(self): parser = HfArgumentParser(TrainingArguments) self.assertIsNotNone(parser) def test_valid_dict_annotation(self): """ Tests to make sure that `dict` based annotations are correctly made in the `TrainingArguments`. If this fails, a type annotation change is needed on a new input """ base_list = _VALID_DICT_FIELDS.copy() args = TrainingArguments # First find any annotations that contain `dict` fields = args.__dataclass_fields__ raw_dict_fields = [] optional_dict_fields = [] for field in fields.values(): # First verify raw dict if field.type in (dict, Dict): raw_dict_fields.append(field) # Next check for `Union` or `Optional` elif get_origin(field.type) == Union: if any(arg in (dict, Dict) for arg in get_args(field.type)): optional_dict_fields.append(field) # First check: anything in `raw_dict_fields` is very bad self.assertEqual( len(raw_dict_fields), 0, "Found invalid raw `dict` types in the `TrainingArgument` typings. " "This leads to issues with the CLI. Please turn this into `typing.Optional[dict,str]`", ) # Next check raw annotations for field in optional_dict_fields: args = get_args(field.type) # These should be returned as `dict`, `str`, ... # we only care about the first two self.assertIn(args[0], (Dict, dict)) self.assertEqual( str(args[1]), "<class 'str'>", f"Expected field `{field.name}` to have a type signature of at least `typing.Union[dict,str,...]` for CLI compatibility, " "but `str` not found. 
Please fix this.", ) # Second check: anything in `optional_dict_fields` is bad if it's not in `base_list` for field in optional_dict_fields: self.assertIn( field.name, base_list, f"Optional dict field `{field.name}` is not in the base list of valid fields. Please add it to `training_args._VALID_DICT_FIELDS`", ) @require_torch def test_valid_dict_input_parsing(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, accelerator_config='{"split_batches": "True", "gradient_accumulation_kwargs": {"num_steps": 2}}', ) self.assertEqual(args.accelerator_config.split_batches, True) self.assertEqual(args.accelerator_config.gradient_accumulation_kwargs["num_steps"], 2)
transformers/tests/utils/test_hf_argparser.py/0
{ "file_path": "transformers/tests/utils/test_hf_argparser.py", "repo_id": "transformers", "token_count": 7886 }
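The row above stores the `HfArgumentParser` test suite. For readers skimming the dump, here is a minimal, hedged sketch of the API those tests exercise; the `TrainArgs` dataclass is hypothetical and used only for illustration.

```python
# Minimal sketch of the HfArgumentParser API exercised by the tests above.
# `TrainArgs` is a hypothetical dataclass used only for illustration.
from dataclasses import dataclass, field
from typing import List, Optional

from transformers import HfArgumentParser


@dataclass
class TrainArgs:
    foo: int = 42  # becomes `--foo`, type=int
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    flag: bool = False  # booleans get a `--flag` option parsed via string_to_bool
    ces: List[str] = field(default_factory=list)  # lists become `--ces` with nargs="+"


parser = HfArgumentParser(TrainArgs)

# Parse a command line into the dataclass; the method returns one instance per dataclass.
(args,) = parser.parse_args_into_dataclasses(["--foo", "12", "--bar", "3.14", "--ces", "a", "b"])
print(args.foo, args.bar, args.ces)  # 12 3.14 ['a', 'b']

# The same parser also consumes dicts, JSON files and YAML files, which is what
# test_parse_dict / test_parse_json / test_parse_yaml above cover.
(args,) = parser.parse_dict({"foo": 7, "bar": 1.0, "flag": True, "ces": []})
```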
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPT2TokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, TemporaryHubRepo, is_staging_test, require_tokenizers from transformers.tokenization_utils import ExtensionsTrie, Trie sys.path.append(str(Path(__file__).parent.parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class TokenizerUtilTester(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def test_cached_files_are_used_when_internet_is_down_missing_files(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = GPT2TokenizerFast.from_pretrained("openai-community/gpt2") # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = GPT2TokenizerFast.from_pretrained("openai-community/gpt2") # This check we did call the fake head request mock_head.assert_called() def test_legacy_load_from_one_file(self): # This test is for deprecated behavior and can be removed in v5 try: tmp_file = tempfile.NamedTemporaryFile(delete=False).name with open(tmp_file, "wb") as f: http_get("https://huggingface.co/albert/albert-base-v1/resolve/main/spiece.model", f) _ = AlbertTokenizer.from_pretrained(tmp_file) finally: os.remove(tmp_file) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. 
if os.path.isfile("tokenizer.json"): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. self.skipTest(reason="Skipping test as there is a `tokenizer.json` file in the current folder.") try: with open("tokenizer.json", "wb") as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") # The tiny random BERT has a vocab size of 1024, tiny openai-community/gpt2 has a vocab size of 1000 self.assertEqual(tokenizer.vocab_size, 1000) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("tokenizer.json") @is_staging_test class TokenizerPushToHubTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) def test_push_to_hub(self): with TemporaryHubRepo(token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) tokenizer.push_to_hub(tmp_repo.repo_id, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(tmp_repo.repo_id) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) def test_push_to_hub_via_save_pretrained(self): with TemporaryHubRepo(token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) # Push to hub via save_pretrained tokenizer.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(tmp_repo.repo_id) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) def test_push_to_hub_in_organization(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) tokenizer.push_to_hub(tmp_repo.repo_id, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(tmp_repo.repo_id) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) def test_push_to_hub_in_organization_via_save_pretrained(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) # Push to hub via save_pretrained tokenizer.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(tmp_repo.repo_id) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) @require_tokenizers def test_push_to_hub_dynamic_tokenizer(self): with TemporaryHubRepo(token=self._token) as tmp_repo: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file =
os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = CustomTokenizer(vocab_file) # No fast custom tokenizer tokenizer.push_to_hub(tmp_repo.repo_id, token=self._token) tokenizer = AutoTokenizer.from_pretrained(tmp_repo.repo_id, trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer") @require_tokenizers def test_push_to_hub_dynamic_tokenizer_with_both_slow_and_fast_classes(self): with TemporaryHubRepo(token=self._token) as tmp_repo: CustomTokenizer.register_for_auto_class() # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir) bert_tokenizer.save_pretrained(tmp_dir) tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir) tokenizer.push_to_hub(tmp_repo.repo_id, token=self._token) tokenizer = AutoTokenizer.from_pretrained(tmp_repo.repo_id, trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast") tokenizer = AutoTokenizer.from_pretrained(tmp_repo.repo_id, use_fast=False, trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer") class TrieTest(unittest.TestCase): def test_trie(self): trie = Trie() trie.add("Hello 友達") self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}) trie.add("Hello") self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}) def test_trie_split(self): trie = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"]) trie.add("[CLS]") trie.add("extra_id_1") trie.add("extra_id_100") self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"]) def test_trie_single(self): trie = Trie() trie.add("A") self.assertEqual(trie.split("ABC"), ["A", "BC"]) self.assertEqual(trie.split("BCA"), ["BC", "A"]) def test_trie_final(self): trie = Trie() trie.add("TOKEN]") trie.add("[SPECIAL_TOKEN]") self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"]) def test_trie_subtokens(self): trie = Trie() trie.add("A") trie.add("P") trie.add("[SPECIAL_TOKEN]") self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"]) def test_trie_suffix_tokens(self): trie = Trie() trie.add("AB") trie.add("B") trie.add("C") self.assertEqual(trie.split("ABC"), ["AB", "C"]) def test_trie_skip(self): trie = Trie() trie.add("ABC") trie.add("B") trie.add("CD") self.assertEqual(trie.split("ABCD"), ["ABC", "D"]) def test_cut_text_hardening(self): # Even if the offsets are wrong, we necessarily output correct string # parts. 
trie = Trie() parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3]) self.assertEqual(parts, ["AB", "C"]) class ExtensionsTrieTest(unittest.TestCase): def test_extensions(self): # Test searching by prefix trie = ExtensionsTrie() trie.add("foo") trie.add("food") trie.add("foodie") trie.add("helium") self.assertEqual(trie.extensions("foo"), ["foo", "food", "foodie"]) self.assertEqual(trie.extensions("helium"), ["helium"]) def test_empty_prefix(self): trie = ExtensionsTrie() # Test searching with an empty prefix returns all values trie.add("hello") trie.add("bye") self.assertEqual(trie.extensions(""), ["hello", "bye"]) def test_no_extension_match(self): trie = ExtensionsTrie() # Test searching for a prefix that doesn't match any key values = trie.extensions("unknown") self.assertEqual(len(values), 0) def test_update_value(self): trie = ExtensionsTrie() # Test updating the value of an existing key trie.add("hi") trie.add("hi") self.assertEqual(trie.extensions("hi"), ["hi"])
transformers/tests/utils/test_tokenization_utils.py/0
{ "file_path": "transformers/tests/utils/test_tokenization_utils.py", "repo_id": "transformers", "token_count": 5858 }
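The tokenization-utils row above ends with the `Trie` tests. The snippet below is a small self-contained sketch of the behaviour they pin down (cutting raw text on registered special tokens), using only calls that appear in the tests themselves.

```python
# Minimal sketch of the Trie behaviour covered by TrieTest above: this trie is what
# slow tokenizers use to split raw text around added/special tokens.
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")

# `split` keeps the surrounding text intact and prefers the longest registered match.
print(trie.split("[CLS] This is a extra_id_100"))
# ['[CLS]', ' This is a ', 'extra_id_100']
```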
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that performs several consistency checks on the repo. This includes: - checking all models are properly defined in the __init__ of models/ - checking all models are in the main __init__ - checking all models are properly tested - checking all object in the main __init__ are documented - checking all models are in at least one auto class - checking all the auto mapping are properly defined (no typos, importable) - checking the list of deprecated models is up to date Use from the root of the repo with (as used in `make repo-consistency`): ```bash python utils/check_repo.py ``` It has no auto-fix mode. """ import os import re import sys import types import warnings from collections import OrderedDict from difflib import get_close_matches from pathlib import Path from typing import List, Tuple from transformers import is_flax_available, is_tf_available, is_torch_available from transformers.models.auto import get_values from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES from transformers.models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING_NAMES from transformers.models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING_NAMES from transformers.models.auto.processing_auto import PROCESSOR_MAPPING_NAMES from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES from transformers.utils import ENV_VARS_TRUE_VALUES, direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_repo.py PATH_TO_TRANSFORMERS = "src/transformers" PATH_TO_TESTS = "tests" PATH_TO_DOC = "docs/source/en" # Update this list with models that are supposed to be private. PRIVATE_MODELS = [ "AltRobertaModel", "DPRSpanPredictor", "UdopStack", "LongT5Stack", "RealmBertModel", "T5Stack", "MT5Stack", "UMT5Stack", "Pop2PianoStack", "Qwen2AudioEncoder", "Qwen2VisionTransformerPretrainedModel", "Qwen2_5_VisionTransformerPretrainedModel", "SwitchTransformersStack", "TFDPRSpanPredictor", "MaskFormerSwinModel", "MaskFormerSwinPreTrainedModel", "BridgeTowerTextModel", "BridgeTowerVisionModel", "Kosmos2TextModel", "Kosmos2TextForCausalLM", "Kosmos2VisionModel", "SeamlessM4Tv2TextToUnitModel", "SeamlessM4Tv2CodeHifiGan", "SeamlessM4Tv2TextToUnitForConditionalGeneration", "Idefics2PerceiverResampler", "Idefics2VisionTransformer", "Idefics3VisionTransformer", "AriaTextForCausalLM", "AriaTextModel", ] # Update this list for models that are not tested with a comment explaining the reason it should not be. # Being in this list is an exception and should **not** be the rule. IGNORE_NON_TESTED = ( PRIVATE_MODELS.copy() + [ # models to ignore for not tested "RecurrentGemmaModel", # Building part of bigger (tested) model. "FuyuForCausalLM", # Not tested fort now "InstructBlipQFormerModel", # Building part of bigger (tested) model. 
"InstructBlipVideoQFormerModel", # Building part of bigger (tested) model. "UMT5EncoderModel", # Building part of bigger (tested) model. "Blip2QFormerModel", # Building part of bigger (tested) model. "ErnieMForInformationExtraction", "FastSpeech2ConformerHifiGan", # Already tested by SpeechT5HifiGan (# Copied from) "FastSpeech2ConformerWithHifiGan", # Built with two smaller (tested) models. "GraphormerDecoderHead", # Building part of bigger (tested) model. "JukeboxVQVAE", # Building part of bigger (tested) model. "JukeboxPrior", # Building part of bigger (tested) model. "DecisionTransformerGPT2Model", # Building part of bigger (tested) model. "SegformerDecodeHead", # Building part of bigger (tested) model. "MgpstrModel", # Building part of bigger (tested) model. "BertLMHeadModel", # Needs to be setup as decoder. "MegatronBertLMHeadModel", # Building part of bigger (tested) model. "RealmBertModel", # Building part of bigger (tested) model. "RealmReader", # Not regular model. "RealmScorer", # Not regular model. "RealmForOpenQA", # Not regular model. "ReformerForMaskedLM", # Needs to be setup as decoder. "TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?) "TFRobertaForMultipleChoice", # TODO: fix "TFRobertaPreLayerNormForMultipleChoice", # TODO: fix "SeparableConv1D", # Building part of bigger (tested) model. "FlaxBartForCausalLM", # Building part of bigger (tested) model. "FlaxBertForCausalLM", # Building part of bigger (tested) model. Tested implicitly through FlaxRobertaForCausalLM. "OPTDecoderWrapper", "TFSegformerDecodeHead", # Not a regular model. "AltRobertaModel", # Building part of bigger (tested) model. "BlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models "TFBlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models "BridgeTowerTextModel", # No need to test it as it is tested by BridgeTowerModel model. "BridgeTowerVisionModel", # No need to test it as it is tested by BridgeTowerModel model. "BarkCausalModel", # Building part of bigger (tested) model. "BarkModel", # Does not have a forward signature - generation tested with integration tests. "SeamlessM4TTextToUnitModel", # Building part of bigger (tested) model. "SeamlessM4TCodeHifiGan", # Building part of bigger (tested) model. "SeamlessM4TTextToUnitForConditionalGeneration", # Building part of bigger (tested) model. "ChameleonVQVAE", # VQVAE here is used only for encoding (discretizing) and is tested as part of bigger model "Qwen2VLModel", # Building part of bigger (tested) model. Tested implicitly through Qwen2VLForConditionalGeneration. "Qwen2_5_VLModel", # Building part of bigger (tested) model. Tested implicitly through Qwen2_5_VLForConditionalGeneration. "MllamaTextModel", # Building part of bigger (tested) model. # TODO: add tests "MllamaVisionModel", # Building part of bigger (tested) model. # TODO: add tests "Emu3VQVAE", # Building part of bigger (tested) model "Emu3TextModel", # Building part of bigger (tested) model ] ) # Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't # trigger the common tests. 
TEST_FILES_WITH_NO_COMMON_TESTS = [ "models/decision_transformer/test_modeling_decision_transformer.py", "models/camembert/test_modeling_camembert.py", "models/mt5/test_modeling_flax_mt5.py", "models/mbart/test_modeling_mbart.py", "models/mt5/test_modeling_mt5.py", "models/pegasus/test_modeling_pegasus.py", "models/camembert/test_modeling_tf_camembert.py", "models/mt5/test_modeling_tf_mt5.py", "models/xlm_roberta/test_modeling_tf_xlm_roberta.py", "models/xlm_roberta/test_modeling_flax_xlm_roberta.py", "models/xlm_prophetnet/test_modeling_xlm_prophetnet.py", "models/xlm_roberta/test_modeling_xlm_roberta.py", "models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py", "models/vision_text_dual_encoder/test_modeling_tf_vision_text_dual_encoder.py", "models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py", "models/decision_transformer/test_modeling_decision_transformer.py", "models/bark/test_modeling_bark.py", ] # Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and # should **not** be the rule. IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [ # models to ignore for model xxx mapping "AlignTextModel", "AlignVisionModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", "Blip2TextModelWithProjection", "Blip2VisionModelWithProjection", "Blip2QFormerModel", "Blip2VisionModel", "ErnieMForInformationExtraction", "FastSpeech2ConformerHifiGan", "FastSpeech2ConformerWithHifiGan", "GitVisionModel", "GraphormerModel", "GraphormerForGraphClassification", "BlipForImageTextRetrieval", "BlipForQuestionAnswering", "BlipVisionModel", "BlipTextLMHeadModel", "BlipTextModel", "BrosSpadeEEForTokenClassification", "BrosSpadeELForTokenClassification", "TFBlipForConditionalGeneration", "TFBlipForImageTextRetrieval", "TFBlipForQuestionAnswering", "TFBlipVisionModel", "TFBlipTextLMHeadModel", "TFBlipTextModel", "Swin2SRForImageSuperResolution", "BridgeTowerForImageAndTextRetrieval", "BridgeTowerForMaskedLM", "BridgeTowerForContrastiveLearning", "CLIPSegForImageSegmentation", "CLIPSegVisionModel", "CLIPSegTextModel", "EsmForProteinFolding", "GPTSanJapaneseModel", "TimeSeriesTransformerForPrediction", "InformerForPrediction", "AutoformerForPrediction", "PatchTSTForPretraining", "PatchTSTForPrediction", "JukeboxVQVAE", "JukeboxPrior", "SamModel", "DPTForDepthEstimation", "DecisionTransformerGPT2Model", "GLPNForDepthEstimation", "ViltForImagesAndTextClassification", "ViltForImageAndTextRetrieval", "ViltForTokenClassification", "ViltForMaskedLM", "PerceiverForMultimodalAutoencoding", "PerceiverForOpticalFlow", "SegformerDecodeHead", "TFSegformerDecodeHead", "FlaxBeitForMaskedImageModeling", "BeitForMaskedImageModeling", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", "CLIPTextModelWithProjection", "CLIPVisionModelWithProjection", "ClvpForCausalLM", "ClvpModel", "GroupViTTextModel", "GroupViTVisionModel", "TFCLIPTextModel", "TFCLIPVisionModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", "FlaxCLIPTextModel", "FlaxCLIPTextModelWithProjection", "FlaxCLIPVisionModel", "FlaxWav2Vec2ForCTC", "DetrForSegmentation", "Pix2StructVisionModel", "Pix2StructTextModel", "ConditionalDetrForSegmentation", "DPRReader", "FlaubertForQuestionAnswering", "FlavaImageCodebook", "FlavaTextModel", "FlavaImageModel", "FlavaMultimodalModel", "GPT2DoubleHeadsModel", "GPTSw3DoubleHeadsModel", "InstructBlipVisionModel", "InstructBlipQFormerModel", "InstructBlipVideoVisionModel", 
"InstructBlipVideoQFormerModel", "LayoutLMForQuestionAnswering", "LukeForMaskedLM", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", "MgpstrModel", "OpenAIGPTDoubleHeadsModel", "OwlViTTextModel", "OwlViTVisionModel", "Owlv2TextModel", "Owlv2VisionModel", "OwlViTForObjectDetection", "PatchTSMixerForPrediction", "PatchTSMixerForPretraining", "RagModel", "RagSequenceForGeneration", "RagTokenForGeneration", "RealmEmbedder", "RealmForOpenQA", "RealmScorer", "RealmReader", "TFDPRReader", "TFGPT2DoubleHeadsModel", "TFLayoutLMForQuestionAnswering", "TFOpenAIGPTDoubleHeadsModel", "TFRagModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", "Wav2Vec2ForCTC", "HubertForCTC", "SEWForCTC", "SEWDForCTC", "XLMForQuestionAnswering", "XLNetForQuestionAnswering", "SeparableConv1D", "VisualBertForRegionToPhraseAlignment", "VisualBertForVisualReasoning", "VisualBertForQuestionAnswering", "VisualBertForMultipleChoice", "TFWav2Vec2ForCTC", "TFHubertForCTC", "XCLIPVisionModel", "XCLIPTextModel", "AltCLIPTextModel", "AltCLIPVisionModel", "AltRobertaModel", "TvltForAudioVisualClassification", "BarkCausalModel", "BarkCoarseModel", "BarkFineModel", "BarkSemanticModel", "MusicgenMelodyModel", "MusicgenModel", "MusicgenForConditionalGeneration", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5HifiGan", "VitMatteForImageMatting", "SeamlessM4TTextToUnitModel", "SeamlessM4TTextToUnitForConditionalGeneration", "SeamlessM4TCodeHifiGan", "SeamlessM4TForSpeechToSpeech", # no auto class for speech-to-speech "TvpForVideoGrounding", "SeamlessM4Tv2NARTextToUnitModel", "SeamlessM4Tv2NARTextToUnitForConditionalGeneration", "SeamlessM4Tv2CodeHifiGan", "SeamlessM4Tv2ForSpeechToSpeech", # no auto class for speech-to-speech "SegGptForImageSegmentation", "SiglipVisionModel", "SiglipTextModel", "ChameleonVQVAE", # no autoclass for VQ-VAE models "VitPoseForPoseEstimation", "CLIPTextModel", "MoshiForConditionalGeneration", # no auto class for speech-to-speech "Emu3VQVAE", # no autoclass for VQ-VAE models "Emu3TextModel", # Building part of bigger (tested) model ] # DO NOT edit this list! # (The corresponding pytorch objects should never have been in the main `__init__`, but it's too late to remove) OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK = [ "FlaxBertLayer", "FlaxBigBirdLayer", "FlaxRoFormerLayer", "TFBertLayer", "TFLxmertEncoder", "TFLxmertXLayer", "TFMPNetLayer", "TFMobileBertLayer", "TFSegformerLayer", "TFViTMAELayer", ] # Update this list for models that have multiple model types for the same model doc. MODEL_TYPE_TO_DOC_MAPPING = OrderedDict( [ ("data2vec-text", "data2vec"), ("data2vec-audio", "data2vec"), ("data2vec-vision", "data2vec"), ("donut-swin", "donut"), ] ) # This is to make sure the transformers module imported is the one in the repo. transformers = direct_transformers_import(PATH_TO_TRANSFORMERS) def check_missing_backends(): """ Checks if all backends are installed (otherwise the check of this script is incomplete). Will error in the CI if that's not the case but only throw a warning for users running this. 
""" missing_backends = [] if not is_torch_available(): missing_backends.append("PyTorch") if not is_tf_available(): missing_backends.append("TensorFlow") if not is_flax_available(): missing_backends.append("Flax") if len(missing_backends) > 0: missing = ", ".join(missing_backends) if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: raise Exception( "Full repo consistency checks require all backends to be installed (with `pip install -e '.[dev]'` in the " f"Transformers repo, the following are missing: {missing}." ) else: warnings.warn( "Full repo consistency checks require all backends to be installed (with `pip install -e '.[dev]'` in the " f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you " "didn't make any change in one of those backends modeling files, you should probably execute the " "command above to be on the safe side." ) def check_model_list(): """ Checks the model listed as subfolders of `models` match the models available in `transformers.models`. """ # Get the models from the directory structure of `src/transformers/models/` models_dir = os.path.join(PATH_TO_TRANSFORMERS, "models") _models = [] for model in os.listdir(models_dir): if model == "deprecated": continue model_dir = os.path.join(models_dir, model) if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir): _models.append(model) # Get the models in the submodule `transformers.models` models = [model for model in dir(transformers.models) if not model.startswith("__")] missing_models = sorted(set(_models).difference(models)) if missing_models: raise Exception( f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}." ) # If some modeling modules should be ignored for all checks, they should be added in the nested list # _ignore_modules of this function. def get_model_modules() -> List[str]: """Get all the model modules inside the transformers library (except deprecated models).""" _ignore_modules = [ "modeling_auto", "modeling_encoder_decoder", "modeling_marian", "modeling_retribert", "modeling_flax_auto", "modeling_flax_encoder_decoder", "modeling_speech_encoder_decoder", "modeling_flax_speech_encoder_decoder", "modeling_flax_vision_encoder_decoder", "modeling_timm_backbone", "modeling_tf_auto", "modeling_tf_encoder_decoder", "modeling_tf_vision_encoder_decoder", "modeling_vision_encoder_decoder", ] modules = [] for model in dir(transformers.models): # There are some magic dunder attributes in the dir, we ignore them if model == "deprecated" or model.startswith("__"): continue model_module = getattr(transformers.models, model) for submodule in dir(model_module): if submodule.startswith("modeling") and submodule not in _ignore_modules: modeling_module = getattr(model_module, submodule) modules.append(modeling_module) return modules def get_models(module: types.ModuleType, include_pretrained: bool = False) -> List[Tuple[str, type]]: """ Get the objects in a module that are models. Args: module (`types.ModuleType`): The module from which we are extracting models. include_pretrained (`bool`, *optional*, defaults to `False`): Whether or not to include the `PreTrainedModel` subclass (like `BertPreTrainedModel`) or not. Returns: List[Tuple[str, type]]: List of models as tuples (class name, actual class). 
""" models = [] model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel) for attr_name in dir(module): if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name): continue attr = getattr(module, attr_name) if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__: models.append((attr_name, attr)) return models def is_building_block(model: str) -> bool: """ Returns `True` if a model is a building block part of a bigger model. """ if model.endswith("Wrapper"): return True if model.endswith("Encoder"): return True if model.endswith("Decoder"): return True if model.endswith("Prenet"): return True def is_a_private_model(model: str) -> bool: """Returns `True` if the model should not be in the main init.""" if model in PRIVATE_MODELS: return True return is_building_block(model) def check_models_are_in_init(): """Checks all models defined in the library are in the main init.""" models_not_in_init = [] dir_transformers = dir(transformers) for module in get_model_modules(): models_not_in_init += [ model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers ] # Remove private models models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)] if len(models_not_in_init) > 0: raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.") # If some test_modeling files should be ignored when checking models are all tested, they should be added in the # nested list _ignore_files of this function. def get_model_test_files() -> List[str]: """ Get the model test files. Returns: `List[str]`: The list of test files. The returned files will NOT contain the `tests` (i.e. `PATH_TO_TESTS` defined in this script). They will be considered as paths relative to `tests`. A caller has to use `os.path.join(PATH_TO_TESTS, ...)` to access the files. """ _ignore_files = [ "test_modeling_common", "test_modeling_encoder_decoder", "test_modeling_flax_encoder_decoder", "test_modeling_flax_speech_encoder_decoder", "test_modeling_marian", "test_modeling_tf_common", "test_modeling_tf_encoder_decoder", ] test_files = [] model_test_root = os.path.join(PATH_TO_TESTS, "models") model_test_dirs = [] for x in os.listdir(model_test_root): x = os.path.join(model_test_root, x) if os.path.isdir(x): model_test_dirs.append(x) for target_dir in [PATH_TO_TESTS] + model_test_dirs: for file_or_dir in os.listdir(target_dir): path = os.path.join(target_dir, file_or_dir) if os.path.isfile(path): filename = os.path.split(path)[-1] if "test_modeling" in filename and os.path.splitext(filename)[0] not in _ignore_files: file = os.path.join(*path.split(os.sep)[1:]) test_files.append(file) return test_files # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class # for the all_model_classes variable. def find_tested_models(test_file: str) -> List[str]: """ Parse the content of test_file to detect what's in `all_model_classes`. This detects the models that inherit from the common test class. Args: test_file (`str`): The path to the test file to check Returns: `List[str]`: The list of models tested in that file. 
""" with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f: content = f.read() all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content) # Check with one less parenthesis as well all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content) if len(all_models) > 0: model_tested = [] for entry in all_models: for line in entry.split(","): name = line.strip() if len(name) > 0: model_tested.append(name) return model_tested def should_be_tested(model_name: str) -> bool: """ Whether or not a model should be tested. """ if model_name in IGNORE_NON_TESTED: return False return not is_building_block(model_name) def check_models_are_tested(module: types.ModuleType, test_file: str) -> List[str]: """Check models defined in a module are all tested in a given file. Args: module (`types.ModuleType`): The module in which we get the models. test_file (`str`): The path to the file where the module is tested. Returns: `List[str]`: The list of error messages corresponding to models not tested. """ # XxxPreTrainedModel are not tested defined_models = get_models(module) tested_models = find_tested_models(test_file) if tested_models is None: if test_file.replace(os.path.sep, "/") in TEST_FILES_WITH_NO_COMMON_TESTS: return return [ f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. " + "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file " + "`utils/check_repo.py`." ] failures = [] for model_name, _ in defined_models: if model_name not in tested_models and should_be_tested(model_name): failures.append( f"{model_name} is defined in {module.__name__} but is not tested in " + f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file." + "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`" + "in the file `utils/check_repo.py`." ) return failures def check_all_models_are_tested(): """Check all models are properly tested.""" modules = get_model_modules() test_files = get_model_test_files() failures = [] for module in modules: # Matches a module to its test file. test_file = [file for file in test_files if f"test_{module.__name__.split('.')[-1]}.py" in file] if len(test_file) == 0: failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.") elif len(test_file) > 1: failures.append(f"{module.__name__} has several test files: {test_file}.") else: test_file = test_file[0] new_failures = check_models_are_tested(module, test_file) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def get_all_auto_configured_models() -> List[str]: """Return the list of all models in at least one auto class.""" result = set() # To avoid duplicates we concatenate all model classes in a set. 
if is_torch_available(): for attr_name in dir(transformers.models.auto.modeling_auto): if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(transformers.models.auto.modeling_auto, attr_name))) if is_tf_available(): for attr_name in dir(transformers.models.auto.modeling_tf_auto): if attr_name.startswith("TF_MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(transformers.models.auto.modeling_tf_auto, attr_name))) if is_flax_available(): for attr_name in dir(transformers.models.auto.modeling_flax_auto): if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(transformers.models.auto.modeling_flax_auto, attr_name))) return list(result) def ignore_unautoclassed(model_name: str) -> bool: """Rules to determine if a model should be in an auto class.""" # Special white list if model_name in IGNORE_NON_AUTO_CONFIGURED: return True # Encoder and Decoder should be ignored if "Encoder" in model_name or "Decoder" in model_name: return True return False def check_models_are_auto_configured(module: types.ModuleType, all_auto_models: List[str]) -> List[str]: """ Check models defined in module are each in an auto class. Args: module (`types.ModuleType`): The module in which we get the models. all_auto_models (`List[str]`): The list of all models in an auto class (as obtained with `get_all_auto_configured_models()`). Returns: `List[str]`: The list of error messages corresponding to models not tested. """ defined_models = get_models(module) failures = [] for model_name, _ in defined_models: if model_name not in all_auto_models and not ignore_unautoclassed(model_name): failures.append( f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. " "If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file " "`utils/check_repo.py`." ) return failures def check_all_models_are_auto_configured(): """Check all models are each in an auto class.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() modules = get_model_modules() all_auto_models = get_all_auto_configured_models() failures = [] for module in modules: new_failures = check_models_are_auto_configured(module, all_auto_models) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_all_auto_object_names_being_defined(): """Check all names defined in auto (name) mappings exist in the library.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() failures = [] mappings_to_check = { "TOKENIZER_MAPPING_NAMES": TOKENIZER_MAPPING_NAMES, "IMAGE_PROCESSOR_MAPPING_NAMES": IMAGE_PROCESSOR_MAPPING_NAMES, "FEATURE_EXTRACTOR_MAPPING_NAMES": FEATURE_EXTRACTOR_MAPPING_NAMES, "PROCESSOR_MAPPING_NAMES": PROCESSOR_MAPPING_NAMES, } # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way. 
for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]: module = getattr(transformers.models.auto, module_name, None) if module is None: continue # all mappings in a single auto modeling file mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")] mappings_to_check.update({name: getattr(module, name) for name in mapping_names}) for name, mapping in mappings_to_check.items(): for _, class_names in mapping.items(): if not isinstance(class_names, tuple): class_names = (class_names,) for class_name in class_names: if class_name is None: continue # dummy object is accepted if not hasattr(transformers, class_name): # If the class name is in a model name mapping, let's not check if there is a definition in any modeling # module, if it's a private model defined in this file. if name.endswith("MODEL_MAPPING_NAMES") and is_a_private_model(class_name): continue if name.endswith("MODEL_FOR_IMAGE_MAPPING_NAMES") and is_a_private_model(class_name): continue failures.append( f"`{class_name}` appears in the mapping `{name}` but it is not defined in the library." ) if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_all_auto_mapping_names_in_config_mapping_names(): """Check all keys defined in auto mappings (mappings of names) appear in `CONFIG_MAPPING_NAMES`.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() failures = [] # `TOKENIZER_PROCESSOR_MAPPING_NAMES` and `AutoTokenizer` is special, and don't need to follow the rule. mappings_to_check = { "IMAGE_PROCESSOR_MAPPING_NAMES": IMAGE_PROCESSOR_MAPPING_NAMES, "FEATURE_EXTRACTOR_MAPPING_NAMES": FEATURE_EXTRACTOR_MAPPING_NAMES, "PROCESSOR_MAPPING_NAMES": PROCESSOR_MAPPING_NAMES, } # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way. for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]: module = getattr(transformers.models.auto, module_name, None) if module is None: continue # all mappings in a single auto modeling file mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")] mappings_to_check.update({name: getattr(module, name) for name in mapping_names}) for name, mapping in mappings_to_check.items(): for model_type in mapping: if model_type not in CONFIG_MAPPING_NAMES: failures.append( f"`{model_type}` appears in the mapping `{name}` but it is not defined in the keys of " "`CONFIG_MAPPING_NAMES`." ) if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_all_auto_mappings_importable(): """Check all auto mappings can be imported.""" # This is where we need to check we have all backends or the check is incomplete. check_missing_backends() failures = [] mappings_to_check = {} # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way. 
for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]: module = getattr(transformers.models.auto, module_name, None) if module is None: continue # all mappings in a single auto modeling file mapping_names = [x for x in dir(module) if x.endswith("_MAPPING_NAMES")] mappings_to_check.update({name: getattr(module, name) for name in mapping_names}) for name in mappings_to_check: name = name.replace("_MAPPING_NAMES", "_MAPPING") if not hasattr(transformers, name): failures.append(f"`{name}`") if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_objects_being_equally_in_main_init(): """ Check if a (TensorFlow or Flax) object is in the main __init__ iif its counterpart in PyTorch is. """ attrs = dir(transformers) failures = [] for attr in attrs: obj = getattr(transformers, attr) if not hasattr(obj, "__module__") or "models.deprecated" in obj.__module__: continue module_path = obj.__module__ module_name = module_path.split(".")[-1] module_dir = ".".join(module_path.split(".")[:-1]) if ( module_name.startswith("modeling_") and not module_name.startswith("modeling_tf_") and not module_name.startswith("modeling_flax_") ): parent_module = sys.modules[module_dir] frameworks = [] if is_tf_available(): frameworks.append("TF") if is_flax_available(): frameworks.append("Flax") for framework in frameworks: other_module_path = module_path.replace("modeling_", f"modeling_{framework.lower()}_") if os.path.isfile("src/" + other_module_path.replace(".", "/") + ".py"): other_module_name = module_name.replace("modeling_", f"modeling_{framework.lower()}_") other_module = getattr(parent_module, other_module_name) if hasattr(other_module, f"{framework}{attr}"): if not hasattr(transformers, f"{framework}{attr}"): if f"{framework}{attr}" not in OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK: failures.append(f"{framework}{attr}") if hasattr(other_module, f"{framework}_{attr}"): if not hasattr(transformers, f"{framework}_{attr}"): if f"{framework}_{attr}" not in OBJECT_TO_SKIP_IN_MAIN_INIT_CHECK: failures.append(f"{framework}_{attr}") if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) _re_decorator = re.compile(r"^\s*@(\S+)\s+$") def check_decorator_order(filename: str) -> List[int]: """ Check that in a given test file, the slow decorator is always last. Args: filename (`str`): The path to a test file to check. Returns: `List[int]`: The list of failures as a list of indices where there are problems. 
""" with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() decorator_before = None errors = [] for i, line in enumerate(lines): search = _re_decorator.search(line) if search is not None: decorator_name = search.groups()[0] if decorator_before is not None and decorator_name.startswith("parameterized"): errors.append(i) decorator_before = decorator_name elif decorator_before is not None: decorator_before = None return errors def check_all_decorator_order(): """Check that in all test files, the slow decorator is always last.""" errors = [] for fname in os.listdir(PATH_TO_TESTS): if fname.endswith(".py"): filename = os.path.join(PATH_TO_TESTS, fname) new_errors = check_decorator_order(filename) errors += [f"- {filename}, line {i}" for i in new_errors] if len(errors) > 0: msg = "\n".join(errors) raise ValueError( "The parameterized decorator (and its variants) should always be first, but this is not the case in the" f" following files:\n{msg}" ) def find_all_documented_objects() -> List[str]: """ Parse the content of all doc files to detect which classes and functions it documents. Returns: `List[str]`: The list of all object names being documented. `Dict[str, List[str]]`: A dictionary mapping the object name (full import path, e.g. `integrations.PeftAdapterMixin`) to its documented methods """ documented_obj = [] documented_methods_map = {} for doc_file in Path(PATH_TO_DOC).glob("**/*.md"): with open(doc_file, "r", encoding="utf-8", newline="\n") as f: content = f.read() raw_doc_objs = re.findall(r"\[\[autodoc\]\]\s+(\S+)\s+", content) documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] for obj in raw_doc_objs: obj_public_methods = re.findall(rf"\[\[autodoc\]\] {obj}((\n\s+-.*)+)", content) # Some objects have no methods documented if len(obj_public_methods) == 0: continue else: documented_methods_map[obj] = re.findall(r"(?<=-\s).*", obj_public_methods[0][0]) return documented_obj, documented_methods_map # One good reason for not being documented is to be deprecated. Put in this list deprecated objects. DEPRECATED_OBJECTS = [ "AutoModelWithLMHead", "BartPretrainedModel", "DataCollator", "DataCollatorForSOP", "GlueDataset", "GlueDataTrainingArguments", "LineByLineTextDataset", "LineByLineWithRefDataset", "LineByLineWithSOPTextDataset", "NerPipeline", "PretrainedBartModel", "PretrainedFSMTModel", "SingleSentenceClassificationProcessor", "SquadDataTrainingArguments", "SquadDataset", "SquadExample", "SquadFeatures", "SquadV1Processor", "SquadV2Processor", "TFAutoModelWithLMHead", "TFBartPretrainedModel", "TextDataset", "TextDatasetForNextSentencePrediction", "Wav2Vec2ForMaskedLM", "Wav2Vec2Tokenizer", "glue_compute_metrics", "glue_convert_examples_to_features", "glue_output_modes", "glue_processors", "glue_tasks_num_labels", "squad_convert_examples_to_features", "xnli_compute_metrics", "xnli_output_modes", "xnli_processors", "xnli_tasks_num_labels", "TFTrainingArguments", "OwlViTFeatureExtractor", ] # Exceptionally, some objects should not be documented after all rules passed. # ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT! UNDOCUMENTED_OBJECTS = [ "AddedToken", # This is a tokenizers class. "BasicTokenizer", # Internal, should never have been in the main init. "CharacterTokenizer", # Internal, should never have been in the main init. "DPRPretrainedReader", # Like an Encoder. "DummyObject", # Just picked by mistake sometimes. "MecabTokenizer", # Internal, should never have been in the main init. "ModelCard", # Internal type. 
"SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer) "TFDPRPretrainedReader", # Like an Encoder. "TransfoXLCorpus", # Internal type. "WordpieceTokenizer", # Internal, should never have been in the main init. "absl", # External module "add_end_docstrings", # Internal, should never have been in the main init. "add_start_docstrings", # Internal, should never have been in the main init. "convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights "logger", # Internal logger "logging", # External module "requires_backends", # Internal function "AltRobertaModel", # Internal module "VitPoseBackbone", # Internal module "VitPoseBackboneConfig", # Internal module ] # This list should be empty. Objects in it should get their own doc page. SHOULD_HAVE_THEIR_OWN_PAGE = [ "AutoBackbone", "BeitBackbone", "BitBackbone", "ConvNextBackbone", "ConvNextV2Backbone", "DinatBackbone", "Dinov2Backbone", "Dinov2WithRegistersBackbone", "FocalNetBackbone", "HieraBackbone", "MaskFormerSwinBackbone", "MaskFormerSwinConfig", "MaskFormerSwinModel", "NatBackbone", "PvtV2Backbone", "ResNetBackbone", "SwinBackbone", "Swinv2Backbone", "TextNetBackbone", "TimmBackbone", "TimmBackboneConfig", "VitDetBackbone", ] def ignore_undocumented(name: str) -> bool: """Rules to determine if `name` should be undocumented (returns `True` if it should not be documented).""" # NOT DOCUMENTED ON PURPOSE. # Constants uppercase are not documented. if name.isupper(): return True # PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented. if ( name.endswith("PreTrainedModel") or name.endswith("Decoder") or name.endswith("Encoder") or name.endswith("Layer") or name.endswith("Embeddings") or name.endswith("Attention") ): return True # Submodules are not documented. if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile( os.path.join(PATH_TO_TRANSFORMERS, f"{name}.py") ): return True # All load functions are not documented. if name.startswith("load_tf") or name.startswith("load_pytorch"): return True # is_xxx_available functions are not documented. if name.startswith("is_") and name.endswith("_available"): return True # Deprecated objects are not documented. if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS: return True # MMBT model does not really work. 
if name.startswith("MMBT"): return True if name in SHOULD_HAVE_THEIR_OWN_PAGE: return True return False def check_all_objects_are_documented(): """Check all models are properly documented.""" documented_objs, documented_methods_map = find_all_documented_objects() modules = transformers._modules objects = [c for c in dir(transformers) if c not in modules and not c.startswith("_")] undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)] if len(undocumented_objs) > 0: raise Exception( "The following objects are in the public init so should be documented:\n - " + "\n - ".join(undocumented_objs) ) check_model_type_doc_match() check_public_method_exists(documented_methods_map) def check_public_method_exists(documented_methods_map): """Check that all explicitly documented public methods are defined in the corresponding class.""" failures = [] for obj, methods in documented_methods_map.items(): # Let's ensure there is no repetition if len(set(methods)) != len(methods): failures.append(f"Error in the documentation of {obj}: there are repeated documented methods.") # Navigates into the object, given the full import path nested_path = obj.split(".") submodule = transformers if len(nested_path) > 1: nested_submodules = nested_path[:-1] for submodule_name in nested_submodules: if submodule_name == "transformers": continue try: submodule = getattr(submodule, submodule_name) except AttributeError: failures.append(f"Could not parse {submodule_name}. Are the required dependencies installed?") continue class_name = nested_path[-1] try: obj_class = getattr(submodule, class_name) except AttributeError: failures.append(f"Could not parse {class_name}. Are the required dependencies installed?") continue # Checks that all explicitly documented methods are defined in the class for method in methods: if method == "all": # Special keyword to document all public methods continue try: if not hasattr(obj_class, method): failures.append( "The following public method is explicitly documented but not defined in the corresponding " f"class. class: {obj}, method: {method}. If the method is defined, this error can be due to " f"lacking dependencies." ) except ImportError: pass if len(failures) > 0: raise Exception("\n".join(failures)) def check_model_type_doc_match(): """Check all doc pages have a corresponding model type.""" model_doc_folder = Path(PATH_TO_DOC) / "model_doc" model_docs = [m.stem for m in model_doc_folder.glob("*.md")] model_types = list(transformers.models.auto.configuration_auto.MODEL_NAMES_MAPPING.keys()) model_types = [MODEL_TYPE_TO_DOC_MAPPING[m] if m in MODEL_TYPE_TO_DOC_MAPPING else m for m in model_types] errors = [] for m in model_docs: if m not in model_types and m != "auto": close_matches = get_close_matches(m, model_types) error_message = f"{m} is not a proper model identifier." if len(close_matches) > 0: close_matches = "/".join(close_matches) error_message += f" Did you mean {close_matches}?" errors.append(error_message) if len(errors) > 0: raise ValueError( "Some model doc pages do not match any existing model type:\n" + "\n".join(errors) + "\nYou can add any missing model type to the `MODEL_NAMES_MAPPING` constant in " "models/auto/configuration_auto.py." ) def check_deprecated_constant_is_up_to_date(): """ Check if the constant `DEPRECATED_MODELS` in `models/auto/configuration_auto.py` is up to date. 
""" deprecated_folder = os.path.join(PATH_TO_TRANSFORMERS, "models", "deprecated") deprecated_models = [m for m in os.listdir(deprecated_folder) if not m.startswith("_")] constant_to_check = transformers.models.auto.configuration_auto.DEPRECATED_MODELS message = [] missing_models = sorted(set(deprecated_models) - set(constant_to_check)) if len(missing_models) != 0: missing_models = ", ".join(missing_models) message.append( "The following models are in the deprecated folder, make sure to add them to `DEPRECATED_MODELS` in " f"`models/auto/configuration_auto.py`: {missing_models}." ) extra_models = sorted(set(constant_to_check) - set(deprecated_models)) if len(extra_models) != 0: extra_models = ", ".join(extra_models) message.append( "The following models are in the `DEPRECATED_MODELS` constant but not in the deprecated folder. Either " f"remove them from the constant or move to the deprecated folder: {extra_models}." ) if len(message) > 0: raise Exception("\n".join(message)) def check_repo_quality(): """Check all models are tested and documented.""" print("Repository-wide checks:") print(" - checking all models are included.") check_model_list() print(" - checking all models are public.") check_models_are_in_init() print(" - checking all models have tests.") check_all_decorator_order() check_all_models_are_tested() print(" - checking all objects have documentation.") check_all_objects_are_documented() print(" - checking all models are in at least one auto class.") check_all_models_are_auto_configured() print(" - checking all names in auto name mappings are defined.") check_all_auto_object_names_being_defined() print(" - checking all keys in auto name mappings are defined in `CONFIG_MAPPING_NAMES`.") check_all_auto_mapping_names_in_config_mapping_names() print(" - checking all auto mappings could be imported.") check_all_auto_mappings_importable() print(" - checking all objects are equally (across frameworks) in the main __init__.") check_objects_being_equally_in_main_init() print(" - checking the DEPRECATED_MODELS constant is up to date.") check_deprecated_constant_is_up_to_date() if __name__ == "__main__": check_repo_quality()
transformers/utils/check_repo.py/0
{ "file_path": "transformers/utils/check_repo.py", "repo_id": "transformers", "token_count": 20316 }
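To make one of the repo checks above concrete, the sketch below replays the decorator-ordering rule from `check_decorator_order()` on a toy list of lines: decorators built on `parameterized` must come first, so one appearing after `@slow` is flagged. It is a standalone, stdlib-only sketch; `TEST_CASES` is a made-up name.

```python
# Toy replay of the rule enforced by check_decorator_order() above:
# a `parameterized` decorator that appears after another decorator is flagged.
import re

_re_decorator = re.compile(r"^\s*@(\S+)\s+$")

lines = [
    "    @slow\n",
    "    @parameterized.expand(TEST_CASES)\n",  # flagged: should come before @slow
    "    def test_something(self):\n",
]

decorator_before, errors = None, []
for i, line in enumerate(lines):
    search = _re_decorator.search(line)
    if search is not None:
        name = search.groups()[0]
        if decorator_before is not None and name.startswith("parameterized"):
            errors.append(i)
        decorator_before = name
    elif decorator_before is not None:
        decorator_before = None

print(errors)  # [1] -> index of the misplaced parameterized decorator
```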
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A script running `create_dummy_models.py` with a pre-defined set of arguments. This file is intended to be used in a CI workflow file without the need of specifying arguments. It creates and uploads tiny models for all model classes (if their tiny versions are not on the Hub yet), as well as produces an updated version of `tests/utils/tiny_model_summary.json`. That updated file should be merged into the `main` branch of `transformers` so the pipeline testing will use the latest created/updated tiny models. """ import argparse import copy import json import multiprocessing import os import time from create_dummy_models import COMPOSITE_MODELS, create_tiny_models from huggingface_hub import ModelFilter, hf_api import transformers from transformers import AutoFeatureExtractor, AutoImageProcessor, AutoTokenizer from transformers.image_processing_utils import BaseImageProcessor def get_all_model_names(): model_names = set() # Each auto modeling files contains multiple mappings. Let's get them in a dynamic way. for module_name in ["modeling_auto", "modeling_tf_auto", "modeling_flax_auto"]: module = getattr(transformers.models.auto, module_name, None) if module is None: continue # all mappings in a single auto modeling file mapping_names = [ x for x in dir(module) if x.endswith("_MAPPING_NAMES") and (x.startswith("MODEL_") or x.startswith("TF_MODEL_") or x.startswith("FLAX_MODEL_")) ] for name in mapping_names: mapping = getattr(module, name) if mapping is not None: for v in mapping.values(): if isinstance(v, (list, tuple)): model_names.update(v) elif isinstance(v, str): model_names.add(v) return sorted(model_names) def get_tiny_model_names_from_repo(): # All model names defined in auto mappings model_names = set(get_all_model_names()) with open("tests/utils/tiny_model_summary.json") as fp: tiny_model_info = json.load(fp) tiny_models_names = set() for model_base_name in tiny_model_info: tiny_models_names.update(tiny_model_info[model_base_name]["model_classes"]) # Remove a tiny model name if one of its framework implementation hasn't yet a tiny version on the Hub. 
not_on_hub = model_names.difference(tiny_models_names) for model_name in copy.copy(tiny_models_names): if not model_name.startswith("TF") and f"TF{model_name}" in not_on_hub: tiny_models_names.remove(model_name) elif model_name.startswith("TF") and model_name[2:] in not_on_hub: tiny_models_names.remove(model_name) return sorted(tiny_models_names) def get_tiny_model_summary_from_hub(output_path): special_models = COMPOSITE_MODELS.values() # All tiny model base names on Hub model_names = get_all_model_names() models = hf_api.list_models( filter=ModelFilter( author="hf-internal-testing", ) ) _models = set() for x in models: model = x.id org, model = model.split("/") if not model.startswith("tiny-random-"): continue model = model.replace("tiny-random-", "") if not model[0].isupper(): continue if model not in model_names and model not in special_models: continue _models.add(model) models = sorted(_models) # All tiny model names on Hub summary = {} for model in models: repo_id = f"hf-internal-testing/tiny-random-{model}" model = model.split("-")[0] try: repo_info = hf_api.repo_info(repo_id) content = { "tokenizer_classes": set(), "processor_classes": set(), "model_classes": set(), "sha": repo_info.sha, } except Exception: continue try: time.sleep(1) tokenizer_fast = AutoTokenizer.from_pretrained(repo_id) content["tokenizer_classes"].add(tokenizer_fast.__class__.__name__) except Exception: pass try: time.sleep(1) tokenizer_slow = AutoTokenizer.from_pretrained(repo_id, use_fast=False) content["tokenizer_classes"].add(tokenizer_slow.__class__.__name__) except Exception: pass try: time.sleep(1) img_p = AutoImageProcessor.from_pretrained(repo_id) content["processor_classes"].add(img_p.__class__.__name__) except Exception: pass try: time.sleep(1) feat_p = AutoFeatureExtractor.from_pretrained(repo_id) if not isinstance(feat_p, BaseImageProcessor): content["processor_classes"].add(feat_p.__class__.__name__) except Exception: pass try: time.sleep(1) model_class = getattr(transformers, model) m = model_class.from_pretrained(repo_id) content["model_classes"].add(m.__class__.__name__) except Exception: pass try: time.sleep(1) model_class = getattr(transformers, f"TF{model}") m = model_class.from_pretrained(repo_id) content["model_classes"].add(m.__class__.__name__) except Exception: pass content["tokenizer_classes"] = sorted(content["tokenizer_classes"]) content["processor_classes"] = sorted(content["processor_classes"]) content["model_classes"] = sorted(content["model_classes"]) summary[model] = content with open(os.path.join(output_path, "hub_tiny_model_summary.json"), "w") as fp: json.dump(summary, fp, ensure_ascii=False, indent=4) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--num_workers", default=1, type=int, help="The number of workers to run.") args = parser.parse_args() # This has to be `spawn` to avoid hanging forever! multiprocessing.set_start_method("spawn") output_path = "tiny_models" all = True model_types = None models_to_skip = get_tiny_model_names_from_repo() no_check = True upload = True organization = "hf-internal-testing" create_tiny_models( output_path, all, model_types, models_to_skip, no_check, upload, organization, token=os.environ.get("TOKEN", None), num_workers=args.num_workers, )
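# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original script). The parser above
# only exposes `--num_workers`, and the Hub token is read from the `TOKEN`
# environment variable, so a typical invocation would look roughly like:
#
#     TOKEN=hf_xxx python utils/update_tiny_models.py --num_workers 2
#
# The tiny checkpoints are written under the local `tiny_models/` folder and
# uploaded to the `hf-internal-testing` organization; the refreshed
# `tests/utils/tiny_model_summary.json` produced by the run is the file that is
# expected to be merged back into `main`.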
transformers/utils/update_tiny_models.py/0
{ "file_path": "transformers/utils/update_tiny_models.py", "repo_id": "transformers", "token_count": 3071 }
# Data Utilities ## is_conversational [[autodoc]] is_conversational ## apply_chat_template [[autodoc]] apply_chat_template ## maybe_apply_chat_template [[autodoc]] maybe_apply_chat_template ## extract_prompt [[autodoc]] extract_prompt ## maybe_extract_prompt [[autodoc]] maybe_extract_prompt ## unpair_preference_dataset [[autodoc]] unpair_preference_dataset ## maybe_unpair_preference_dataset [[autodoc]] maybe_unpair_preference_dataset ## pack_examples [[autodoc]] pack_examples
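Below is a minimal usage sketch for two of the utilities documented above. It assumes a tokenizer that ships a chat template (the Qwen checkpoint is only an example); refer to the autodoc entries for the exact signatures.

```python
from transformers import AutoTokenizer
from trl import is_conversational, maybe_apply_chat_template

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

example = {
    "prompt": [{"role": "user", "content": "What color is the sky?"}],
    "completion": [{"role": "assistant", "content": "It is blue."}],
}

# True for examples whose fields are lists of chat messages
print(is_conversational(example))

# Renders the message lists into plain strings using the tokenizer's chat template;
# a non-conversational example would be passed through unchanged by the "maybe_" variant.
print(maybe_apply_chat_template(example, tokenizer))
```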
trl/docs/source/data_utils.md/0
{ "file_path": "trl/docs/source/data_utils.md", "repo_id": "trl", "token_count": 188 }
# Supervised Fine-tuning Trainer

[![](https://img.shields.io/badge/All_models-SFT-blue)](https://huggingface.co/models?other=sft,trl) [![](https://img.shields.io/badge/smol_course-Chapter_1-yellow)](https://github.com/huggingface/smol-course/tree/main/1_instruction_tuning)

Supervised fine-tuning (or SFT for short) is a crucial step in RLHF. In TRL we provide an easy-to-use API to create your SFT models and train them with a few lines of code on your dataset.

Check out a complete, flexible example at [`trl/scripts/sft.py`](https://github.com/huggingface/trl/tree/main/trl/scripts/sft.py). Experimental support for Vision Language Models is also included in the example [`examples/scripts/sft_vlm.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/sft_vlm.py).

## Quickstart

If you have a dataset hosted on the 🤗 Hub, you can easily fine-tune your SFT model using [`SFTTrainer`] from TRL. Let us assume your dataset is `imdb`, the text you want to predict is inside the `text` field of the dataset, and you want to fine-tune the `facebook/opt-350m` model.
The following code snippet takes care of all the data pre-processing and training for you:

```python
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("stanfordnlp/imdb", split="train")

training_args = SFTConfig(
    max_seq_length=512,
    output_dir="/tmp",
)
trainer = SFTTrainer(
    "facebook/opt-350m",
    train_dataset=dataset,
    args=training_args,
)
trainer.train()
```
Make sure to pass the correct value for `max_seq_length`, as the default will be set to `min(tokenizer.model_max_length, 1024)`.

You can also construct a model outside of the trainer and pass it as follows:

```python
from transformers import AutoModelForCausalLM
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("stanfordnlp/imdb", split="train")

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

training_args = SFTConfig(output_dir="/tmp")

trainer = SFTTrainer(
    model,
    train_dataset=dataset,
    args=training_args,
)

trainer.train()
```

The above snippets will use the default training arguments from the [`SFTConfig`] class. If you want to modify the defaults, pass your modifications to the [`SFTConfig`] constructor and hand them to the trainer via the `args` argument.

## Advanced usage

### Train on completions only

You can use the `DataCollatorForCompletionOnlyLM` to train your model on the completions only, i.e. the prompt tokens are masked out of the loss. Note that this works only in the case when `packing=False`.
To instantiate that collator for instruction data, pass a response template and the tokenizer.
Here is an example of how it would work to fine-tune `opt-350m` on completions only on the CodeAlpaca dataset: ```python from transformers import AutoModelForCausalLM, AutoTokenizer from datasets import load_dataset from trl import SFTConfig, SFTTrainer, DataCollatorForCompletionOnlyLM dataset = load_dataset("lucasmccabe-lmi/CodeAlpaca-20k", split="train") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") def formatting_prompts_func(example): output_texts = [] for i in range(len(example['instruction'])): text = f"### Question: {example['instruction'][i]}\n ### Answer: {example['output'][i]}" output_texts.append(text) return output_texts response_template = " ### Answer:" collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer) trainer = SFTTrainer( model, train_dataset=dataset, args=SFTConfig(output_dir="/tmp"), formatting_func=formatting_prompts_func, data_collator=collator, ) trainer.train() ``` To instantiate that collator for assistant style conversation data, pass a response template, an instruction template and the tokenizer. Here is an example of how it would work to fine-tune `opt-350m` on assistant completions only on the Open Assistant Guanaco dataset: ```python from transformers import AutoModelForCausalLM, AutoTokenizer from datasets import load_dataset from trl import SFTConfig, SFTTrainer, DataCollatorForCompletionOnlyLM dataset = load_dataset("timdettmers/openassistant-guanaco", split="train") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") instruction_template = "### Human:" response_template = "### Assistant:" collator = DataCollatorForCompletionOnlyLM(instruction_template=instruction_template, response_template=response_template, tokenizer=tokenizer, mlm=False) trainer = SFTTrainer( model, args=SFTConfig(output_dir="/tmp"), train_dataset=dataset, data_collator=collator, ) trainer.train() ``` Make sure to have a `pad_token_id` which is different from `eos_token_id` which can result in the model not properly predicting EOS (End of Sentence) tokens during generation. #### Using token_ids directly for `response_template` Some tokenizers like Llama 2 (`meta-llama/Llama-2-XXb-hf`) tokenize sequences differently depending on whether they have context or not. For example: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf") def print_tokens_with_ids(txt): tokens = tokenizer.tokenize(txt, add_special_tokens=False) token_ids = tokenizer.encode(txt, add_special_tokens=False) print(list(zip(tokens, token_ids))) prompt = """### User: Hello\n\n### Assistant: Hi, how can I help you?""" print_tokens_with_ids(prompt) # [..., ('▁Hello', 15043), ('<0x0A>', 13), ('<0x0A>', 13), ('##', 2277), ('#', 29937), ('▁Ass', 4007), ('istant', 22137), (':', 29901), ...] 
response_template = "### Assistant:" print_tokens_with_ids(response_template) # [('▁###', 835), ('▁Ass', 4007), ('istant', 22137), (':', 29901)] ``` In this case, and due to lack of context in `response_template`, the same string ("### Assistant:") is tokenized differently: - Text (with context): `[2277, 29937, 4007, 22137, 29901]` - `response_template` (without context): `[835, 4007, 22137, 29901]` This will lead to an error when the `DataCollatorForCompletionOnlyLM` does not find the `response_template` in the dataset example text: ``` RuntimeError: Could not find response key [835, 4007, 22137, 29901] in token IDs tensor([ 1, 835, ...]) ``` To solve this, you can tokenize the `response_template` with the same context as in the dataset, truncate it as needed and pass the `token_ids` directly to the `response_template` argument of the `DataCollatorForCompletionOnlyLM` class. For example: ```python response_template_with_context = "\n### Assistant:" # We added context here: "\n". This is enough for this tokenizer response_template_ids = tokenizer.encode(response_template_with_context, add_special_tokens=False)[2:] # Now we have it like in the dataset texts: `[2277, 29937, 4007, 22137, 29901]` data_collator = DataCollatorForCompletionOnlyLM(response_template_ids, tokenizer=tokenizer) ``` ### Add Special Tokens for Chat Format Adding special tokens to a language model is crucial for training chat models. These tokens are added between the different roles in a conversation, such as the user, assistant, and system and help the model recognize the structure and flow of a conversation. This setup is essential for enabling the model to generate coherent and contextually appropriate responses in a chat environment. The [`setup_chat_format`] function in `trl` easily sets up a model and tokenizer for conversational AI tasks. This function: - Adds special tokens to the tokenizer, e.g. `<|im_start|>` and `<|im_end|>`, to indicate the start and end of a conversation. - Resizes the model’s embedding layer to accommodate the new tokens. - Sets the `chat_template` of the tokenizer, which is used to format the input data into a chat-like format. The default is `chatml` from OpenAI. - _optionally_ you can pass `resize_to_multiple_of` to resize the embedding layer to a multiple of the `resize_to_multiple_of` argument, e.g. 64. If you want to see more formats being supported in the future, please open a GitHub issue on [trl](https://github.com/huggingface/trl) ```python from transformers import AutoModelForCausalLM, AutoTokenizer from trl import setup_chat_format # Load model and tokenizer model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") # Set up the chat format with default 'chatml' format model, tokenizer = setup_chat_format(model, tokenizer) ``` With our model and tokenizer set up, we can now fine-tune our model on a conversational dataset. Below is an example of how a dataset can be formatted for fine-tuning. ### Dataset format support The [`SFTTrainer`] supports popular dataset formats. This allows you to pass the dataset to the trainer without any pre-processing directly. 
The following formats are supported: * conversational format ```json {"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "..."}]} {"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "..."}]} {"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "..."}]} ``` * instruction format ```json {"prompt": "<prompt text>", "completion": "<ideal generated text>"} {"prompt": "<prompt text>", "completion": "<ideal generated text>"} {"prompt": "<prompt text>", "completion": "<ideal generated text>"} ``` If your dataset uses one of the above formats, you can directly pass it to the trainer without pre-processing. The [`SFTTrainer`] will then format the dataset for you using the defined format from the model's tokenizer with the [apply_chat_template](https://huggingface.co/docs/transformers/main/en/chat_templating#templates-for-chat-models) method. ```python from datasets import load_dataset from trl import SFTConfig, SFTTrainer ... # load jsonl dataset dataset = load_dataset("json", data_files="path/to/dataset.jsonl", split="train") # load dataset from the HuggingFace Hub dataset = load_dataset("philschmid/dolly-15k-oai-style", split="train") ... training_args = SFTConfig(packing=True) trainer = SFTTrainer( "facebook/opt-350m", args=training_args, train_dataset=dataset, ) ``` If the dataset is not in one of those format you can either preprocess the dataset to match the formatting or pass a formatting function to the SFTTrainer to do it for you. Let's have a look. ### Format your input prompts For instruction fine-tuning, it is quite common to have two columns inside the dataset: one for the prompt & the other for the response. This allows people to format examples like [Stanford-Alpaca](https://github.com/tatsu-lab/stanford_alpaca) did as follows: ```bash Below is an instruction ... ### Instruction {prompt} ### Response: {completion} ``` Let us assume your dataset has two fields, `question` and `answer`. Therefore you can just run: ```python ... def formatting_prompts_func(example): output_texts = [] for i in range(len(example['question'])): text = f"### Question: {example['question'][i]}\n ### Answer: {example['answer'][i]}" output_texts.append(text) return output_texts trainer = SFTTrainer( model, args=training_args, train_dataset=dataset, formatting_func=formatting_prompts_func, ) trainer.train() ``` To properly format your input make sure to process all the examples by looping over them and returning a list of processed text. Check out a full example of how to use SFTTrainer on alpaca dataset [here](https://github.com/huggingface/trl/pull/444#issue-1760952763) ### Packing dataset ([`ConstantLengthDataset`]) [`SFTTrainer`] supports _example packing_, where multiple short examples are packed in the same input sequence to increase training efficiency. This is done with the [`ConstantLengthDataset`] utility class that returns constant length chunks of tokens from a stream of examples. To enable the usage of this dataset class, simply pass `packing=True` to the [`SFTConfig`] constructor. ```python ... 
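# `dataset` is assumed to be loaded as in the snippets above; `packing=True`
# makes the trainer pack multiple short examples into constant-length sequences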
training_args = SFTConfig(packing=True) trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, args=training_args ) trainer.train() ``` Note that if you use a packed dataset and if you pass `max_steps` in the training arguments you will probably train your models for more than few epochs, depending on the way you have configured the packed dataset and the training protocol. Double check that you know and understand what you are doing. If you don't want to pack your `eval_dataset`, you can pass `eval_packing=False` to the `SFTConfig` init method. #### Customize your prompts using packed dataset If your dataset has several fields that you want to combine, for example if the dataset has `question` and `answer` fields and you want to combine them, you can pass a formatting function to the trainer that will take care of that. For example: ```python def formatting_func(example): text = f"### Question: {example['question']}\n ### Answer: {example['answer']}" return text training_args = SFTConfig(packing=True) trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, args=training_args, formatting_func=formatting_func ) trainer.train() ``` You can also customize the [`ConstantLengthDataset`] much more by directly passing the arguments to the [`SFTConfig`] constructor. Please refer to that class' signature for more information. ### Control over the pretrained model You can directly pass the kwargs of the `from_pretrained()` method to the [`SFTConfig`]. For example, if you want to load a model in a different precision, analogous to ```python model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.bfloat16) ... training_args = SFTConfig( model_init_kwargs={ "torch_dtype": "bfloat16", }, output_dir="/tmp", ) trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, args=training_args, ) trainer.train() ``` Note that all keyword arguments of `from_pretrained()` are supported. ### Training adapters We also support tight integration with 🤗 PEFT library so that any user can conveniently train adapters and share them on the Hub instead of training the entire model. ```python from datasets import load_dataset from trl import SFTConfig, SFTTrainer from peft import LoraConfig dataset = load_dataset("trl-lib/Capybara", split="train") peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, target_modules="all-linear", modules_to_save=["lm_head", "embed_token"], task_type="CAUSAL_LM", ) trainer = SFTTrainer( "Qwen/Qwen2.5-0.5B", train_dataset=dataset, args=SFTConfig(output_dir="Qwen2.5-0.5B-SFT"), peft_config=peft_config ) trainer.train() ``` > [!WARNING] > If the chat template contains special tokens like `<|im_start|>` (ChatML) or `<|eot_id|>` (Llama), the embedding layer and LM head must be included in the trainable parameters via the `modules_to_save` argument. Without this, the fine-tuned model will produce unbounded or nonsense generations. If the chat template doesn't contain special tokens (e.g. Alpaca), then the `modules_to_save` argument can be ignored or set to `None`. You can also continue training your `PeftModel`. For that, first load a `PeftModel` outside `SFTTrainer` and pass it directly to the trainer without the `peft_config` argument being passed. ### Training adapters with base 8 bit models For that, you need to first load your 8 bit model outside the Trainer and pass a `PeftConfig` to the trainer. For example: ```python ... 
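# Assumes `dataset` is defined as in the earlier examples and that `bitsandbytes`
# is installed so the base model can be loaded in 8 bit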
peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map="auto", ) trainer = SFTTrainer( model, train_dataset=dataset, args=SFTConfig(), peft_config=peft_config, ) trainer.train() ``` ## Using Flash Attention and Flash Attention 2 You can benefit from Flash Attention 1 & 2 using SFTTrainer out of the box with minimal changes of code. First, to make sure you have all the latest features from transformers, install transformers from source ```bash pip install -U git+https://github.com/huggingface/transformers.git ``` Note that Flash Attention only works on GPU now and under half-precision regime (when using adapters, base model loaded in half-precision) Note also both features are perfectly compatible with other tools such as quantization. ### Using Flash-Attention 1 For Flash Attention 1 you can use the `BetterTransformer` API and force-dispatch the API to use Flash Attention kernel. First, install the latest optimum package: ```bash pip install -U optimum ``` Once you have loaded your model, wrap the `trainer.train()` call under the `with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):` context manager: ```diff ... + with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): trainer.train() ``` Note that you cannot train your model using Flash Attention 1 on an arbitrary dataset as `torch.scaled_dot_product_attention` does not support training with padding tokens if you use Flash Attention kernels. Therefore you can only use that feature with `packing=True`. If your dataset contains padding tokens, consider switching to Flash Attention 2 integration. Below are some numbers you can get in terms of speedup and memory efficiency, using Flash Attention 1, on a single NVIDIA-T4 16GB. | use_flash_attn_1 | model_name | max_seq_len | batch_size | time per training step | | ---------------- | ----------------- | ----------- | ---------- | ---------------------- | | x | facebook/opt-350m | 2048 | 8 | ~59.1s | | | facebook/opt-350m | 2048 | 8 | **OOM** | | x | facebook/opt-350m | 2048 | 4 | ~30.3s | | | facebook/opt-350m | 2048 | 4 | ~148.9s | ### Using Flash Attention-2 To use Flash Attention 2, first install the latest `flash-attn` package: ```bash pip install -U flash-attn ``` And add `attn_implementation="flash_attention_2"` when calling `from_pretrained`: ```python model = AutoModelForCausalLM.from_pretrained( model_id, load_in_4bit=True, attn_implementation="flash_attention_2" ) ``` If you don't use quantization, make sure your model is loaded in half-precision and dispatch your model on a supported GPU device. After loading your model, you can either train it as it is, or attach adapters and train adapters on it in case your model is quantized. In contrast to Flash Attention 1, the integration makes it possible to train your model on an arbitrary dataset that also includes padding tokens. ### Using model creation utility We included a utility function to create your model. 
[[autodoc]] ModelConfig

```python
import torch
from transformers import AutoModelForCausalLM
from trl import ModelConfig, SFTTrainer, get_kbit_device_map, get_peft_config, get_quantization_config

model_args = ModelConfig(
    model_name_or_path="facebook/opt-350m",
    attn_implementation=None,  # or "flash_attention_2"
)
torch_dtype = (
    model_args.torch_dtype
    if model_args.torch_dtype in ["auto", None]
    else getattr(torch, model_args.torch_dtype)
)
quantization_config = get_quantization_config(model_args)
model_kwargs = dict(
    revision=model_args.model_revision,
    trust_remote_code=model_args.trust_remote_code,
    attn_implementation=model_args.attn_implementation,
    torch_dtype=torch_dtype,
    use_cache=False if training_args.gradient_checkpointing else True,  # `training_args` is the SFTConfig from the previous sections
    device_map=get_kbit_device_map() if quantization_config is not None else None,
    quantization_config=quantization_config,
)
model = AutoModelForCausalLM.from_pretrained(model_args.model_name_or_path, **model_kwargs)
trainer = SFTTrainer(
    ...,
    model=model_args.model_name_or_path,
    peft_config=get_peft_config(model_args),
)
```

### Enhance the model's performances using NEFTune

NEFTune is a technique to boost the performance of chat models and was introduced by the paper ["NEFTune: Noisy Embeddings Improve Instruction Finetuning"](https://huggingface.co/papers/2310.05914) from Jain et al. It consists of adding noise to the embedding vectors during training. According to the abstract of the paper:

> Standard finetuning of LLaMA-2-7B using Alpaca achieves 29.79% on AlpacaEval, which rises to 64.69% using noisy embeddings. NEFTune also improves over strong baselines on modern instruction datasets. Models trained with Evol-Instruct see a 10% improvement, with ShareGPT an 8% improvement, and with OpenPlatypus an 8% improvement. Even powerful models further refined with RLHF such as LLaMA-2-Chat benefit from additional training with NEFTune.

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/neft-screenshot.png">
</div>

To use it in `SFTTrainer`, simply pass `neftune_noise_alpha` when creating your `SFTConfig` instance. Note that to avoid any surprising behaviour, NEFTune is disabled after training to restore the original behaviour of the embedding layer.

```python
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("stanfordnlp/imdb", split="train")

training_args = SFTConfig(
    neftune_noise_alpha=5,
)
trainer = SFTTrainer(
    "facebook/opt-350m",
    train_dataset=dataset,
    args=training_args,
)
trainer.train()
```

We have tested NEFTune by training `mistralai/Mistral-7B-v0.1` on the [OpenAssistant dataset](https://huggingface.co/datasets/timdettmers/openassistant-guanaco) and validated that using NEFTune led to a performance boost of ~25% on MT Bench.

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl-neftune-mistral-7b.png">
</div>

Note, however, that the amount of performance gain is _dataset dependent_; in particular, applying NEFTune on synthetic datasets like [UltraChat](https://huggingface.co/datasets/stingning/ultrachat) typically produces smaller gains.

### Accelerate fine-tuning 2x using `unsloth`

You can further accelerate QLoRA / LoRA (2x faster, 60% less memory) using the [`unsloth`](https://github.com/unslothai/unsloth) library, which is fully compatible with `SFTTrainer`. Currently `unsloth` supports only Llama (Yi, TinyLlama, Qwen, Deepseek, etc.) and Mistral architectures.
Some benchmarks on 1x A100 listed below: | 1 A100 40GB | Dataset | 🤗 | 🤗 + Flash Attention 2 | 🦥 Unsloth | 🦥 VRAM saved | | --------------- | --------- | --- | --------------------- | --------- | ------------ | | Code Llama 34b | Slim Orca | 1x | 1.01x | **1.94x** | -22.7% | | Llama-2 7b | Slim Orca | 1x | 0.96x | **1.87x** | -39.3% | | Mistral 7b | Slim Orca | 1x | 1.17x | **1.88x** | -65.9% | | Tiny Llama 1.1b | Alpaca | 1x | 1.55x | **2.74x** | -57.8% | First install `unsloth` according to the [official documentation](https://github.com/unslothai/unsloth). Once installed, you can incorporate unsloth into your workflow in a very simple manner; instead of loading `AutoModelForCausalLM`, you just need to load a `FastLanguageModel` as follows: ```python import torch from trl import SFTConfig, SFTTrainer from unsloth import FastLanguageModel max_seq_length = 2048 # Supports automatic RoPE Scaling, so choose any number # Load model model, tokenizer = FastLanguageModel.from_pretrained( model_name="unsloth/mistral-7b", max_seq_length=max_seq_length, dtype=None, # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ load_in_4bit=True, # Use 4bit quantization to reduce memory usage. Can be False # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf ) # Do model patching and add fast LoRA weights model = FastLanguageModel.get_peft_model( model, r=16, target_modules=[ "q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj", ], lora_alpha=16, lora_dropout=0, # Dropout = 0 is currently optimized bias="none", # Bias = "none" is currently optimized use_gradient_checkpointing=True, random_state=3407, ) training_args = SFTConfig(output_dir="./output", max_seq_length=max_seq_length) trainer = SFTTrainer( model=model, args=training_args, train_dataset=dataset, ) trainer.train() ``` The saved model is fully compatible with Hugging Face's transformers library. Learn more about unsloth in their [official repository](https://github.com/unslothai/unsloth). ## Liger-Kernel: Increase 20% throughput and reduces 60% memory for multi-GPU training [Liger Kernel](https://github.com/linkedin/Liger-Kernel) is a collection of Triton kernels designed specifically for LLM training. It can effectively increase multi-GPU training throughput by 20% and reduces memory usage by 60%. That way, we can **4x** our context length, as described in the benchmark below. They have implemented Hugging Face Compatible `RMSNorm`, `RoPE`, `SwiGLU`, `CrossEntropy`, `FusedLinearCrossEntropy`, and more to come. The kernel works out of the box with [Flash Attention](https://github.com/Dao-AILab/flash-attention), [PyTorch FSDP](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html), and [Microsoft DeepSpeed](https://github.com/microsoft/DeepSpeed). With great memory reduction, you can potentially turn off cpu_offloading or gradient checkpointing to further boost the performance. | Speed Up | Memory Reduction | |--------------------------|-------------------------| | ![Speed up](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/docs/images/e2e-tps.png) | ![Memory](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/docs/images/e2e-memory.png) | 1. To use Liger-Kernel in `SFTTrainer`, first install by ```bash pip install liger-kernel ``` 2. Once installed, set `use_liger` in [`SFTConfig`]. No other changes are needed! 
```python training_args = SFTConfig( use_liger=True ) ``` To learn more about Liger-Kernel, visit their [official repository](https://github.com/linkedin/Liger-Kernel/). ## Best practices Pay attention to the following best practices when training a model with that trainer: - [`SFTTrainer`] always truncates by default the sequences to the `max_seq_length` argument of the [`SFTConfig`]. If none is passed, the trainer will retrieve that value from the tokenizer. Some tokenizers do not provide a default value, so there is a check to retrieve the minimum between 1024 and that value. Make sure to check it before training. - For training adapters in 8bit, you might need to tweak the arguments of the `prepare_model_for_kbit_training` method from PEFT, hence we advise users to use `prepare_in_int8_kwargs` field, or create the `PeftModel` outside the [`SFTTrainer`] and pass it. - For a more memory-efficient training using adapters, you can load the base model in 8bit, for that simply add `load_in_8bit` argument when creating the [`SFTTrainer`], or create a base model in 8bit outside the trainer and pass it. - If you create a model outside the trainer, make sure to not pass to the trainer any additional keyword arguments that are relative to `from_pretrained()` method. ## Multi-GPU Training Trainer (and thus SFTTrainer) supports multi-GPU training. If you run your script with `python script.py` it will default to using DP as the strategy, which may be [slower than expected](https://github.com/huggingface/trl/issues/1303). To use DDP (which is generally recommended, see [here](https://huggingface.co/docs/transformers/en/perf_train_gpu_many?select-gpu=Accelerate#data-parallelism) for more info) you must launch the script with `python -m torch.distributed.launch script.py` or `accelerate launch script.py`. For DDP to work you must also check the following: - If you're using gradient_checkpointing, add the following to the TrainingArguments: `gradient_checkpointing_kwargs={'use_reentrant':False}` (more info [here](https://github.com/huggingface/transformers/issues/26969) - Ensure that the model is placed on the correct device: ```python from accelerate import PartialState device_string = PartialState().process_index model = AutoModelForCausalLM.from_pretrained( ... device_map={'':device_string} ) ``` ## GPTQ Conversion You may experience some issues with GPTQ Quantization after completing training. Lowering `gradient_accumulation_steps` to `4` will resolve most issues during the quantization process to GPTQ format. ## Extending `SFTTrainer` for Vision Language Models `SFTTrainer` does not inherently support vision-language data. However, we provide a guide on how to tweak the trainer to support vision-language data. Specifically, you need to use a custom data collator that is compatible with vision-language data. This guide outlines the steps to make these adjustments. For a concrete example, refer to the script [`examples/scripts/sft_vlm.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/sft_vlm.py) which demonstrates how to fine-tune the LLaVA 1.5 model on the [HuggingFaceH4/llava-instruct-mix-vsft](https://huggingface.co/datasets/HuggingFaceH4/llava-instruct-mix-vsft) dataset. ### Preparing the Data The data format is flexible, provided it is compatible with the custom collator that we will define later. A common approach is to use conversational data. Given that the data includes both text and images, the format needs to be adjusted accordingly. 
Below is an example of a conversational data format involving both text and images: ```python images = ["obama.png"] messages = [ { "role": "user", "content": [ {"type": "text", "text": "Who is this?"}, {"type": "image"} ] }, { "role": "assistant", "content": [ {"type": "text", "text": "Barack Obama"} ] }, { "role": "user", "content": [ {"type": "text", "text": "What is he famous for?"} ] }, { "role": "assistant", "content": [ {"type": "text", "text": "He is the 44th President of the United States."} ] } ] ``` To illustrate how this data format will be processed using the LLaVA model, you can use the following code: ```python from transformers import AutoProcessor processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf") print(processor.apply_chat_template(messages, tokenize=False)) ``` The output will be formatted as follows: ```txt Who is this? ASSISTANT: Barack Obama USER: What is he famous for? ASSISTANT: He is the 44th President of the United States. ``` <iframe src="https://huggingface.co/datasets/HuggingFaceH4/llava-instruct-mix-vsft/embed/viewer/default/train" frameborder="0" width="100%" height="560px"></iframe> ### A custom collator for processing multi-modal data Unlike the default behavior of `SFTTrainer`, processing multi-modal data is done on the fly during the data collation process. To do this, you need to define a custom collator that processes both the text and images. This collator must take a list of examples as input (see the previous section for an example of the data format) and return a batch of processed data. Below is an example of such a collator: ```python def collate_fn(examples): # Get the texts and images, and apply the chat template texts = [processor.apply_chat_template(example["messages"], tokenize=False) for example in examples] images = [example["images"][0] for example in examples] # Tokenize the texts and process the images batch = processor(texts, images, return_tensors="pt", padding=True) # The labels are the input_ids, and we mask the padding tokens in the loss computation labels = batch["input_ids"].clone() labels[labels == processor.tokenizer.pad_token_id] = -100 batch["labels"] = labels return batch ``` We can verify that the collator works as expected by running the following code: ```python from datasets import load_dataset dataset = load_dataset("HuggingFaceH4/llava-instruct-mix-vsft", split="train") examples = [dataset[0], dataset[1]] # Just two examples for the sake of the example collated_data = collate_fn(examples) print(collated_data.keys()) # dict_keys(['input_ids', 'attention_mask', 'pixel_values', 'labels']) ``` ### Training the vision-language model Now that we have prepared the data and defined the collator, we can proceed with training the model. To ensure that the data is not processed as text-only, we need to set a couple of arguments in the `SFTConfig`, specifically `remove_unused_columns` and `skip_prepare_dataset` to `True` to avoid the default processing of the dataset. Below is an example of how to set up the `SFTTrainer`. 
```python training_args.remove_unused_columns = False training_args.dataset_kwargs = {"skip_prepare_dataset": True} trainer = SFTTrainer( model=model, args=training_args, data_collator=collate_fn, train_dataset=train_dataset, processing_class=processor.tokenizer, ) ``` A full example of training LLaVa 1.5 on the [HuggingFaceH4/llava-instruct-mix-vsft](https://huggingface.co/datasets/HuggingFaceH4/llava-instruct-mix-vsft) dataset can be found in the script [`examples/scripts/sft_vlm.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/sft_vlm.py). - [Experiment tracking](https://wandb.ai/huggingface/trl/runs/2b2c5l7s) - [Trained model](https://huggingface.co/HuggingFaceH4/sft-llava-1.5-7b-hf) ## SFTTrainer [[autodoc]] SFTTrainer ## SFTConfig [[autodoc]] SFTConfig ## Datasets In the SFTTrainer we smartly support `datasets.IterableDataset` in addition to other style datasets. This is useful if you are using large corpora that you do not want to save all to disk. The data will be tokenized and processed on the fly, even when packing is enabled. Additionally, in the SFTTrainer, we support pre-tokenized datasets if they are `datasets.Dataset` or `datasets.IterableDataset`. In other words, if such a dataset has a column of `input_ids`, no further processing (tokenization or packing) will be done, and the dataset will be used as-is. This can be useful if you have pretokenized your dataset outside of this script and want to re-use it directly. ### ConstantLengthDataset [[autodoc]] trainer.ConstantLengthDataset
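As a rough illustration of the class documented above (a sketch only; see the signature for the full set of arguments), you can build a packed stream directly from a text dataset:

```python
from datasets import load_dataset
from transformers import AutoTokenizer
from trl.trainer import ConstantLengthDataset

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
dataset = load_dataset("stanfordnlp/imdb", split="train")

# Streams fixed-length chunks of token ids built from the "text" column
packed_dataset = ConstantLengthDataset(
    tokenizer,
    dataset,
    dataset_text_field="text",
    seq_length=512,
    infinite=False,
)

sample = next(iter(packed_dataset))
print(sample["input_ids"].shape)  # torch.Size([512])
```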
trl/docs/source/sft_trainer.md/0
{ "file_path": "trl/docs/source/sft_trainer.md", "repo_id": "trl", "token_count": 11435 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional from datasets import load_dataset from huggingface_hub import ModelCard from transformers import AutoTokenizer, HfArgumentParser @dataclass class ScriptArguments: r""" Arguments for the script. Args: push_to_hub (`bool`, *optional*, defaults to `False`): Whether to push the dataset to the Hugging Face Hub. repo_id (`str`, *optional*, defaults to `"trl-lib/lm-human-preferences-descriptiveness"`): Hugging Face repository ID to push the dataset to. dataset_num_proc (`int` or `None`, *optional*, defaults to `None`): Number of workers to use for dataset processing. """ push_to_hub: bool = field( default=False, metadata={"help": "Whether to push the dataset to the Hugging Face Hub."}, ) repo_id: str = field( default="trl-lib/lm-human-preferences-descriptiveness", metadata={"help": "Hugging Face repository ID to push the dataset to."}, ) dataset_num_proc: Optional[int] = field( default=None, metadata={"help": "Number of workers to use for dataset processing."}, ) # Edge cases handling: remove the cases where all samples are the same def samples_not_all_same(example): return not all(example["sample0"] == example[f"sample{j}"] for j in range(1, 4)) def to_prompt_completion(example, tokenizer): prompt = tokenizer.decode(example["query"]).strip() best_idx = example["best"] chosen = tokenizer.decode(example[f"sample{best_idx}"]) for rejected_idx in range(4): # take the first rejected sample that is different from the chosen one rejected = tokenizer.decode(example[f"sample{rejected_idx}"]) if chosen != rejected: break assert chosen != rejected return {"prompt": prompt, "chosen": chosen, "rejected": rejected} model_card = ModelCard(""" --- tags: [trl] --- # LM-Human-Preferences-Descriptiveness Dataset ## Summary The LM-Human-Preferences-Descriptiveness dataset is a processed subset of [OpenAI's LM-Human-Preferences](https://github.com/openai/lm-human-preferences), focusing specifically on enhancing the descriptiveness of generated text. It contains pairs of text samples, each labeled as either "chosen" or "rejected," based on human preferences regarding the level of detail and vividness in the descriptions. This dataset enables models to learn human preferences in descriptive language, improving their ability to generate rich and engaging narratives. ## Data Structure - **Format**: [Standard](https://huggingface.co/docs/trl/main/dataset_formats#standard) - **Type**: [Preference](https://huggingface.co/docs/trl/main/dataset_formats#preference) Columns: - `"prompt"`: The text sample. - `"chosen"`: A version of the text with enhanced descriptiveness. - `"rejected"`: A version of the text with less descriptiveness. This structure allows models to learn to prefer the _chosen_ response over the _rejected_ one, thereby aligning with human preferences in descriptive language. 
## Generation script The script used to generate this dataset can be found [here](https://github.com/huggingface/trl/blob/main/examples/datasets/lm-human-preferences-descriptiveness.py). """) if __name__ == "__main__": parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] dataset = load_dataset( "json", data_files="https://openaipublic.blob.core.windows.net/lm-human-preferences/labels/descriptiveness/offline_5k.json", split="train", ) dataset = dataset.filter(samples_not_all_same, num_proc=script_args.dataset_num_proc) dataset = dataset.map( to_prompt_completion, num_proc=script_args.dataset_num_proc, remove_columns=["query", "sample0", "sample1", "sample2", "sample3", "best"], fn_kwargs={"tokenizer": AutoTokenizer.from_pretrained("gpt2")}, ) # train_size taken from https://github.com/openai/lm-human-preferences/blob/cbfd210bb8b08f6bc5c26878c10984b90f516c66/launch.py#L79) dataset = dataset.train_test_split(train_size=4992) if script_args.push_to_hub: dataset.push_to_hub(script_args.repo_id) model_card.push_to_hub(script_args.repo_id, repo_type="dataset")
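# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original script). The flags map to the
# `ScriptArguments` dataclass above; pushing requires a Hub login with write
# access to the target repository:
#
#     python examples/datasets/lm-human-preferences-descriptiveness.py \
#         --push_to_hub --repo_id <your-username>/lm-human-preferences-descriptiveness
#
# The resulting preference dataset (prompt / chosen / rejected columns, with a
# train/test split) can then be loaded back with:
#
#     from datasets import load_dataset
#     dataset = load_dataset("trl-lib/lm-human-preferences-descriptiveness", split="train")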
trl/examples/datasets/lm-human-preferences-descriptiveness.py/0
{ "file_path": "trl/examples/datasets/lm-human-preferences-descriptiveness.py", "repo_id": "trl", "token_count": 1687 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Any, Optional, Union import evaluate import numpy as np import torch import torch.nn as nn from datasets import load_dataset from peft import LoraConfig, TaskType, get_peft_model from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser, PreTrainedTokenizerBase, Trainer, TrainerCallback, TrainingArguments, set_seed, ) from transformers.utils import PaddingStrategy # Define and parse arguments. @dataclass class ScriptArguments: """ These arguments vary depending on how many GPUs you have, what their capacity and features are, and what size model you want to train. """ local_rank: Optional[int] = field(default=-1, metadata={"help": "Used for multi-gpu"}) resume_from_checkpoint: Optional[bool] = field( default=False, metadata={"help": "If you want to resume training where it left off."}, ) deepspeed: Optional[str] = field( default=None, metadata={ "help": "Path to deepspeed config if using deepspeed. You may need this if the model that you want to train doesn't fit on a single GPU." }, ) per_device_train_batch_size: Optional[int] = field(default=4) per_device_eval_batch_size: Optional[int] = field(default=1) gradient_accumulation_steps: Optional[int] = field(default=1) learning_rate: Optional[float] = field(default=2e-5) weight_decay: Optional[float] = field(default=0.001) model_name: Optional[str] = field( default="gpt2", metadata={ "help": "The model that you want to train from the Hugging Face hub. E.g. gpt2, gpt2-xl, bert, etc." }, ) tokenizer_name: Optional[str] = field( default=None, metadata={ "help": "The tokenizer for your model, if left empty will use the default for your model", }, ) bf16: Optional[bool] = field( default=True, metadata={ "help": "This essentially cuts the training time in half if you want to sacrifice a little precision and have a supported GPU." 
}, ) num_train_epochs: Optional[int] = field( default=1, metadata={"help": "The number of training epochs for the reward model."}, ) train_subset: Optional[int] = field( default=100000, metadata={"help": "The size of the subset of the training data to use"}, ) eval_subset: Optional[int] = field( default=50000, metadata={"help": "The size of the subset of the eval data to use"}, ) gradient_checkpointing: Optional[bool] = field( default=False, metadata={"help": "Enables gradient checkpointing."}, ) optim: Optional[str] = field( default="adamw_hf", metadata={"help": "The optimizer to use."}, ) lr_scheduler_type: Optional[str] = field( default="linear", metadata={"help": "The lr scheduler"}, ) max_length: Optional[int] = field(default=512) eval_first_step: Optional[bool] = field( default=False, metadata={"help": "Whether to run eval after the first step"}, ) seed: Optional[int] = field( default=0, metadata={"help": "Random seed that will be set at the beginning of training."} ) parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] set_seed(script_args.seed) # Load the human stack-exchange-paired dataset for tuning the reward model. train_dataset = load_dataset( "lvwerra/stack-exchange-paired", data_dir="data/reward", split="train", verification_mode="no_checks" ) if script_args.train_subset > 0: train_dataset = train_dataset.select(range(script_args.train_subset)) eval_dataset = load_dataset( "lvwerra/stack-exchange-paired", data_dir="data/evaluation", split="train", verification_mode="no_checks" ) if script_args.eval_subset > 0: eval_dataset = eval_dataset.select(range(script_args.eval_subset)) # Define the training args. Needs to be done before the model is loaded if you are using deepspeed. model_name_split = script_args.model_name.split("/")[-1] output_name = ( f"{model_name_split}_peft_stack-exchange-paired_rmts__{script_args.train_subset}_{script_args.learning_rate}" ) training_args = TrainingArguments( output_dir=output_name, learning_rate=script_args.learning_rate, per_device_train_batch_size=script_args.per_device_train_batch_size, per_device_eval_batch_size=script_args.per_device_eval_batch_size, num_train_epochs=script_args.num_train_epochs, weight_decay=script_args.weight_decay, eval_strategy="steps", eval_steps=500, save_strategy="steps", save_steps=500, gradient_accumulation_steps=script_args.gradient_accumulation_steps, gradient_checkpointing=script_args.gradient_checkpointing, deepspeed=script_args.deepspeed, local_rank=script_args.local_rank, remove_unused_columns=False, label_names=[], bf16=script_args.bf16, logging_strategy="steps", logging_steps=10, optim=script_args.optim, lr_scheduler_type=script_args.lr_scheduler_type, seed=script_args.seed, ) # Load the value-head model and tokenizer. tokenizer_name = script_args.tokenizer_name if script_args.tokenizer_name is not None else script_args.model_name tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_auth_token=True) tokenizer.pad_token = tokenizer.eos_token peft_config = LoraConfig( task_type=TaskType.SEQ_CLS, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1, ) model = AutoModelForSequenceClassification.from_pretrained( script_args.model_name, num_labels=1, torch_dtype=torch.bfloat16 ) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # Need to do this for gpt2, because it doesn't have an official pad token. 
tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = tokenizer.eos_token_id model.config.use_cache = not script_args.gradient_checkpointing num_proc = 24 # Can adjust to be higher if you have more processors. original_columns = train_dataset.column_names # Turn the dataset into pairs of post + summaries, where text_j is the preferred question + answer and text_k is the other. # Then tokenize the dataset. def preprocess_function(examples): new_examples = { "input_ids_j": [], "attention_mask_j": [], "input_ids_k": [], "attention_mask_k": [], } for question, response_j, response_k in zip(examples["question"], examples["response_j"], examples["response_k"]): tokenized_j = tokenizer("Question: " + question + "\n\nAnswer: " + response_j, truncation=True) tokenized_k = tokenizer("Question: " + question + "\n\nAnswer: " + response_k, truncation=True) new_examples["input_ids_j"].append(tokenized_j["input_ids"]) new_examples["attention_mask_j"].append(tokenized_j["attention_mask"]) new_examples["input_ids_k"].append(tokenized_k["input_ids"]) new_examples["attention_mask_k"].append(tokenized_k["attention_mask"]) return new_examples # preprocess the dataset and filter out QAs that are longer than script_args.max_length train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=num_proc, remove_columns=original_columns, ) train_dataset = train_dataset.filter( lambda x: len(x["input_ids_j"]) <= script_args.max_length and len(x["input_ids_k"]) <= script_args.max_length, num_proc=num_proc, ) eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=num_proc, remove_columns=original_columns, ) eval_dataset = eval_dataset.filter( lambda x: len(x["input_ids_j"]) <= script_args.max_length and len(x["input_ids_k"]) <= script_args.max_length, num_proc=num_proc, ) # We need to define a special data collator that batches the data in our j vs k format. @dataclass class RewardDataCollatorWithPadding: tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True pad_to_multiple_of: Optional[int] = None return_tensors: str = "pt" def __call__(self, features: list[dict[str, Any]]) -> dict[str, Any]: features_j = [] features_k = [] for feature in features: features_j.append( { "input_ids": feature["input_ids_j"], "attention_mask": feature["attention_mask_j"], } ) features_k.append( { "input_ids": feature["input_ids_k"], "attention_mask": feature["attention_mask_k"], } ) batch_j = self.tokenizer.pad( features_j, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors, ) batch_k = self.tokenizer.pad( features_k, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors, ) batch = { "input_ids_j": batch_j["input_ids"], "attention_mask_j": batch_j["attention_mask"], "input_ids_k": batch_k["input_ids"], "attention_mask_k": batch_k["attention_mask"], "return_loss": True, } return batch # Define the metric that we'll use for validation. accuracy = evaluate.load("accuracy") def compute_metrics(eval_pred): predictions, _ = eval_pred # Here, predictions is rewards_j and rewards_k. # We want to see how much of the time rewards_j > rewards_k. predictions = np.argmax(predictions, axis=0) labels = np.zeros(predictions.shape) return accuracy.compute(predictions=predictions, references=labels) class RewardTrainer(Trainer): # Define how to compute the reward loss. 
    # We use the InstructGPT pairwise logloss: https://huggingface.co/papers/2203.02155
    def compute_loss(self, model, inputs, return_outputs=False):
        rewards_j = model(input_ids=inputs["input_ids_j"], attention_mask=inputs["attention_mask_j"])[0]
        rewards_k = model(input_ids=inputs["input_ids_k"], attention_mask=inputs["attention_mask_k"])[0]
        loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean()
        if return_outputs:
            return loss, {"rewards_j": rewards_j, "rewards_k": rewards_k}
        return loss


# Train the model, woohoo.
trainer = RewardTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    compute_metrics=compute_metrics,
    data_collator=RewardDataCollatorWithPadding(tokenizer=tokenizer),
)


if script_args.eval_first_step:

    class EvaluateFirstStepCallback(TrainerCallback):
        def on_step_end(self, args, state, control, **kwargs):
            if state.global_step == 1:
                control.should_evaluate = True

    trainer.add_callback(EvaluateFirstStepCallback())

trainer.train(script_args.resume_from_checkpoint)

print("Saving last checkpoint of the model")
model.save_pretrained(output_name + "_peft_last_checkpoint")
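# ---------------------------------------------------------------------------
# Illustrative follow-up (not part of the original script): a rough sketch of
# how the saved LoRA adapter could be reloaded to score a question/answer pair.
# The adapter path mirrors the `save_pretrained` call above, and the base model
# must match `script_args.model_name` used for training (gpt2 by default).
#
#     from peft import PeftModel
#     from transformers import AutoModelForSequenceClassification, AutoTokenizer
#
#     base = AutoModelForSequenceClassification.from_pretrained("gpt2", num_labels=1)
#     reward_model = PeftModel.from_pretrained(base, output_name + "_peft_last_checkpoint")
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     tok.pad_token = tok.eos_token
#
#     inputs = tok("Question: ...\n\nAnswer: ...", return_tensors="pt")
#     reward = reward_model(**inputs).logits[0]  # higher score = preferred answer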
trl/examples/research_projects/stack_llama/scripts/reward_modeling.py/0
{ "file_path": "trl/examples/research_projects/stack_llama/scripts/reward_modeling.py", "repo_id": "trl", "token_count": 4691 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import itertools import tempfile import unittest import torch from accelerate.utils.memory import release_memory from datasets import load_dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig from transformers.testing_utils import require_peft, require_torch_accelerator, torch_device from transformers.utils import is_peft_available from trl import DPOConfig, DPOTrainer from ..testing_utils import require_bitsandbytes from .testing_constants import DPO_LOSS_TYPES, DPO_PRECOMPUTE_LOGITS, GRADIENT_CHECKPOINTING_KWARGS, MODELS_TO_TEST if is_peft_available(): from peft import LoraConfig, PeftModel @require_torch_accelerator class DPOTrainerSlowTester(unittest.TestCase): def setUp(self): self.dataset = load_dataset("trl-internal-testing/zen", "standard_preference") self.peft_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=8, bias="none", task_type="CAUSAL_LM", ) self.max_length = 128 def tearDown(self): gc.collect() if torch_device == "cpu": torch.cuda.empty_cache() elif torch_device == "xpu": torch.xpu.empty_cache() gc.collect() @parameterized.expand(list(itertools.product(MODELS_TO_TEST, DPO_LOSS_TYPES, DPO_PRECOMPUTE_LOGITS))) def test_dpo_bare_model(self, model_id, loss_type, pre_compute_logits): """ A test that tests the simple usage of `DPOTrainer` using a bare model in full precision. """ model = AutoModelForCausalLM.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token if tokenizer.pad_token is None else tokenizer.pad_token with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=2, remove_unused_columns=False, gradient_accumulation_steps=2, learning_rate=9e-1, eval_strategy="steps", fp16=True, logging_strategy="no", report_to="none", beta=0.1, loss_type=loss_type, precompute_ref_log_probs=pre_compute_logits, max_length=self.max_length, ) # dpo train lora model trainer = DPOTrainer( model=model, ref_model=None, args=training_args, train_dataset=self.dataset["train"], eval_dataset=self.dataset["test"], processing_class=tokenizer, ) # train the model trainer.train() # save trained model or adapter trainer.save_model() release_memory(model, trainer) @parameterized.expand( list( itertools.product( MODELS_TO_TEST, DPO_LOSS_TYPES, DPO_PRECOMPUTE_LOGITS, GRADIENT_CHECKPOINTING_KWARGS, ) ) ) @require_peft def test_dpo_peft_model(self, model_id, loss_type, pre_compute_logits, gradient_checkpointing_kwargs): """ A test that tests the simple usage of `DPOTrainer` using a peft model in full precision + different scenarios of gradient checkpointing. 
""" model = AutoModelForCausalLM.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token if tokenizer.pad_token is None else tokenizer.pad_token with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=2, remove_unused_columns=False, gradient_accumulation_steps=2, learning_rate=9e-1, eval_strategy="steps", fp16=True, logging_strategy="no", report_to="none", gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, generate_during_eval=False, loss_type=loss_type, precompute_ref_log_probs=pre_compute_logits, beta=0.1, max_length=self.max_length, ) # dpo train lora model trainer = DPOTrainer( model=model, ref_model=None, args=training_args, train_dataset=self.dataset["train"], eval_dataset=self.dataset["test"], processing_class=tokenizer, peft_config=self.peft_config, ) self.assertIsInstance(trainer.model, PeftModel) self.assertIsNone(trainer.ref_model) # train the model trainer.train() # save trained model or adapter trainer.save_model() release_memory(model, trainer) @parameterized.expand( list( itertools.product( MODELS_TO_TEST, DPO_LOSS_TYPES, DPO_PRECOMPUTE_LOGITS, GRADIENT_CHECKPOINTING_KWARGS, ) ) ) @require_bitsandbytes @require_peft def test_dpo_peft_model_qlora(self, model_id, loss_type, pre_compute_logits, gradient_checkpointing_kwargs): """ A test that tests the simple usage of `DPOTrainer` using QLoRA + different scenarios of gradient checkpointing. """ quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token if tokenizer.pad_token is None else tokenizer.pad_token with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=2, remove_unused_columns=False, gradient_accumulation_steps=2, learning_rate=9e-1, eval_strategy="steps", fp16=True, logging_strategy="no", report_to="none", gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, beta=0.1, generate_during_eval=False, loss_type=loss_type, precompute_ref_log_probs=pre_compute_logits, max_length=self.max_length, ) # dpo train lora model trainer = DPOTrainer( model=model, ref_model=None, args=training_args, train_dataset=self.dataset["train"], eval_dataset=self.dataset["test"], processing_class=tokenizer, peft_config=self.peft_config, ) self.assertIsInstance(trainer.model, PeftModel) self.assertIsNone(trainer.ref_model) # train the model trainer.train() # save trained model or adapter trainer.save_model() release_memory(model, trainer)
trl/tests/slow/test_dpo_slow.py/0
{ "file_path": "trl/tests/slow/test_dpo_slow.py", "repo_id": "trl", "token_count": 4082 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from unittest.mock import MagicMock import numpy as np import torch from datasets import Dataset, features, load_dataset from parameterized import parameterized from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForVision2Seq, AutoProcessor, AutoTokenizer, PreTrainedTokenizerBase, is_vision_available, ) from transformers.testing_utils import require_peft, require_torch_gpu_if_bnb_not_multi_backend_enabled, require_vision from trl import DPOConfig, DPOTrainer, FDivergenceType from .testing_utils import require_bitsandbytes, require_no_wandb if is_vision_available(): from PIL import Image class TestTokenizeRow(unittest.TestCase): def setUp(self): # Set up the mock tokenizer with specific behaviors self.tokenizer = MagicMock(spec=PreTrainedTokenizerBase) self.tokenizer.bos_token_id = 0 self.tokenizer.eos_token_id = 2 # Define mock return values for the tokenizer's 'input_ids' for the different text inputs self.tokenizer.return_value = { "input_ids": {"The sky is": [464, 6766, 318], " blue": [4171], " green": [4077]} } # Define tokenizer behavior when called def mock_tokenizer_call(text, add_special_tokens): token_map = { "The sky is": {"input_ids": [464, 6766, 318]}, " blue": {"input_ids": [4171]}, " green": {"input_ids": [4077]}, } return token_map[text] self.tokenizer.side_effect = mock_tokenizer_call def test_tokenize_row_no_truncation_no_special_tokens(self): # Define the input features features = {"prompt": "The sky is", "chosen": " blue", "rejected": " green"} # Call the method with no truncation and no special tokens result = DPOTrainer.tokenize_row( features=features, processing_class=self.tokenizer, max_prompt_length=None, max_completion_length=None, add_special_tokens=False, ) # Assert the correct output without truncation or special tokens self.assertEqual( result, { "prompt_input_ids": [464, 6766, 318], "chosen_input_ids": [4171, 2], # eos_token added "rejected_input_ids": [4077, 2], # eos_token added }, ) def test_tokenize_row_with_truncation(self): # Define the input features features = {"prompt": "The sky is", "chosen": " blue", "rejected": " green"} # Call the method with truncation result = DPOTrainer.tokenize_row( features=features, processing_class=self.tokenizer, max_prompt_length=2, max_completion_length=1, add_special_tokens=False, ) # Assert the correct output with truncation applied self.assertEqual( result, { "prompt_input_ids": [6766, 318], # truncated to the last 2 tokens "chosen_input_ids": [4171], # truncated to 1 token "rejected_input_ids": [4077], # truncated to 1 token }, ) def test_tokenize_row_with_special_tokens(self): # Define the input features features = {"prompt": "The sky is", "chosen": " blue", "rejected": " green"} # Call the method with special tokens result = DPOTrainer.tokenize_row( features=features, processing_class=self.tokenizer, max_prompt_length=None, max_completion_length=None, 
add_special_tokens=True, ) # Assert the correct output with special tokens added self.assertEqual( result, { "prompt_input_ids": [0, 464, 6766, 318, 2], # bos_token and eos_token added "chosen_input_ids": [4171, 2], # eos_token added "rejected_input_ids": [4077, 2], # eos_token added }, ) def test_tokenize_row_with_truncation_and_special_tokens(self): # Define the input features features = {"prompt": "The sky is", "chosen": " blue", "rejected": " green"} # Call the method with both truncation and special tokens result = DPOTrainer.tokenize_row( features=features, processing_class=self.tokenizer, max_prompt_length=4, max_completion_length=1, add_special_tokens=True, ) # Assert the correct output with both truncation and special tokens self.assertEqual( result, { "prompt_input_ids": [464, 6766, 318, 2], # truncated to 4 tokens with bos_token and eos_token "chosen_input_ids": [4171], # truncated to 1 token "rejected_input_ids": [4077], # truncated to 1 token }, ) class DPOTrainerTester(unittest.TestCase): def setUp(self): self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.model = AutoModelForCausalLM.from_pretrained(self.model_id) self.ref_model = AutoModelForCausalLM.from_pretrained(self.model_id) self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.tokenizer.pad_token = self.tokenizer.eos_token # get t5 as seq2seq example: model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration" self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) self.t5_ref_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) @parameterized.expand( [ ("qwen", "sigmoid", True), ("t5", "hinge", False), ("qwen", "ipo", False), ("t5", "ipo", True), ("qwen", "aot_pair", True), ("t5", "aot_pair", False), ("qwen", "aot", True), ("t5", "aot", False), ("qwen", "bco_pair", False), ("t5", "bco_pair", True), ("qwen", "sppo_hard", False), ("t5", "sppo_hard", True), ("qwen", "nca_pair", False), ("t5", "nca_pair", True), ("qwen", "robust", True), ("qwen", "exo_pair", False), ("t5", "exo_pair", True), ("qwen", "apo_zero", True), ("t5", "apo_down", False), ("qwen", "discopop", False), ] ) def test_dpo_trainer(self, name, loss_type, pre_compute): with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", beta=0.1, loss_type=loss_type, precompute_ref_log_probs=pre_compute, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") if name == "qwen": model = self.model ref_model = self.ref_model tokenizer = self.tokenizer elif name == "t5": model = self.t5_model ref_model = self.t5_ref_model tokenizer = self.t5_tokenizer trainer = DPOTrainer( model=model, ref_model=ref_model, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) def test_dpo_trainer_with_weighting(self): with tempfile.TemporaryDirectory() as tmp_dir: 
training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", beta=0.1, loss_type="sigmoid", precompute_ref_log_probs=False, use_weighting=True, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") trainer = DPOTrainer( model=self.model, ref_model=self.ref_model, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) @parameterized.expand( [ (None, "Test when rpo_alpha is set to None"), (0.5, "Test when rpo_alpha is set to 0.5"), ] ) def test_dpo_trainer_without_providing_ref_model(self, rpo_alpha, _): with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", beta=0.1, precompute_ref_log_probs=True, rpo_alpha=rpo_alpha, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") trainer = DPOTrainer( model=self.model, ref_model=None, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.equal(param, new_param)) def test_dpo_trainer_with_ref_model_is_model(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") with self.assertRaises(ValueError): DPOTrainer( model=self.model, ref_model=self.model, # ref_model can't be the same as model args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], ) def test_precompute_ref_batch_size(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, precompute_ref_log_probs=True, precompute_ref_batch_size=4, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") trainer = DPOTrainer( model=self.model, ref_model=self.ref_model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases 
self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) @require_peft def test_dpo_trainer_without_providing_ref_model_with_lora(self): from peft import LoraConfig lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", beta=0.1, precompute_ref_log_probs=True, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") trainer = DPOTrainer( model=self.model, ref_model=None, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], peft_config=lora_config, ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): if "lora" in n: new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.equal(param, new_param)) def test_dpo_trainer_padding_token_is_none(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", beta=0.1, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") tokenizer = AutoTokenizer.from_pretrained(self.model_id) tokenizer.pad_token = None with self.assertRaisesRegex( ValueError, expected_regex=r"`padding_value` is not specified in `DPOConfig`, and `pad_token_id` is missing in " r"the `processing_class`. 
Please either set the `padding_value` argument in `DPOConfig`, or set " r"`tokenizer.pad_token` \(e.g., `tokenizer.pad_token = tokenizer.eos_token`\) before instantiating " r"the trainer.", ): trainer = DPOTrainer( model=self.model, ref_model=None, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) trainer.train() def test_dpo_trainer_w_dataset_num_proc(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", beta=0.1, dataset_num_proc=5, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") tokenizer = AutoTokenizer.from_pretrained(self.model_id) trainer = DPOTrainer( model=self.model, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) trainer.train() def test_tr_dpo_trainer(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", precompute_ref_log_probs=False, sync_ref_model=True, ref_model_mixup_alpha=0.5, ref_model_sync_steps=1, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") trainer = DPOTrainer( model=self.model, ref_model=self.ref_model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) # params of the ref model as its the same as the model previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.ref_model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.equal(param, new_param)) @require_no_wandb def test_dpo_trainer_generate_during_eval_no_wandb(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", beta=0.1, generate_during_eval=True, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") with self.assertRaisesRegex( ValueError, expected_regex="`generate_during_eval=True` requires Weights and Biases or Comet to be installed." 
" Please install `wandb` or `comet-ml` to resolve.", ): DPOTrainer( model=self.model, ref_model=None, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) @require_peft def test_dpo_lora_save(self): from peft import LoraConfig, get_peft_model lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) # lora model model = AutoModelForCausalLM.from_pretrained(self.model_id) model_peft = get_peft_model(model, lora_config) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", beta=0.1, precompute_ref_log_probs=True, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") # dpo train lora model with a lora config trainer = DPOTrainer( model=model_peft, ref_model=None, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], peft_config=lora_config, ) # train the model trainer.train() # save peft adapter trainer.save_model() try: AutoModelForCausalLM.from_pretrained(tmp_dir) except OSError: self.fail("Loading the saved peft adapter failed") @require_peft @require_torch_gpu_if_bnb_not_multi_backend_enabled def test_dpo_lora_bf16_autocast_llama(self): # Note this test only works on compute capability > 7 GPU devices from peft import LoraConfig model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) # lora model model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", bf16=True, beta=0.1, generate_during_eval=True, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") # dpo train lora model with a lora config trainer = DPOTrainer( model=model, ref_model=None, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], peft_config=lora_config, ) # train the model trainer.train() # save peft adapter trainer.save_model() @parameterized.expand( [ ("sigmoid", False, False), ("sigmoid", False, True), ("sigmoid", True, False), ("sigmoid", True, True), ("ipo", False, False), ("ipo", False, True), ("ipo", True, False), ("ipo", True, True), ("aot_pair", False, False), ("aot_pair", False, True), ("aot_pair", True, False), ("aot_pair", True, True), ("aot", False, False), ("aot", False, True), ("aot", True, False), ("aot", True, True), ("bco_pair", False, False), ("bco_pair", False, True), ("bco_pair", True, False), ("bco_pair", True, True), ("robust", False, False), ("robust", False, True), ("robust", True, False), ("robust", True, True), ] ) @require_bitsandbytes @require_peft @unittest.skip("You need a GPU with bf16 support in order to run these tests") def test_dpo_lora_bf16_autocast(self, loss_type, pre_compute, gen_during_eval): # Note this test only works on compute capability > 7 GPU devices from peft import LoraConfig lora_config = LoraConfig( r=16, lora_alpha=32, 
lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) # lora model model = AutoModelForCausalLM.from_pretrained(self.model_id, load_in_4bit=True) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", bf16=True, beta=0.1, generate_during_eval=gen_during_eval, loss_type=loss_type, precompute_ref_log_probs=pre_compute, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") # dpo train lora model with a lora config trainer = DPOTrainer( model=model, ref_model=None, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], peft_config=lora_config, ) # train the model trainer.train() # save peft adapter trainer.save_model() @require_peft def test_dpo_lora_tags(self): from peft import LoraConfig model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) # lora model model = AutoModelForCausalLM.from_pretrained(model_id) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", beta=0.1, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") # dpo train lora model with a lora config trainer = DPOTrainer( model=model, ref_model=None, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], peft_config=lora_config, ) for tag in ["dpo", "trl"]: self.assertIn(tag, trainer.model.model_tags) @require_peft def test_dpo_tags(self): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) # lora model model = AutoModelForCausalLM.from_pretrained(model_id) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", beta=0.1, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") # dpo train lora model with a lora config trainer = DPOTrainer( model=model, ref_model=None, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) for tag in ["dpo", "trl"]: self.assertIn(tag, trainer.model.model_tags) @require_peft def test_dpo_lora_force_use_ref(self): from peft import LoraConfig, get_peft_model lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) # lora model model = AutoModelForCausalLM.from_pretrained(self.model_id) model_peft = get_peft_model(model, lora_config) ref_model = AutoModelForCausalLM.from_pretrained(self.model_id) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", beta=0.1, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") with 
self.assertRaises(ValueError): # passing a peft_model as model and ref_model should error out, # unless you pass `force_use_ref_model` trainer = DPOTrainer( model=model_peft, ref_model=ref_model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], peft_config=lora_config, ) training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", beta=0.1, force_use_ref_model=True, report_to="none", ) trainer = DPOTrainer( model=model_peft, ref_model=ref_model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], peft_config=lora_config, ) # train the model trainer.train() def test_dpo_trainer_torch_dtype(self): # See https://github.com/huggingface/trl/issues/1751 dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=1, model_init_kwargs={"torch_dtype": "float16"}, ref_model_init_kwargs={"torch_dtype": "float16"}, report_to="none", ) trainer = DPOTrainer( model=self.model_id, ref_model=self.model_id, processing_class=self.tokenizer, args=training_args, train_dataset=dummy_dataset["train"], ) self.assertEqual(trainer.model.config.torch_dtype, torch.float16) self.assertEqual(trainer.ref_model.config.torch_dtype, torch.float16) # Now test when `torch_dtype` is provided but is wrong to either the model or the ref_model with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=1, model_init_kwargs={"torch_dtype": -1}, report_to="none", ) with self.assertRaises(ValueError) as context: _ = DPOTrainer( model=self.model_id, processing_class=self.tokenizer, args=training_args, train_dataset=dummy_dataset["train"], ) self.assertIn( "Invalid `torch_dtype` passed to the DPOConfig. Expected a string with either `torch.dtype` or 'auto', but got -1.", str(context.exception), ) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=1, ref_model_init_kwargs={"torch_dtype": -1}, report_to="none", ) with self.assertRaises(ValueError) as context: _ = DPOTrainer( model=self.model_id, ref_model=self.model_id, processing_class=self.tokenizer, args=training_args, train_dataset=dummy_dataset["train"], ) self.assertIn( "Invalid `torch_dtype` passed to the DPOConfig. 
Expected a string with either `torch.dtype` or 'auto', but got -1.", str(context.exception), ) def test_dpo_loss_alpha_div_f(self): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) # lora model model = AutoModelForCausalLM.from_pretrained(model_id) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", f_divergence_type=FDivergenceType.ALPHA_DIVERGENCE.value, f_alpha_divergence_coef=0.5, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") # dpo train lora model with a lora config trainer = DPOTrainer( model=model, ref_model=None, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) # Fake chosen and rejected log probs policy_chosen_logps = torch.FloatTensor([410.0, 0.1]) policy_rejected_logps = torch.FloatTensor([810.5, 0.2]) reference_chosen_logps = torch.FloatTensor([-610.0, -0.1]) reference_rejected_logps = torch.FloatTensor([110.6, 0.5]) losses, _, _ = trainer.dpo_loss( policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps ) self.assertTrue(torch.isfinite(losses).cpu().numpy().all()) def test_dpo_loss_js_div_f(self): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" tokenizer = AutoTokenizer.from_pretrained(model_id) # lora model model = AutoModelForCausalLM.from_pretrained(model_id) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", f_divergence_type=FDivergenceType.JS_DIVERGENCE.value, f_alpha_divergence_coef=0.5, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") # dpo train lora model with a lora config trainer = DPOTrainer( model=model, ref_model=None, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) # Fake chosen and rejected log probs policy_chosen_logps = torch.FloatTensor([410.0, 0.1]) policy_rejected_logps = torch.FloatTensor([95.5, 0.2]) reference_chosen_logps = torch.FloatTensor([-610.0, -0.1]) reference_rejected_logps = torch.FloatTensor([5.5, 0.5]) losses, _, _ = trainer.dpo_loss( policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps ) self.assertTrue(torch.isfinite(losses).cpu().numpy().all()) def test_dpo_trainer_use_logits_to_keep(self): model_id = "trl-internal-testing/tiny-LlamaForCausalLM-3.2" tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained(model_id) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", beta=0.1, use_logits_to_keep=True, rpo_alpha=0.5, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") # dpo train lora model with a lora config trainer = DPOTrainer( model=model, ref_model=None, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], 
eval_dataset=dummy_dataset["test"], ) training_args.use_logits_to_keep = False trainer2 = DPOTrainer( model=model, ref_model=None, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) # Fake batch prompt_input_ids = torch.randint(1, 1000, (2, 10)) chosen_input_ids = torch.randint(1, 1000, (2, 5)) rejected_input_ids = torch.randint(1, 1000, (2, 7)) prompt_attention_mask = torch.ones_like(prompt_input_ids) chosen_attention_mask = torch.ones_like(chosen_input_ids) rejected_attention_mask = torch.ones_like(rejected_input_ids) batch = { "prompt_input_ids": prompt_input_ids.to(model.device), "chosen_input_ids": chosen_input_ids.to(model.device), "rejected_input_ids": rejected_input_ids.to(model.device), "prompt_attention_mask": prompt_attention_mask.to(model.device), "chosen_attention_mask": chosen_attention_mask.to(model.device), "rejected_attention_mask": rejected_attention_mask.to(model.device), } output = trainer.concatenated_forward(model, batch) output2 = trainer2.concatenated_forward(model, batch) np.testing.assert_allclose(output["nll_loss"].item(), output2["nll_loss"].item(), atol=1e-5) np.testing.assert_allclose( output["mean_chosen_logits"].item(), output2["mean_chosen_logits"].item(), atol=1e-5 ) np.testing.assert_allclose( output["mean_rejected_logits"].item(), output2["mean_rejected_logits"].item(), atol=1e-5 ) for i in range(output["chosen_logps"].shape[0]): np.testing.assert_allclose( output["chosen_logps"][i].item(), output2["chosen_logps"][i].item(), atol=1e-5 ) np.testing.assert_allclose( output["rejected_logps"][i].item(), output2["rejected_logps"][i].item(), atol=1e-5 ) trainer.train() def test_dpo_trainer_with_tools(self): model_id = "trl-internal-testing/tiny-LlamaForCausalLM-3.2" tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained(model_id) # Define dummy test tools def get_current_temperature(location: str): """ Gets the temperature at a given location. Args: location: The location to get the temperature for """ return 22.0 with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, tools=[get_current_temperature], ) dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference") trainer = DPOTrainer( model=model, ref_model=None, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) # We don't run the training, but at this stage, the dataset is supposed to be pre-processed. When # pre-processing, we expect the available tools to be explicitly mentioned in the system prompt. That's # what we're checking here self.assertIn("get_current_temperature", tokenizer.decode(trainer.train_dataset["prompt_input_ids"][0])) def test_padding_free(self): model_id = "trl-internal-testing/tiny-LlamaForCausalLM-3.2" tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token # Normally, we need `attn_implementation="flash_attention_2"` to that the model returns correct logits. # Without it, the logits may be incorrect, but that's fine here. This test focuses only on the inner logic # of padding_free. 
model = AutoModelForCausalLM.from_pretrained(model_id) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, learning_rate=9e-1, per_device_train_batch_size=2, padding_free=True, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") trainer = DPOTrainer( model=model, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) @require_vision class DPOVisionTrainerTester(unittest.TestCase): @parameterized.expand( [ ("trl-internal-testing/tiny-Idefics2ForConditionalGeneration",), # ("trl-internal-testing/tiny-PaliGemmaForConditionalGeneration",), ("trl-internal-testing/tiny-LlavaForConditionalGeneration",), ("trl-internal-testing/tiny-LlavaNextForConditionalGeneration",), ] ) def test_vdpo_trainer(self, model_id): # fmt: off dataset_dict = { "prompt": [ [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Describe the image in great detail."}]}], [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Is this bus in the USA?"}]}], [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Give a thorough description of the image."}]}], [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Who are the people in the image?"}]}], [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What is written?"}]}], ], "chosen": [ [{"role": "assistant", "content": [{"type": "text", "text": "The image features a modern, multi-colored train."}]}], [{"role": "assistant", "content": [{"type": "text", "text": "Yes, it can be assumed that this bus is in the USA."}]}], [{"role": "assistant", "content": [{"type": "text", "text": "The image features a forest path."}]}], [{"role": "assistant", "content": [{"type": "text", "text": "There are two individuals, possibly girls or women."}]}], [{"role": "assistant", "content": [{"type": "text", "text": '"ccpb".'}]}], ], "rejected": [ [{"role": "assistant", "content": [{"type": "text", "text": "The image features a modern, colorful train."}]}], [{"role": "assistant", "content": [{"type": "text", "text": "No, it's not in the USA."}]}], [{"role": "assistant", "content": [{"type": "text", "text": "The image features a forest path surrounded by trees."}]}], [{"role": "assistant", "content": [{"type": "text", "text": "In the image, there are two individuals."}]}], [{"role": "assistant", "content": [{"type": "text", "text": '"ccpb".'}]}], ], "images": [ [Image.fromarray(np.random.randint(0, 255, (92, 33, 3), dtype=np.uint8))], [Image.fromarray(np.random.randint(0, 255, (64, 48, 3), dtype=np.uint8))], [Image.fromarray(np.random.randint(0, 255, (80, 152, 3), dtype=np.uint8))], [Image.fromarray(np.random.randint(0, 255, (57, 24, 3), dtype=np.uint8))], [Image.fromarray(np.random.randint(0, 255, (102, 48, 3), dtype=np.uint8))], ], } # fmt: on dataset = Dataset.from_dict(dataset_dict) dataset = dataset.cast_column("images", features.Sequence(features.Image())) # Instantiate the model and processor model = AutoModelForVision2Seq.from_pretrained(model_id) ref_model = AutoModelForVision2Seq.from_pretrained(model_id) 
processor = AutoProcessor.from_pretrained(model_id) with tempfile.TemporaryDirectory() as tmp_dir: training_args = DPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, remove_unused_columns=False, learning_rate=0.01, # increase learning rate to speed up test max_prompt_length=None, # don't truncate to avoid issues with patch tokens max_length=None, report_to="none", ) trainer = DPOTrainer( model=model, ref_model=ref_model, args=training_args, processing_class=processor, train_dataset=dataset, eval_dataset=dataset, ) # Save the initial weights, so we can check if they have changed after training previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the trainable params have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases if model_id in [ "trl-internal-testing/tiny-LlavaForConditionalGeneration", "trl-internal-testing/tiny-LlavaNextForConditionalGeneration", ] and ( n.startswith("vision_tower.vision_model.encoder.layers.1") or n == "vision_tower.vision_model.post_layernorm.weight" ): # For some reason, these params are not updated. This is probably not related to TRL, but to # the model itself. We should investigate this further, but for now we just skip these params. continue self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) if __name__ == "__main__": unittest.main()
trl/tests/test_dpo_trainer.py/0
{ "file_path": "trl/tests/test_dpo_trainer.py", "repo_id": "trl", "token_count": 27027 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch import torch.nn as nn from datasets import Dataset from transformers import Trainer, TrainingArguments from trl.trainer.callbacks import RichProgressCallback class DummyModel(nn.Module): def __init__(self): super().__init__() self.a = nn.Parameter(torch.tensor(1.0)) def forward(self, x): return self.a * x class TestRichProgressCallback(unittest.TestCase): def setUp(self): self.dummy_model = DummyModel() self.dummy_train_dataset = Dataset.from_list([{"x": 1.0, "y": 2.0}] * 5) self.dummy_val_dataset = Dataset.from_list([{"x": 1.0, "y": 2.0}] * 101) def test_rich_progress_callback_logging(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, per_device_eval_batch_size=2, per_device_train_batch_size=2, num_train_epochs=4, eval_strategy="steps", eval_steps=1, logging_strategy="steps", logging_steps=1, save_strategy="no", report_to="none", disable_tqdm=True, ) callbacks = [RichProgressCallback()] trainer = Trainer( model=self.dummy_model, train_dataset=self.dummy_train_dataset, eval_dataset=self.dummy_val_dataset, args=training_args, callbacks=callbacks, ) trainer.train() trainer.train()
trl/tests/test_rich_progress_callback.py/0
{ "file_path": "trl/tests/test_rich_progress_callback.py", "repo_id": "trl", "token_count": 986 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import Callable, Literal, Optional, Union from datasets import Dataset, Value from transformers import AutoTokenizer from ..trainer.utils import ConstantLengthDataset FORMAT_MAPPING = { "chatml": [{"content": Value(dtype="string", id=None), "role": Value(dtype="string", id=None)}], "instruction": {"completion": Value(dtype="string", id=None), "prompt": Value(dtype="string", id=None)}, } def conversations_formatting_function( tokenizer: AutoTokenizer, messages_field: Literal["messages", "conversations"], tools: Optional[list] = None ): r""" return a callable function that takes in a "messages" dataset and returns a formatted dataset, based on the tokenizer apply chat template to the dataset along with the schema of the list of functions in the tools list. """ def format_dataset(examples): if isinstance(examples[messages_field][0], list): output_texts = [] for i in range(len(examples[messages_field])): output_texts.append( tokenizer.apply_chat_template(examples[messages_field][i], tokenize=False, tools=tools) ) return output_texts else: return tokenizer.apply_chat_template(examples[messages_field], tokenize=False, tools=tools) return format_dataset def instructions_formatting_function(tokenizer: AutoTokenizer): r""" return a callable function that takes in an "instructions" dataset and returns a formatted dataset, based on the tokenizer apply chat template to the dataset """ def format_dataset(examples): if isinstance(examples["prompt"], list): output_texts = [] for i in range(len(examples["prompt"])): converted_sample = [ {"role": "user", "content": examples["prompt"][i]}, {"role": "assistant", "content": examples["completion"][i]}, ] output_texts.append(tokenizer.apply_chat_template(converted_sample, tokenize=False)) return output_texts else: converted_sample = [ {"role": "user", "content": examples["prompt"]}, {"role": "assistant", "content": examples["completion"]}, ] return tokenizer.apply_chat_template(converted_sample, tokenize=False) return format_dataset def get_formatting_func_from_dataset( dataset: Union[Dataset, ConstantLengthDataset], tokenizer: AutoTokenizer, tools: Optional[list] = None ) -> Optional[Callable]: r""" Finds the correct formatting function based on the dataset structure. 
    Currently supported datasets are:
    - `ChatML` with [{"role": str, "content": str}]
    - `instruction` with [{"prompt": str, "completion": str}]

    Args:
        dataset (Dataset): User dataset
        tokenizer (AutoTokenizer): Tokenizer used for formatting

    Returns:
        Callable: Formatting function if the dataset format is supported else None
    """
    if isinstance(dataset, Dataset):
        if "messages" in dataset.features:
            if dataset.features["messages"] == FORMAT_MAPPING["chatml"]:
                logging.info("Formatting dataset with chatml format")
                return conversations_formatting_function(tokenizer, "messages", tools)
        if "conversations" in dataset.features:
            if dataset.features["conversations"] == FORMAT_MAPPING["chatml"]:
                logging.info("Formatting dataset with chatml format")
                return conversations_formatting_function(tokenizer, "conversations", tools)
        elif dataset.features == FORMAT_MAPPING["instruction"]:
            logging.info("Formatting dataset with instruction format")
            return instructions_formatting_function(tokenizer)
    return None
trl/trl/extras/dataset_formatting.py/0
{ "file_path": "trl/trl/extras/dataset_formatting.py", "repo_id": "trl", "token_count": 1642 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ # Full training python trl/scripts/sft.py \ --model_name_or_path Qwen/Qwen2-0.5B \ --dataset_name trl-lib/Capybara \ --learning_rate 2.0e-5 \ --num_train_epochs 1 \ --packing \ --per_device_train_batch_size 2 \ --gradient_accumulation_steps 8 \ --gradient_checkpointing \ --logging_steps 25 \ --eval_strategy steps \ --eval_steps 100 \ --output_dir Qwen2-0.5B-SFT \ --push_to_hub # LoRA python trl/scripts/sft.py \ --model_name_or_path Qwen/Qwen2-0.5B \ --dataset_name trl-lib/Capybara \ --learning_rate 2.0e-4 \ --num_train_epochs 1 \ --packing \ --per_device_train_batch_size 2 \ --gradient_accumulation_steps 8 \ --gradient_checkpointing \ --logging_steps 25 \ --eval_strategy steps \ --eval_steps 100 \ --use_peft \ --lora_r 32 \ --lora_alpha 16 \ --output_dir Qwen2-0.5B-SFT \ --push_to_hub """ import argparse from datasets import load_dataset from transformers import AutoTokenizer from trl import ( ModelConfig, ScriptArguments, SFTConfig, SFTTrainer, TrlParser, get_kbit_device_map, get_peft_config, get_quantization_config, ) def main(script_args, training_args, model_args): ################ # Model init kwargs & Tokenizer ################ quantization_config = get_quantization_config(model_args) model_kwargs = dict( revision=model_args.model_revision, trust_remote_code=model_args.trust_remote_code, attn_implementation=model_args.attn_implementation, torch_dtype=model_args.torch_dtype, use_cache=False if training_args.gradient_checkpointing else True, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) training_args.model_init_kwargs = model_kwargs tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, use_fast=True ) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token ################ # Dataset ################ dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config) ################ # Training ################ trainer = SFTTrainer( model=model_args.model_name_or_path, args=training_args, train_dataset=dataset[script_args.dataset_train_split], eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, processing_class=tokenizer, peft_config=get_peft_config(model_args), ) trainer.train() # Save and push to hub trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name) def make_parser(subparsers: argparse._SubParsersAction = None): dataclass_types = (ScriptArguments, SFTConfig, ModelConfig) if subparsers is not None: parser = subparsers.add_parser("sft", help="Run the SFT training script", dataclass_types=dataclass_types) else: parser = TrlParser(dataclass_types) return parser if __name__ == "__main__": parser = make_parser() script_args, training_args, model_args = parser.parse_args_and_config() 
main(script_args, training_args, model_args)
trl/trl/scripts/sft.py/0
{ "file_path": "trl/trl/scripts/sft.py", "repo_id": "trl", "token_count": 1589 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import random import textwrap from copy import deepcopy from typing import Any, Callable, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from accelerate.utils import is_deepspeed_available from datasets import Dataset from transformers import ( AutoModelForCausalLM, BaseImageProcessor, DataCollator, FeatureExtractionMixin, GenerationConfig, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, is_wandb_available, ) from transformers.trainer_callback import TrainerCallback from transformers.trainer_utils import EvalPrediction from transformers.utils import is_liger_kernel_available, is_peft_available from ..models import PreTrainedModelWrapper from ..models.utils import unwrap_model_for_generation from .gkd_config import GKDConfig from .sft_trainer import SFTTrainer from .utils import ( DataCollatorForChatML, disable_dropout_in_model, empty_cache, generate_model_card, get_comet_experiment_url, ) if is_deepspeed_available(): import deepspeed if is_liger_kernel_available(): from liger_kernel.transformers import AutoLigerKernelForCausalLM if is_peft_available(): from peft import PeftConfig if is_wandb_available(): import wandb class GKDTrainer(SFTTrainer): _tag_names = ["trl", "gkd"] def __init__( self, model: Optional[Union[PreTrainedModel, nn.Module, str]] = None, teacher_model: Union[PreTrainedModel, nn.Module, str] = None, args: Optional[GKDConfig] = None, data_collator: Optional[DataCollator] = None, # type: ignore train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ] = None, compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None, callbacks: Optional[list[TrainerCallback]] = None, optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, peft_config: Optional["PeftConfig"] = None, formatting_func: Optional[Callable] = None, ): # add remove_unused_columns=False to the dataclass args args.remove_unused_columns = False data_collator = DataCollatorForChatML(tokenizer=processing_class, max_length=args.max_seq_length) super().__init__( model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, processing_class=processing_class, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, peft_config=peft_config, formatting_func=formatting_func, ) if args.teacher_model_init_kwargs is None: teacher_model_init_kwargs = {} elif not isinstance(teacher_model, str): raise ValueError( "You passed teacher_model_init_kwargs to the GKDConfig, but your teacher_model is already instantiated." 
) else: teacher_model_init_kwargs = args.teacher_model_init_kwargs teacher_model_init_kwargs["torch_dtype"] = ( teacher_model_init_kwargs["torch_dtype"] if teacher_model_init_kwargs["torch_dtype"] in ["auto", None] else getattr(torch, teacher_model_init_kwargs["torch_dtype"]) ) if isinstance(teacher_model, str): if args.use_liger: teacher_model = AutoLigerKernelForCausalLM.from_pretrained(teacher_model, **teacher_model_init_kwargs) else: teacher_model = AutoModelForCausalLM.from_pretrained(teacher_model, **teacher_model_init_kwargs) # Disable dropout in the model if args.disable_dropout: disable_dropout_in_model(self.model) if self.is_deepspeed_enabled: self.teacher_model = self._prepare_deepspeed(teacher_model) else: self.teacher_model = self.accelerator.prepare_model(teacher_model, evaluation_mode=True) self.lmbda = args.lmbda self.beta = args.beta self.temperature = args.temperature self.seq_kd = args.seq_kd self.generation_config = GenerationConfig( max_new_tokens=args.max_new_tokens, temperature=args.temperature, do_sample=True, top_k=0, use_cache=False if args.gradient_checkpointing else True, pad_token_id=self.processing_class.pad_token_id, ) # Set custom EOS tokens if they are specified by the model's generation # config. This is important for models with the Llama 3 chat template, # which use special tokens <|eot_id|> and <|eom_id|> to mark the end of # turns or messages. if ( hasattr(self.model.generation_config, "eos_token_id") and self.model.generation_config.eos_token_id is not None ): self.generation_config.eos_token_id = self.model.generation_config.eos_token_id def _prepare_dataset(self, dataset, *args): # SFTTrainer._prepare_dataset() applies the chat template and rename the messages column to text. However, we # need to keep the messages column as it is. We use the following workaround to keep the messages column. dataset = dataset.add_column("_messages", dataset["messages"]) dataset = super()._prepare_dataset(dataset, *args) dataset = dataset.rename_column("_messages", "messages") return dataset @staticmethod def generalized_jsd_loss( student_logits, teacher_logits, labels=None, beta=0.5, temperature=1.0, reduction="batchmean" ): """ Compute the generalized Jensen-Shannon Divergence loss for knowledge distillation using F.kl_div. See Eq. (1) of https://huggingface.co/papers/2306.13649 for the definition. 
Args: student_logits: Tensor of shape (batch_size, sequence_length, vocab_size) teacher_logits: Tensor of shape (batch_size, sequence_length, vocab_size) labels: Tensor of shape (batch_size, sequence_length) with -100 for padding tokens to ignore when computing loss beta: Interpolation coefficient between 0 and 1 (default: 0.5) temperature: Softmax temperature (default: 1.0) reduction: Specifies the reduction to apply to the output (default: 'batchmean') Returns: loss: Scalar tensor with the generalized JSD loss """ # Apply temperature scaling student_logits = student_logits / temperature teacher_logits = teacher_logits / temperature # Compute log probabilities for student and probabilities for teacher student_log_probs = F.log_softmax(student_logits, dim=-1) teacher_log_probs = F.log_softmax(teacher_logits, dim=-1) # Compute the log of the mixture distribution # log(a + b) = log(exp(log(a)) + exp(log(b))) -> for mixture beta = torch.tensor(beta, dtype=student_log_probs.dtype) mixture_log_probs = torch.logsumexp( torch.stack([student_log_probs + torch.log(beta), teacher_log_probs + torch.log(1 - beta)]), dim=0, ) # Compute KL divergences using F.kl_div # PyTorch differs from the standard mathematical definition, so the order of the probability distributions is swapped compared to that defined in the paper. kl_teacher = F.kl_div(mixture_log_probs, teacher_log_probs, reduction="none", log_target=True) kl_student = F.kl_div(mixture_log_probs, student_log_probs, reduction="none", log_target=True) # Compute the Generalized Jensen-Shannon Divergence jsd = beta * kl_teacher + (1 - beta) * kl_student # Masking if labels is not None: mask = labels != -100 jsd = jsd[mask] # Apply reduction if reduction == "batchmean": return jsd.sum() / mask.sum() if labels is not None else jsd.sum() / (jsd.size(0) * jsd.size(1)) elif reduction == "sum": return jsd.sum() elif reduction == "mean": return jsd.mean() else: return jsd def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None): # compute student output outputs_student = model( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], ) # compute teacher output in eval mode self.teacher_model.eval() with torch.no_grad(): outputs_teacher = self.teacher_model( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], ) # slice the logits for the generated tokens using the inputs["prompts"] lengths prompt_lengths = inputs["prompts"].shape[1] shifted_student_logits = outputs_student.logits[:, prompt_lengths - 1 : -1, :] shifted_teacher_logits = outputs_teacher.logits[:, prompt_lengths - 1 : -1, :] shifted_labels = inputs["labels"][:, prompt_lengths:] # compute loss loss = self.generalized_jsd_loss( student_logits=shifted_student_logits, teacher_logits=shifted_teacher_logits, labels=shifted_labels, beta=self.beta, ) # empty cache empty_cache() # Return loss return (loss, outputs_student) if return_outputs else loss @staticmethod def generate_on_policy_outputs(model, inputs, generation_config, pad_token_id=None): # Generate output with respect to the prompt only generated_outputs = model.generate( input_ids=inputs["prompts"], attention_mask=inputs.get("prompt_attention_mask", None), generation_config=generation_config, return_dict_in_generate=True, ) # Get the generated token IDs generated_tokens = generated_outputs.sequences # Calculate new attention mask new_attention_mask = torch.ones_like(generated_tokens) new_labels = generated_tokens.clone() # If there's pad_token_id, set attention mask to 0 for 
padding tokens if pad_token_id is not None: new_labels[new_labels == pad_token_id] = -100 new_attention_mask[generated_tokens == pad_token_id] = 0 return generated_tokens, new_attention_mask, new_labels def training_step( self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[int] = None ) -> torch.Tensor: """ Perform a training step for the Generalized Knowledge Distillation (GKD) model. This method implements the on-policy learning approach described in the GKD paper. With probability `self.lmbda`, it generates new responses using the student model, which are then used for training instead of the original inputs. """ if self.seq_kd: with unwrap_model_for_generation(self.teacher_model, self.accelerator) as unwrapped_model: new_input_ids, new_attention_mask, new_labels = self.generate_on_policy_outputs( unwrapped_model, inputs, self.generation_config, self.processing_class.pad_token_id ) inputs["input_ids"] = new_input_ids inputs["attention_mask"] = new_attention_mask inputs["labels"] = new_labels if random.random() <= self.lmbda: with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model: new_input_ids, new_attention_mask, new_labels = self.generate_on_policy_outputs( unwrapped_model, inputs, self.generation_config, self.processing_class.pad_token_id ) inputs["input_ids"] = new_input_ids inputs["attention_mask"] = new_attention_mask inputs["labels"] = new_labels loss = super().training_step(model, inputs, num_items_in_batch) return loss def _prepare_deepspeed(self, model: PreTrainedModelWrapper): # Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473 deepspeed_plugin = self.accelerator.state.deepspeed_plugin config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config) if model is not None: if hasattr(model, "config"): hidden_size = ( max(model.config.hidden_sizes) if getattr(model.config, "hidden_sizes", None) else getattr(model.config, "hidden_size", None) ) if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3: # Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0` # This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081 config_kwargs.update( { "zero_optimization.reduce_bucket_size": hidden_size * hidden_size, "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size, "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size, } ) # If ZeRO-3 is used, we shard both the active and reference model. # Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0) if config_kwargs["zero_optimization"]["stage"] != 3: config_kwargs["zero_optimization"]["stage"] = 0 model, *_ = deepspeed.initialize(model=model, config=config_kwargs) model.eval() return model def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. 
""" if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None tags = tags or [] if isinstance(tags, str): tags = [tags] if hasattr(self.model.config, "unsloth_version"): tags.append("unsloth") citation = textwrap.dedent("""\ @inproceedings{agarwal2024on-policy, title = {{On-Policy Distillation of Language Models: Learning from Self-Generated Mistakes}}, author = {Rishabh Agarwal and Nino Vieillard and Yongchao Zhou and Piotr Stanczyk and Sabela Ramos Garea and Matthieu Geist and Olivier Bachem}, year = 2024, booktitle = {The Twelfth International Conference on Learning Representations, {ICLR} 2024, Vienna, Austria, May 7-11, 2024}, publisher = {OpenReview.net}, url = {https://openreview.net/forum?id=3zKtaqxLhW}, }""") model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None, comet_url=get_comet_experiment_url(), trainer_name="GKD", trainer_citation=citation, paper_title="On-Policy Distillation of Language Models: Learning from Self-Generated Mistakes", paper_id="2306.13649", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
trl/trl/trainer/gkd_trainer.py/0
{ "file_path": "trl/trl/trainer/gkd_trainer.py", "repo_id": "trl", "token_count": 7522 }
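To make the loss above easier to inspect in isolation, here is a self-contained sketch that mirrors the `generalized_jsd_loss` computation from `gkd_trainer.py` on toy tensors. The shapes, `beta`, and `temperature` values are arbitrary illustrations, not values the trainer itself uses.

```python
import torch
import torch.nn.functional as F

# Toy shapes for illustration: batch=2, seq_len=3, vocab=5
student_logits = torch.randn(2, 3, 5)
teacher_logits = torch.randn(2, 3, 5)
beta, temperature = 0.5, 2.0

student_log_probs = F.log_softmax(student_logits / temperature, dim=-1)
teacher_log_probs = F.log_softmax(teacher_logits / temperature, dim=-1)

# Log of the beta-weighted mixture distribution, computed stably in log space
beta_t = torch.tensor(beta, dtype=student_log_probs.dtype)
mixture_log_probs = torch.logsumexp(
    torch.stack([student_log_probs + torch.log(beta_t), teacher_log_probs + torch.log(1 - beta_t)]),
    dim=0,
)

# F.kl_div(input, target, log_target=True) computes KL(target || input) elementwise
kl_teacher = F.kl_div(mixture_log_probs, teacher_log_probs, reduction="none", log_target=True)
kl_student = F.kl_div(mixture_log_probs, student_log_probs, reduction="none", log_target=True)

# "batchmean"-style reduction over batch * sequence positions (no label masking here)
jsd = (beta * kl_teacher + (1 - beta) * kl_student).sum() / (2 * 3)
print(jsd)
```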
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional from transformers import TrainingArguments @dataclass class PRMConfig(TrainingArguments): r""" Configuration class for the [`PRMTrainer`]. Using [`~transformers.HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: learning_rate (`float`, *optional*, defaults to `1e-5`): Initial learning rate for [`AdamW`] optimizer. The default value replaces that of [`~transformers.TrainingArguments`]. max_length (`int` or `None`, *optional*, defaults to `1024`): Maximum length of the sequences (prompt + completion) used for truncation. max_prompt_length (`int` or `None`, *optional*, defaults to `512`): Maximum length of the prompt used for truncation. max_completion_length (`int` or `None`, *optional*, defaults to `None`): Maximum length of the completion used for truncation. The completion is the concatenation of the steps. disable_dropout (`bool`, *optional*, defaults to `True`): Whether to disable dropout in the model. step_separator (`str`, *optional*, defaults to `"\n"`): Separator used to separate each step of the reasoning process. train_on_last_step_only (`bool`, *optional*, defaults to `False`): Whether to train only on the last step. dataset_num_proc (`int`, *optional*, defaults to `None`): Number of processes to use for processing the dataset. """ learning_rate: float = field( default=1e-5, metadata={ "help": "Initial learning rate for `AdamW` optimizer. The default value replaces that of " "`TrainingArguments`." }, ) max_length: Optional[int] = field( default=1024, metadata={"help": "Maximum length of the sequences (prompt + completion) used for truncation."}, ) max_prompt_length: Optional[int] = field( default=512, metadata={"help": "Maximum length of the prompt used for truncation."}, ) max_completion_length: Optional[int] = field( default=None, metadata={ "help": "Maximum length of the completion used for truncation. The completion is the concatenation of the " "steps." }, ) disable_dropout: bool = field( default=True, metadata={"help": "Whether to disable dropout in the model and reference model."}, ) step_separator: str = field( default="\n", metadata={"help": "Separator used to separate each step of the reasoning process."}, ) train_on_last_step_only: bool = field( default=False, metadata={"help": "Whether to train only on the last step."}, ) dataset_num_proc: Optional[int] = field( default=None, metadata={"help": "Number of processes to use for processing the dataset."}, )
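As the docstring above notes, `PRMConfig` can be turned into command-line arguments with `HfArgumentParser`. The sketch below is a minimal illustration of that flow; it assumes `trl` is installed and exports `PRMConfig` at the package root, and the argument values are arbitrary.

```python
from transformers import HfArgumentParser

from trl import PRMConfig  # assumes trl is installed and exposes PRMConfig at the top level

parser = HfArgumentParser(PRMConfig)
# Parse an explicit argv list instead of sys.argv so the snippet is self-contained
(config,) = parser.parse_args_into_dataclasses(
    args=["--output_dir", "prm-output", "--max_length", "512", "--train_on_last_step_only", "True"]
)
print(config.learning_rate, config.max_length, config.step_separator)
```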
trl/trl/trainer/prm_config.py/0
{ "file_path": "trl/trl/trainer/prm_config.py", "repo_id": "trl", "token_count": 1315 }
# FP8 Benchmarks

Comparing and running [TransformerEngine](https://github.com/NVIDIA/TransformerEngine) FP8 with accelerate

## Overview

This repo provides scripts which compare native TransformerEngine model training against `accelerate`'s own integration. Each modeling type is segmented out via its own script, supporting the following:

* Single GPU training (`non_distributed.py`)
* Multi-GPU training via DistributedDataParallel (`ddp.py`)
* Fully Sharded Data Parallelism (`fsdp.py`)
* DeepSpeed ZeRO 1-3 (`deepspeed.py`)

To run them, it's recommended to use a Docker image (see the attached `Dockerfile`) rather than installing `TransformerEngine` manually.

## Running

There are official Docker images located at `huggingface/accelerate:gpu-fp8-transformerengine-nightly` which can be used.

You can run all scripts using the core `accelerate launch` command without any `accelerate config` being needed.

For single GPU, run it via `python`:

```bash
python non_distributed.py
```

For the other scripts, run them via `accelerate launch`:

```bash
accelerate launch ddp.py  # or fsdp.py, deepspeed.py
```
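For context on what the `accelerate` side of the comparison can look like, below is a hedged sketch of requesting FP8 through `Accelerator` itself. It assumes an `accelerate` version that still exports `FP8RecipeKwargs`, a machine with TransformerEngine and FP8-capable hardware; the tiny model and data are placeholders, and this is not one of the benchmark scripts.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs  # assumption: exported by your accelerate version

# Tiny placeholder model and data so the snippet is self-contained
model = torch.nn.Linear(16, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(TensorDataset(torch.randn(8, 16), torch.randint(0, 2, (8,))), batch_size=4)

# Request the TransformerEngine backend explicitly; FP8 only takes effect on supported GPUs
fp8_handler = FP8RecipeKwargs(backend="TE")
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[fp8_handler])
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
```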
accelerate/benchmarks/fp8/transformer_engine/README.md/0
{ "file_path": "accelerate/benchmarks/fp8/transformer_engine/README.md", "repo_id": "accelerate", "token_count": 326 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Add Accelerate to your code Each distributed training framework has their own way of doing things which can require writing a lot of custom code to adapt it to your PyTorch training code and training environment. Accelerate offers a friendly way to interface with these distributed training frameworks without having to learn the specific details of each one. Accelerate takes care of those details for you, so you can focus on the training code and scale it to any distributed training environment. In this tutorial, you'll learn how to adapt your existing PyTorch code with Accelerate and get you on your way toward training on distributed systems with ease! You'll start with a basic PyTorch training loop (it assumes all the training objects like `model` and `optimizer` have been setup already) and progressively integrate Accelerate into it. ```python device = "cuda" model.to(device) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) loss.backward() optimizer.step() scheduler.step() ``` ## Accelerator The [`Accelerator`] is the main class for adapting your code to work with Accelerate. It knows about the distributed setup you're using such as the number of different processes and your hardware type. This class also provides access to many of the necessary methods for enabling your PyTorch code to work in any distributed training environment and for managing and executing processes across devices. That's why you should always start by importing and creating an [`Accelerator`] instance in your script. ```python from accelerate import Accelerator accelerator = Accelerator() ``` The [`Accelerator`] also knows which device to move your PyTorch objects to, so it is recommended to let Accelerate handle this for you. ```diff - device = "cuda" + device = accelerator.device model.to(device) ``` ## Prepare PyTorch objects Next, you need to prepare your PyTorch objects (model, optimizer, scheduler, etc.) for distributed training. The [`~Accelerator.prepare`] method takes care of placing your model in the appropriate container (like single GPU or multi-GPU) for your training setup, adapting the optimizer and scheduler to use Accelerate's [`~optimizer.AcceleratedOptimizer`] and [`~scheduler.AcceleratedScheduler`], and creating a new dataloader that can be sharded across processes. > [!TIP] > Accelerate only prepares objects that inherit from their respective PyTorch classes such as `torch.optim.Optimizer`. The PyTorch objects are returned in the same order they're sent. 
```py model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) ``` ## Training loop Finally, remove the `to(device)` calls to the inputs and targets in the training loop because Accelerate's DataLoader classes automatically places them on the right device. You should also replace the usual `backward()` pass with Accelerate's [`~Accelerator.backward`] method which scales the gradients for you and uses the appropriate `backward()` method depending on your distributed setup (for example, DeepSpeed or Megatron). ```diff - inputs = inputs.to(device) - targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) - loss.backward() + accelerator.backward(loss) ``` Put everything together and your new Accelerate training loop should now look like this! ```python from accelerate import Accelerator accelerator = Accelerator() device = accelerator.device model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() scheduler.step() ``` ## Training features Accelerate offers additional features - like gradient accumulation, gradient clipping, mixed precision training and more - you can add to your script to improve your training run. Let's explore these three features. ### Gradient accumulation Gradient accumulation enables you to train on larger batch sizes by accumulating the gradients over multiple batches before updating the weights. This can be useful for getting around memory limitations. To enable this feature in Accelerate, specify the `gradient_accumulation_steps` parameter in the [`Accelerator`] class and add the [`~Accelerator.accumulate`] context manager to your script. ```diff + accelerator = Accelerator(gradient_accumulation_steps=2) model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader) for input, label in training_dataloader: + with accelerator.accumulate(model): predictions = model(input) loss = loss_function(predictions, label) accelerator.backward(loss) optimizer.step() scheduler.step() optimizer.zero_grad() ``` ### Gradient clipping Gradient clipping is a technique to prevent "exploding gradients", and Accelerate offers: * [`~Accelerator.clip_grad_value_`] to clip gradients to a minimum and maximum value * [`~Accelerator.clip_grad_norm_`] for normalizing gradients to a certain value ### Mixed precision Mixed precision accelerates training by using a lower precision data type like fp16 (half-precision) to calculate the gradients. For the best performance with Accelerate, the loss should be computed inside your model (like in Transformers models) because computations outside of the model are computed in full precision. Set the mixed precision type to use in the [`Accelerator`], and then use the [`~Accelerator.autocast`] context manager to automatically cast the values to the specified data type. > [!WARNING] > Accelerate enables automatic mixed precision, so [`~Accelerator.autocast`] is only needed if there are other mixed precision operations besides those performed on loss by [`~Accelerator.backward`] which already handles the scaling. 
```diff + accelerator = Accelerator(mixed_precision="fp16") + with accelerator.autocast(): loss = complex_loss_function(outputs, target) ``` ## Save and load Accelerate can also save and load a *model* once training is complete or you can also save the model and optimizer *state* which could be useful for resuming training. ### Model Once all processes are complete, unwrap the model with the [`~Accelerator.unwrap_model`] method before saving it because the [`~Accelerator.prepare`] method wrapped your model into the proper interface for distributed training. If you don't unwrap the model, saving the model state dictionary also saves any potential extra layers from the larger model and you won't be able to load the weights back into your base model. You should use the [`~Accelerator.save_model`] method to unwrap and save the model state dictionary. This method can also save a model into sharded checkpoints or into the [safetensors](https://hf.co/docs/safetensors/index) format. <hfoptions id="save"> <hfoption id="single checkpoint"> ```py accelerator.wait_for_everyone() accelerator.save_model(model, save_directory) ``` <Tip> For models from the [Transformers](https://hf.co/docs/transformers/index) library, save the model with the [`~transformers.PreTrainedModel.save_pretrained`] method so that it can be reloaded with the [`~transformers.PreTrainedModel.from_pretrained`] method. ```py from transformers import AutoModel unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( "path/to/my_model_directory", is_main_process=accelerator.is_main_process, save_function=accelerator.save, ) model = AutoModel.from_pretrained("path/to/my_model_directory") ``` </Tip> To load your weights, use the [`~Accelerator.unwrap_model`] method to unwrap the model first before loading the weights. All model parameters are references to tensors, so this loads your weights inside `model`. ```py unwrapped_model = accelerator.unwrap_model(model) path_to_checkpoint = os.path.join(save_directory,"pytorch_model.bin") unwrapped_model.load_state_dict(torch.load(path_to_checkpoint)) ``` </hfoption> <hfoption id="sharded checkpoint"> Set `safe_serialization=True` to save the model in the safetensor format. ```py accelerator.wait_for_everyone() accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True) ``` To load a sharded checkpoint or a safetensor formatted checkpoint, use the [`~accelerate.load_checkpoint_in_model`] method. This method allows you to load a checkpoint onto a specific device. ```py load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"":device}) ``` </hfoption> </hfoptions> ### State During training, you may want to save the current state of the model, optimizer, random generators, and potentially learning rate schedulers so they can be restored in the *same script*. You should add the [`~Accelerator.save_state`] and [`~Accelerator.load_state`] methods to your script to save and load states. To further customize where and how states are saved through [`~Accelerator.save_state`], use the [`~utils.ProjectConfiguration`] class. For example, if `automatic_checkpoint_naming` is enabled, each saved checkpoint is stored at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`. Any other stateful items to be stored should be registered with the [`~Accelerator.register_for_checkpointing`] method so they can be saved and loaded. 
Every object passed to this method to be stored must have a `load_state_dict` and `state_dict` function. > [!TIP] > If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, you can additionally pass `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`]. This extends Accelerate's DataLoader classes with a `load_state_dict` and `state_dict` function, and makes it so `Accelerator.save_state` and `Accelerator.load_state` also track how far into the training dataset it has read when persisting the model.
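The gradient clipping and state checkpointing features described earlier in this guide are shown below in one combined sketch. The tiny model, data, `max_norm` value, and checkpoint path are all placeholders; swap in your real training objects.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

# Tiny stand-ins so the sketch runs end to end
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
loss_function = torch.nn.CrossEntropyLoss()
training_dataloader = DataLoader(TensorDataset(torch.randn(8, 10), torch.randint(0, 2, (8,))), batch_size=4)

accelerator = Accelerator()
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
    model, optimizer, training_dataloader, scheduler
)

for inputs, targets in training_dataloader:
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
    accelerator.backward(loss)
    # Clip after the backward pass; 1.0 is an arbitrary illustrative max norm
    accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
    optimizer.step()
    scheduler.step()

# Save model, optimizer, and RNG states so training can resume in the same script;
# custom stateful objects would first be registered with accelerator.register_for_checkpointing(obj)
accelerator.save_state("checkpoints/latest")
accelerator.load_state("checkpoints/latest")
```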
accelerate/docs/source/basic_tutorials/migration.md/0
{ "file_path": "accelerate/docs/source/basic_tutorials/migration.md", "repo_id": "accelerate", "token_count": 3069 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Accelerate Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable. ```diff + from accelerate import Accelerator + accelerator = Accelerator() + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) + accelerator.backward(loss) optimizer.step() scheduler.step() ``` Built on `torch_xla` and `torch.distributed`, Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms. Convert existing codebases to utilize [DeepSpeed](usage_guides/deepspeed), perform [fully sharded data parallelism](usage_guides/fsdp), and have automatic support for mixed-precision training! <Tip> To get a better idea of this process, make sure to check out the [Tutorials](basic_tutorials/overview)! </Tip> This code can then be launched on any system through Accelerate's CLI interface: ```bash accelerate launch {my_script.py} ``` <div class="mt-10"> <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5"> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./basic_tutorials/overview" ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div> <p class="text-gray-700">Learn the basics and become familiar with using Accelerate. Start here if you are using Accelerate for the first time!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./usage_guides/explore" ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div> <p class="text-gray-700">Practical guides to help you achieve a specific goal. 
Take a look at these guides to learn how to use Accelerate to solve real-world problems.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./concept_guides/gradient_synchronization" ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div> <p class="text-gray-700">High-level explanations for building a better understanding of important topics such as avoiding subtle nuances and pitfalls in distributed training and DeepSpeed.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/accelerator" ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div> <p class="text-gray-700">Technical descriptions of how Accelerate classes and methods work.</p> </a> </div> </div>
accelerate/docs/source/index.md/0
{ "file_path": "accelerate/docs/source/index.md", "repo_id": "accelerate", "token_count": 1354 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Quicktour

There are many ways to launch and run your code depending on your training environment ([torchrun](https://pytorch.org/docs/stable/elastic/run.html), [DeepSpeed](https://www.deepspeed.ai/), etc.) and available hardware. Accelerate offers a unified interface for launching and training on different distributed setups, allowing you to focus on your PyTorch training code instead of the intricacies of adapting your code to these different setups. This allows you to easily scale your PyTorch code for training and inference on distributed setups with hardware like GPUs and TPUs. Accelerate also provides Big Model Inference to make loading and running inference with really large models that usually don't fit in memory more accessible.

This quicktour introduces the three main features of Accelerate:

* a unified command line launching interface for distributed training scripts
* a training library for adapting PyTorch training code to run on different distributed setups
* Big Model Inference

## Unified launch interface

Accelerate automatically selects the appropriate configuration values for any given distributed training framework (DeepSpeed, FSDP, etc.) through a unified configuration file generated from the [`accelerate config`](package_reference/cli#accelerate-config) command. You could also pass the configuration values explicitly to the command line, which is helpful in certain situations, such as when you're using SLURM.

But in most cases, you should always run [`accelerate config`](package_reference/cli#accelerate-config) first to help Accelerate learn about your training setup.

```bash
accelerate config
```

The [`accelerate config`](package_reference/cli#accelerate-config) command creates and saves a `default_config.yaml` file in Accelerate's cache folder. This file stores the configuration for your training environment, which helps Accelerate correctly launch your training script based on your machine.

After you've configured your environment, you can test your setup with [`accelerate test`](package_reference/cli#accelerate-test), which launches a short script to test the distributed environment.

```bash
accelerate test
```

> [!TIP]
> Add `--config_file` to the `accelerate test` or `accelerate launch` command to specify the location of the configuration file if it is saved in a non-default location like the cache.

Once your environment is set up, launch your training script with [`accelerate launch`](package_reference/cli#accelerate-launch)!

```bash
accelerate launch path_to_script.py --args_for_the_script
```

Check out the [Launch distributed code](basic_tutorials/launch) tutorial for more information about launching your scripts.
We also have a [configuration zoo](https://github.com/huggingface/accelerate/blob/main/examples/config_yaml_templates) which showcases a number of premade **minimal** example configurations for a variety of setups you can run. ## Adapt training code The next main feature of Accelerate is the [`Accelerator`] class which adapts your PyTorch code to run on different distributed setups. You only need to add a few lines of code to your training script to enable it to run on multiple GPUs or TPUs. ```diff + from accelerate import Accelerator + accelerator = Accelerator() + device = accelerator.device + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch - inputs = inputs.to(device) - targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) + accelerator.backward(loss) optimizer.step() scheduler.step() ``` 1. Import and instantiate the [`Accelerator`] class at the beginning of your training script. The [`Accelerator`] class initializes everything necessary for distributed training, and it automatically detects your training environment (a single machine with a GPU, a machine with several GPUs, several machines with multiple GPUs or a TPU, etc.) based on how the code was launched. ```python from accelerate import Accelerator accelerator = Accelerator() ``` 2. Remove calls like `.cuda()` on your model and input data. The [`Accelerator`] class automatically places these objects on the appropriate device for you. > [!WARNING] > This step is *optional* but it is considered best practice to allow Accelerate to handle device placement. You could also deactivate automatic device placement by passing `device_placement=False` when initializing the [`Accelerator`]. If you want to explicitly place objects on a device with `.to(device)`, make sure you use `accelerator.device` instead. For example, if you create an optimizer before placing a model on `accelerator.device`, training fails on a TPU. > [!WARNING] > Accelerate does not use non-blocking transfers by default for its automatic device placement, which can result in potentially unwanted CUDA synchronizations. You can enable non-blocking transfers by passing a [`~utils.dataclasses.DataLoaderConfiguration`] with `non_blocking=True` set as the `dataloader_config` when initializing the [`Accelerator`]. As usual, non-blocking transfers will only work if the dataloader also has `pin_memory=True` set. Be wary that using non-blocking transfers from GPU to CPU may cause incorrect results if it results in CPU operations being performed on non-ready tensors. ```py device = accelerator.device ``` 3. Pass all relevant PyTorch objects for training (optimizer, model, dataloader(s), learning rate scheduler) to the [`~Accelerator.prepare`] method as soon as they're created. This method wraps the model in a container optimized for your distributed setup, uses Accelerates version of the optimizer and scheduler, and creates a sharded version of your dataloader for distribution across GPUs or TPUs. ```python model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, lr_scheduler ) ``` 4. Replace `loss.backward()` with [`~Accelerator.backward`] to use the correct `backward()` method for your training setup. 
```py
accelerator.backward(loss)
```

Read [Accelerate’s internal mechanisms](concept_guides/internal_mechanism) guide to learn more details about how Accelerate adapts your code.

### Distributed evaluation

To perform distributed evaluation, pass your validation dataloader to the [`~Accelerator.prepare`] method:

```python
validation_dataloader = accelerator.prepare(validation_dataloader)
```

Each device in your distributed setup only receives a part of the evaluation data, which means you should group your predictions together with the [`~Accelerator.gather_for_metrics`] method. This method requires all tensors to be the same size on each process, so if your tensors have different sizes on each process (for instance when dynamically padding to the maximum length in a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad your tensor to the largest size across processes. Note that the tensors need to be 1D and that we concatenate the tensors along the first dimension.

```python
for inputs, targets in validation_dataloader:
    predictions = model(inputs)
    # Gather all predictions and targets
    all_predictions, all_targets = accelerator.gather_for_metrics((predictions, targets))
    # Example of use with a *Datasets.Metric*
    metric.add_batch(all_predictions, all_targets)
```

For more complex cases (e.g. 2D tensors, tensors you don't want to concatenate, dicts of 3D tensors), you can pass `use_gather_object=True` in `gather_for_metrics`. This will return the list of objects after gathering. Note that using it with GPU tensors is not well supported and is inefficient.

> [!TIP]
> Data at the end of a dataset may be duplicated so the batch can be equally divided among all workers. The [`~Accelerator.gather_for_metrics`] method automatically removes the duplicated data to calculate a more accurate metric.

## Big Model Inference

Accelerate's Big Model Inference has two main features, [`~accelerate.init_empty_weights`] and [`~accelerate.load_checkpoint_and_dispatch`], to load large models for inference that typically don't fit into memory.

> [!TIP]
> Take a look at the [Handling big models for inference](concept_guides/big_model_inference) guide for a better understanding of how Big Model Inference works under the hood.

### Empty weights initialization

The [`~accelerate.init_empty_weights`] context manager initializes models of any size by creating a *model skeleton* and moving parameters to PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device each time they're created. This way, not all weights are immediately loaded and only a small part of the model is loaded into memory at a time.

For example, loading an empty [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model takes significantly less memory than fully loading the model and its weights on the CPU.

```py
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)
```

### Load and dispatch weights

The [`~accelerate.load_checkpoint_and_dispatch`] function loads full or sharded checkpoints into the empty model, and automatically distributes weights across all available devices.
The `device_map` parameter determines where to place each model layer, and specifying `"auto"` places them on the GPU first, then the CPU, and finally the hard drive as memory-mapped tensors if there's still not enough memory. Use the `no_split_module_classes` parameter to indicate which modules shouldn't be split across devices (typically those with a residual connection).

```py
from accelerate import load_checkpoint_and_dispatch

model_checkpoint = "your-local-model-folder"
model = load_checkpoint_and_dispatch(
    model, checkpoint=model_checkpoint, device_map="auto", no_split_module_classes=['Block']
)
```

## Next steps

Now that you've been introduced to the main Accelerate features, your next steps could include:

* Check out the [tutorials](basic_tutorials/overview) for a gentle walkthrough of Accelerate. This is especially useful if you're new to distributed training and the library.
* Dive into the [guides](usage_guides/explore) to see how to use Accelerate for specific use cases.
* Deepen your conceptual understanding of how Accelerate works internally by reading the [concept guides](concept_guides/internal_mechanism).
* Look up classes and commands in the [API reference](package_reference/accelerator) to see what parameters and options are available.
accelerate/docs/source/quicktour.md/0
{ "file_path": "accelerate/docs/source/quicktour.md", "repo_id": "accelerate", "token_count": 3047 }
<!-- Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Profiler Profiler is a tool that allows the collection of performance metrics during training and inference. Profiler’s context manager API can be used to better understand what model operators are the most expensive, examine their input shapes and stack traces, study device kernel activity, and visualize the execution trace. It provides insights into the performance of your model, allowing you to optimize and improve it. This guide explains how to use PyTorch Profiler to measure the time and memory consumption of the model’s operators and how to integrate this with Accelerate. We will cover various use cases and provide examples for each. ## Using profiler to analyze execution time Profiler allows one to check which operators were called during the execution of a code range wrapped with a profiler context manager. Let’s see how we can use profiler to analyze the execution time: <hfoptions id="cpu execution time"> <hfoption id="PyTorch"> ```python import torch import torchvision.models as models from torch.profiler import profile, record_function, ProfilerActivity model = models.resnet18() inputs = torch.randn(5, 3, 224, 224) with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof: model(inputs) print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10)) ``` </hfoption> <hfoption id="Accelerate"> ```python from accelerate import Accelerator, ProfileKwargs import torch import torchvision.models as models model = models.resnet18() inputs = torch.randn(5, 3, 224, 224) profile_kwargs = ProfileKwargs( activities=["cpu"], record_shapes=True ) accelerator = Accelerator(cpu=True, kwargs_handlers=[profile_kwargs]) model = accelerator.prepare(model) with accelerator.profile() as prof: with torch.no_grad(): model(inputs) print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10)) ``` </hfoption> </hfoptions> The resulting table output (omitting some columns): ``` --------------------------------- ------------ ------------ ------------ ------------ Name Self CPU CPU total CPU time avg # of Calls --------------------------------- ------------ ------------ ------------ ------------ aten::conv2d 171.000us 52.260ms 2.613ms 20 aten::convolution 227.000us 52.089ms 2.604ms 20 aten::_convolution 270.000us 51.862ms 2.593ms 20 aten::mkldnn_convolution 51.273ms 51.592ms 2.580ms 20 aten::batch_norm 118.000us 7.059ms 352.950us 20 aten::_batch_norm_impl_index 315.000us 6.941ms 347.050us 20 aten::native_batch_norm 6.305ms 6.599ms 329.950us 20 aten::max_pool2d 40.000us 4.008ms 4.008ms 1 aten::max_pool2d_with_indices 3.968ms 3.968ms 3.968ms 1 aten::add_ 780.000us 780.000us 27.857us 28 --------------------------------- ------------ ------------ ------------ ------------ Self CPU time total: 67.016ms ``` To get a finer granularity of 
results and include operator input shapes, pass `group_by_input_shape=True` (note: this requires running the profiler with `record_shapes=True`): ```python print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=10)) ``` ## Using profiler to analyze memory consumption Profiler can also show the amount of memory (used by the model’s tensors) that was allocated (or released) during the execution of the model’s operators. To enable memory profiling functionality pass `profile_memory=True`. <hfoptions id="memory consumption"> <hfoption id="PyTorch"> ```python model = models.resnet18() inputs = torch.randn(5, 3, 224, 224) with profile(activities=[ProfilerActivity.CPU], profile_memory=True, record_shapes=True) as prof: model(inputs) print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10)) ``` </hfoption> <hfoption id="Accelerate"> ```python model = models.resnet18() inputs = torch.randn(5, 3, 224, 224) profile_kwargs = ProfileKwargs( activities=["cpu"], profile_memory=True, record_shapes=True ) accelerator = Accelerator(cpu=True, kwargs_handlers=[profile_kwargs]) model = accelerator.prepare(model) with accelerator.profile() as prof: model(inputs) print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10)) ``` </hfoption> </hfoptions> The resulting table output (omitting some columns): ``` --------------------------------- ------------ ------------ ------------ Name CPU Mem Self CPU Mem # of Calls --------------------------------- ------------ ------------ ------------ aten::empty 94.85 Mb 94.85 Mb 205 aten::max_pool2d_with_indices 11.48 Mb 11.48 Mb 1 aten::addmm 19.53 Kb 19.53 Kb 1 aten::mean 10.00 Kb 10.00 Kb 1 aten::empty_strided 492 b 492 b 5 aten::cat 240 b 240 b 6 aten::abs 480 b 240 b 4 aten::masked_select 120 b 112 b 1 aten::ne 61 b 53 b 3 aten::eq 30 b 30 b 1 --------------------------------- ------------ ------------ ------------ Self CPU time total: 69.332ms ``` ## Exporting chrome trace You can examine the sequence of profiled operators and CUDA kernels in Chrome trace viewer (`chrome://tracing`): ![profile_export](https://github.com/huggingface/accelerate/assets/100389977/5acb193f-6d11-4f7b-9873-c600c19e8172) <hfoptions id="exporting chrome trace"> <hfoption id="PyTorch"> ```python model = models.resnet18().cuda() inputs = torch.randn(5, 3, 224, 224).cuda() with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof: model(inputs) prof.export_chrome_trace("trace.json") ``` </hfoption> <hfoption id="Accelerate"> ```python model = models.resnet18() inputs = torch.randn(5, 3, 224, 224).cuda() profile_kwargs = ProfileKwargs( activities=["cpu", "cuda"], output_trace_dir="trace" ) accelerator = Accelerator(kwargs_handlers=[profile_kwargs]) model = accelerator.prepare(model) with accelerator.profile() as prof: model(inputs) # The trace will be saved to the specified directory ``` For other hardware accelerators, e.g. XPU, you can change `cuda` to `xpu` in the above example code. </hfoption> </hfoptions> ## Using Profiler to Analyze Long-Running Jobs Profiler offers an additional API to handle long-running jobs (such as training loops). Tracing all of the execution can be slow and result in very large trace files. To avoid this, use optional arguments: - `schedule_option`: Scheduling options allow you to control when profiling is active. This is useful for long-running jobs to avoid collecting too much data. Available keys are `wait`, `warmup`, `active`, `repeat` and `skip_first`. 
The profiler will skip the first `skip_first` steps, then wait for `wait` steps, then do the warmup for the next `warmup` steps, then do the active recording for the next `active` steps and then repeat the cycle starting with `wait` steps. The optional number of cycles is specified with the `repeat` parameter, the zero value means that the cycles will continue until the profiling is finished. - `on_trace_ready`: specifies a function that takes a reference to the profiler as an input and is called by the profiler each time the new trace is ready. To illustrate how the API works, consider the following example: <hfoptions id="custom handler"> <hfoption id="PyTorch"> ```python from torch.profiler import schedule my_schedule = schedule( skip_first=1, wait=5, warmup=1, active=3, repeat=2 ) def trace_handler(p): output = p.key_averages().table(sort_by="self_cuda_time_total", row_limit=10) print(output) p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json") with profile( activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], schedule=my_schedule, on_trace_ready=trace_handler ) as p: for idx in range(8): model(inputs) p.step() ``` </hfoption> <hfoption id="Accelerate"> ```python def trace_handler(p): output = p.key_averages().table(sort_by="self_cuda_time_total", row_limit=10) print(output) p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json") profile_kwargs = ProfileKwargs( activities=["cpu", "cuda"], schedule_option={"wait": 5, "warmup": 1, "active": 3, "repeat": 2, "skip_first": 1}, on_trace_ready=trace_handler ) accelerator = Accelerator(kwargs_handlers=[profile_kwargs]) model = accelerator.prepare(model) with accelerator.profile() as prof: for idx in range(8): model(inputs) prof.step() ``` </hfoption> </hfoptions> ## FLOPS Use formula to estimate the FLOPs (floating point operations) of specific operators (matrix multiplication and 2D convolution). To measure floating-point operations (FLOPS): <hfoptions id="FLOPS"> <hfoption id="PyTorch"> ```python with profile( activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], with_flops=True ) as prof: model(inputs) print(prof.key_averages().table(sort_by="flops", row_limit=10)) ``` </hfoption> <hfoption id="Accelerate"> ```python profile_kwargs = ProfileKwargs( with_flops=True ) accelerator = Accelerator(kwargs_handlers=[profile_kwargs]) with accelerator.profile() as prof: model(inputs) print(prof.key_averages().table(sort_by="flops", row_limit=10)) ``` </hfoption> </hfoptions> The resulting table output (omitting some columns): ``` ------------------------------------------------------- ------------ ------------ ------------ Name Self CPU Self CUDA Total FLOPs ------------------------------------------------------- ------------ ------------ ------------ aten::conv2d 197.000us 0.000us 18135613440.000 aten::addmm 103.000us 17.000us 5120000.000 aten::mul 29.000us 2.000us 30.000 aten::convolution 409.000us 0.000us -- aten::_convolution 253.000us 0.000us -- aten::cudnn_convolution 5.465ms 2.970ms -- cudaEventRecord 138.000us 0.000us -- cudaStreamIsCapturing 43.000us 0.000us -- cudaStreamGetPriority 40.000us 0.000us -- cudaDeviceGetStreamPriorityRange 10.000us 0.000us -- ------------------------------------------------------- ------------ ------------ ------------ Self CPU time total: 21.938ms Self CUDA time total: 4.165ms ``` ## Conclusion and Further Information PyTorch Profiler is a powerful tool for analyzing the performance of your models. 
By integrating it with Accelerate, you can easily profile your models and gain insights into their performance, helping you to optimize and improve them. For more detailed information, refer to the [PyTorch Profiler documentation](https://pytorch.org/docs/stable/profiler.html).
accelerate/docs/source/usage_guides/profiler.md/0
{ "file_path": "accelerate/docs/source/usage_guides/profiler.md", "repo_id": "accelerate", "token_count": 5179 }
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from transformers import AutoModelForCausalLM, AutoTokenizer from accelerate import PartialState, prepare_pippy # sdpa implementation which is the default torch>2.1.2 fails with the tracing + attention mask kwarg # with attn_implementation="eager" mode, the forward is very slow for some reason model = AutoModelForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-chat-hf", low_cpu_mem_usage=True, attn_implementation="sdpa" ) model.eval() # Input configs # Create example inputs for the model tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf") prompts = ("I would like to", "I really like to") # bs = 2, sending 2 per process tokenizer.pad_token = tokenizer.eos_token inputs = tokenizer(prompts, return_tensors="pt", padding=True) # Create a pipeline stage from the model # Using `auto` is equivalent to letting `device_map="auto"` figure # out device mapping and will also split the model according to the # number of total GPUs available if it fits on one GPU model = prepare_pippy(model, split_points="auto", example_kwargs=inputs) # You can pass `gather_output=True` to have the output from the model # available on all GPUs # model = prepare_pippy(model, split_points="auto", example_args=(input,), gather_output=True) # currently we don't support `model.generate` # output = model.generate(**inputs, max_new_tokens=1) prompts = ("I would like to", "I really like to", "The weather is pretty") # bs = 3 inputs = tokenizer(prompts, return_tensors="pt", padding=True) inputs = inputs.to(0) with torch.no_grad(): output = model(**inputs) # The outputs are only on the final process by default if PartialState().is_last_process: next_token_logits = output[0][:, -1, :] next_token = torch.argmax(next_token_logits, dim=-1) print(tokenizer.batch_decode(next_token)) PartialState().destroy_process_group()
accelerate/examples/inference/pippy/llama.py/0
{ "file_path": "accelerate/examples/inference/pippy/llama.py", "repo_id": "accelerate", "token_count": 768 }
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def get_config_parser(subparsers=None): parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) # The main config parser config_parser = config_command_parser(subparsers) # The subparser to add commands to subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand") # Then add other parsers with the parent parser default_command_parser(subcommands, parents=[parent_parser]) update_command_parser(subcommands, parents=[parent_parser]) return config_parser def main(): config_parser = get_config_parser() args = config_parser.parse_args() if not hasattr(args, "func"): config_parser.print_help() exit(1) # Run args.func(args) if __name__ == "__main__": main()
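The wiring above follows a common argparse pattern: a shared parent parser, subcommands that each register a `func` callback via `set_defaults`, and a `main()` that dispatches to whichever callback was selected. The generic, self-contained sketch below illustrates the same pattern with made-up subcommands; it is not accelerate's actual CLI.

```python
import argparse

# Shared options live on a parent parser so every subcommand inherits them
parent = argparse.ArgumentParser(add_help=False)
parent.add_argument("--verbose", action="store_true")

parser = argparse.ArgumentParser(prog="tool")
subcommands = parser.add_subparsers(title="subcommands", dest="subcommand")

default_cmd = subcommands.add_parser("default", parents=[parent])
default_cmd.set_defaults(func=lambda args: print(f"writing default config (verbose={args.verbose})"))

update_cmd = subcommands.add_parser("update", parents=[parent])
update_cmd.set_defaults(func=lambda args: print("updating existing config"))

args = parser.parse_args(["default", "--verbose"])
if not hasattr(args, "func"):
    parser.print_help()
else:
    args.func(args)  # dispatches to the callback registered by the chosen subcommand
```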
accelerate/src/accelerate/commands/config/__init__.py/0
{ "file_path": "accelerate/src/accelerate/commands/config/__init__.py", "repo_id": "accelerate", "token_count": 516 }
# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Main driver for the selection menu, based on https://github.com/bchao1/bullet """ import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP in_colab = False try: in_colab = _is_package_available("google.colab") except ModuleNotFoundError: pass @input.register class BulletMenu: """ A CLI menu to select a choice from a list of choices using the keyboard. """ def __init__(self, prompt: str = None, choices: list = []): self.position = 0 self.choices = choices self.prompt = prompt if sys.platform == "win32": self.arrow_char = "*" else: self.arrow_char = "➔ " def write_choice(self, index, end: str = ""): if sys.platform != "win32": writeColor(self.choices[index], 32, end) else: forceWrite(self.choices[index], end) def print_choice(self, index: int): "Prints the choice at the given index" if index == self.position: forceWrite(f" {self.arrow_char} ") self.write_choice(index) else: forceWrite(f" {self.choices[index]}") reset_cursor() def move_direction(self, direction: Direction, num_spaces: int = 1): "Should not be directly called, used to move a direction of either up or down" old_position = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(old_position) move_cursor(num_spaces, direction.name) self.print_choice(self.position) @input.mark(KEYMAP["up"]) def move_up(self): self.move_direction(Direction.UP) @input.mark(KEYMAP["down"]) def move_down(self): self.move_direction(Direction.DOWN) @input.mark(KEYMAP["newline"]) def select(self): move_cursor(len(self.choices) - self.position, "DOWN") return self.position @input.mark(KEYMAP["interrupt"]) def interrupt(self): move_cursor(len(self.choices) - self.position, "DOWN") raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)]) def select_row(self): index = int(chr(self.current_selection)) movement = index - self.position if index == self.position: return if index < len(self.choices): if self.position > index: self.move_direction(Direction.UP, -movement) elif self.position < index: self.move_direction(Direction.DOWN, movement) else: return else: return def run(self, default_choice: int = 0): "Start the menu and return the selected choice" if self.prompt: linebreak() forceWrite(self.prompt, "\n") if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter", "\n") else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n") self.position = default_choice for i in range(len(self.choices)): self.print_choice(i) forceWrite("\n") move_cursor(len(self.choices) - self.position, "UP") with 
cursor.hide(): while True: if in_colab: try: choice = int(builtins.input()) except ValueError: choice = default_choice else: choice = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices) + 1): move_cursor(1, "UP") clear_line() self.write_choice(choice, "\n") return choice
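A hypothetical usage sketch for the menu class above; the prompt and choices are made up, the import path is inferred from the file location, and an interactive terminal is required for the key handling to work.

```python
from accelerate.commands.menu.selection_menu import BulletMenu

menu = BulletMenu(
    prompt="Which compute environment are you running in?",
    choices=["This machine", "AWS (Amazon SageMaker)"],
)
# Blocks until the user confirms a choice with Enter; returns the selected index
index = menu.run(default_choice=0)
print(f"Selected: {menu.choices[index]}")
```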
accelerate/src/accelerate/commands/menu/selection_menu.py/0
{ "file_path": "accelerate/src/accelerate/commands/menu/selection_menu.py", "repo_id": "accelerate", "token_count": 2187 }
#!/usr/bin/env python # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each `examples/by_feature` example. `compare_against_test` is the main function that should be used when testing, while the others are used to either get the code that matters, or to preprocess them (such as stripping comments) """ import os from typing import List def get_function_contents_by_name(lines: List[str], name: str): """ Extracts a function from `lines` of segmented source code with the name `name`. Args: lines (`List[str]`): Source code of a script seperated by line. name (`str`): The name of the function to extract. Should be either `training_function` or `main` """ if name != "training_function" and name != "main": raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'") good_lines, found_start = [], False for line in lines: if not found_start and f"def {name}" in line: found_start = True good_lines.append(line) continue if found_start: if name == "training_function" and "def main" in line: return good_lines if name == "main" and "if __name__" in line: return good_lines good_lines.append(line) def clean_lines(lines: List[str]): """ Filters `lines` and removes any entries that start with a comment ('#') or is just a newline ('\n') Args: lines (`List[str]`): Source code of a script seperated by line. """ return [line for line in lines if not line.lstrip().startswith("#") and line != "\n"] def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None): """ Tests whether the additional code inside of `feature_filename` was implemented in `base_filename`. This should be used when testing to see if `complete_*_.py` examples have all of the implementations from each of the `examples/by_feature/*` scripts. It utilizes `nlp_example.py` to extract out all of the repeated training code, so that only the new additional code is examined and checked. If something *other* than `nlp_example.py` should be used, such as `cv_example.py` for the `complete_cv_example.py` script, it should be passed in for the `secondary_filename` parameter. Args: base_filename (`str` or `os.PathLike`): The filepath of a single "complete" example script to test, such as `examples/complete_cv_example.py` feature_filename (`str` or `os.PathLike`): The filepath of a single feature example script. The contents of this script are checked to see if they exist in `base_filename` parser_only (`bool`): Whether to compare only the `main()` sections in both files, or to compare the contents of `training_loop()` secondary_filename (`str`, *optional*): A potential secondary filepath that should be included in the check. 
This function extracts the base functionalities off of "examples/nlp_example.py", so if `base_filename` is a script other than `complete_nlp_example.py`, the template script should be included here. Such as `examples/cv_example.py` """ with open(base_filename) as f: base_file_contents = f.readlines() with open(os.path.abspath(os.path.join("examples", "nlp_example.py"))) as f: full_file_contents = f.readlines() with open(feature_filename) as f: feature_file_contents = f.readlines() if secondary_filename is not None: with open(secondary_filename) as f: secondary_file_contents = f.readlines() # This is our base, we remove all the code from here in our `full_filename` and `feature_filename` to find the new content if parser_only: base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "main")) full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "main")) feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "main")) if secondary_filename is not None: secondary_file_func = clean_lines(get_function_contents_by_name(secondary_file_contents, "main")) else: base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "training_function")) full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "training_function")) feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "training_function")) if secondary_filename is not None: secondary_file_func = clean_lines( get_function_contents_by_name(secondary_file_contents, "training_function") ) _dl_line = "train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n" # Specific code in our script that differs from the full version, aka what is new new_feature_code = [] passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement it = iter(feature_file_func) for i in range(len(feature_file_func) - 1): if i not in passed_idxs: line = next(it) if (line not in full_file_func) and (line.lstrip() != _dl_line): if "TESTING_MOCKED_DATALOADERS" not in line: new_feature_code.append(line) passed_idxs.append(i) else: # Skip over the `config['num_epochs'] = 2` statement _ = next(it) # Extract out just the new parts from the full_file_training_func new_full_example_parts = [] passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement for i, line in enumerate(base_file_func): if i not in passed_idxs: if (line not in full_file_func) and (line.lstrip() != _dl_line): if "TESTING_MOCKED_DATALOADERS" not in line: new_full_example_parts.append(line) passed_idxs.append(i) # Finally, get the overall diff diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts] if secondary_filename is not None: diff_from_two = [line for line in full_file_contents if line not in secondary_file_func] diff_from_example = [line for line in diff_from_example if line not in diff_from_two] return diff_from_example
accelerate/src/accelerate/test_utils/examples.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/examples.py", "repo_id": "accelerate", "token_count": 2735 }
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import io import math import time from copy import deepcopy from pathlib import Path import numpy as np import torch from torch.utils.data import DataLoader, Dataset from accelerate import Accelerator from accelerate.data_loader import SeedableRandomSampler, prepare_data_loader from accelerate.state import AcceleratorState from accelerate.test_utils import RegressionDataset, are_the_same_tensors from accelerate.utils import ( DataLoaderConfiguration, DistributedType, gather, is_bf16_available, is_datasets_available, is_ipex_available, is_mlu_available, is_musa_available, is_npu_available, is_pytest_available, is_xpu_available, set_seed, synchronize_rng_states, ) # TODO: remove RegressionModel4XPU once ccl support empty buffer in broadcasting. if is_xpu_available(): from accelerate.test_utils import RegressionModel4XPU as RegressionModel else: from accelerate.test_utils import RegressionModel def generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler=False): "Creates a dataloader that can also use the `SeedableRandomSampler`" if use_seedable_sampler: # The SeedableRandomSampler is needed during distributed setups # for full reproducability across processes with the `DataLoader` sampler = SeedableRandomSampler( generator=generator, data_source=train_set, num_samples=len(train_set), ) return DataLoader(train_set, batch_size=batch_size, sampler=sampler) else: return DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) def print_main(state): print(f"Printing from the main process {state.process_index}") def print_local_main(state): print(f"Printing from the local main process {state.local_process_index}") def print_last(state): print(f"Printing from the last process {state.process_index}") def print_on(state, process_idx): print(f"Printing from process {process_idx}: {state.process_index}") def process_execution_check(): accelerator = Accelerator() num_processes = accelerator.num_processes # Test main_process_first context manager path = Path("check_main_process_first.txt") with accelerator.main_process_first(): if accelerator.is_main_process: time.sleep(0.1) # ensure main process takes longest with open(path, "a+") as f: f.write("Currently in the main process\n") else: with open(path, "a+") as f: f.write("Now on another process\n") accelerator.wait_for_everyone() if accelerator.is_main_process: with open(path) as f: text = "".join(f.readlines()) try: assert text.startswith("Currently in the main process\n"), "Main process was not first" if num_processes > 1: assert text.endswith("Now on another process\n"), "Main process was not first" assert ( text.count("Now on another process\n") == accelerator.num_processes - 1 ), f"Only wrote to file {text.count('Now on another process') + 1} times, not {accelerator.num_processes}" except AssertionError: path.unlink() raise if accelerator.is_main_process and 
path.exists(): path.unlink() accelerator.wait_for_everyone() # Test the decorators f = io.StringIO() with contextlib.redirect_stdout(f): accelerator.on_main_process(print_main)(accelerator.state) result = f.getvalue().rstrip() if accelerator.is_main_process: assert result == "Printing from the main process 0", f"{result} != Printing from the main process 0" else: assert f.getvalue().rstrip() == "", f'{result} != ""' f.truncate(0) f.seek(0) with contextlib.redirect_stdout(f): accelerator.on_local_main_process(print_local_main)(accelerator.state) if accelerator.is_local_main_process: assert f.getvalue().rstrip() == "Printing from the local main process 0" else: assert f.getvalue().rstrip() == "" f.truncate(0) f.seek(0) with contextlib.redirect_stdout(f): accelerator.on_last_process(print_last)(accelerator.state) if accelerator.is_last_process: assert f.getvalue().rstrip() == f"Printing from the last process {accelerator.state.num_processes - 1}" else: assert f.getvalue().rstrip() == "" f.truncate(0) f.seek(0) for process_idx in range(num_processes): with contextlib.redirect_stdout(f): accelerator.on_process(print_on, process_index=process_idx)(accelerator.state, process_idx) if accelerator.process_index == process_idx: assert f.getvalue().rstrip() == f"Printing from process {process_idx}: {accelerator.process_index}" else: assert f.getvalue().rstrip() == "" f.truncate(0) f.seek(0) def init_state_check(): # Test we can instantiate this twice in a row. state = AcceleratorState() if state.local_process_index == 0: print("Testing, testing. 1, 2, 3.") print(state) def rng_sync_check(): state = AcceleratorState() synchronize_rng_states(["torch"]) assert are_the_same_tensors(torch.get_rng_state()), "RNG states improperly synchronized on CPU." if state.distributed_type == DistributedType.MULTI_GPU: synchronize_rng_states(["cuda"]) assert are_the_same_tensors(torch.cuda.get_rng_state()), "RNG states improperly synchronized on GPU." elif state.distributed_type == DistributedType.MULTI_XPU: synchronize_rng_states(["xpu"]) assert are_the_same_tensors(torch.xpu.get_rng_state()), "RNG states improperly synchronized on XPU." generator = torch.Generator() synchronize_rng_states(["generator"], generator=generator) assert are_the_same_tensors(generator.get_state()), "RNG states improperly synchronized in generator." if state.local_process_index == 0: print("All rng are properly synched.") def dl_preparation_check(): state = AcceleratorState() length = 32 * state.num_processes dl = DataLoader(range(length), batch_size=8) dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result) print(state.process_index, result, type(dl)) assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." dl = DataLoader(range(length), batch_size=8) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, split_batches=True, ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result) assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." 
if state.process_index == 0: print("Non-shuffled dataloader passing.") dl = DataLoader(range(length), batch_size=8, shuffle=True) dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result).tolist() result.sort() assert result == list(range(length)), "Wrong shuffled dataloader result." dl = DataLoader(range(length), batch_size=8, shuffle=True) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, split_batches=True, ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result).tolist() result.sort() assert result == list(range(length)), "Wrong shuffled dataloader result." if state.local_process_index == 0: print("Shuffled dataloader passing.") def central_dl_preparation_check(): state = AcceleratorState() length = 32 * state.num_processes dl = DataLoader(range(length), batch_size=8) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result) assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." dl = DataLoader(range(length), batch_size=8) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, split_batches=True, dispatch_batches=True, ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result) assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." if state.process_index == 0: print("Non-shuffled central dataloader passing.") dl = DataLoader(range(length), batch_size=8, shuffle=True) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result).tolist() result.sort() assert result == list(range(length)), "Wrong shuffled dataloader result." dl = DataLoader(range(length), batch_size=8, shuffle=True) dl = prepare_data_loader( dl, state.device, state.num_processes, state.process_index, put_on_device=True, split_batches=True, dispatch_batches=True, ) result = [] for batch in dl: result.append(gather(batch)) result = torch.cat(result).tolist() result.sort() assert result == list(range(length)), "Wrong shuffled dataloader result." 
if state.local_process_index == 0: print("Shuffled central dataloader passing.") def custom_sampler_check(): state = AcceleratorState() class CustomDataset(Dataset): def __init__(self, data): self.data = data def __len__(self): return len(self.data) def __getitem__(self, index): return self.data[index] class CustomBatchSampler: def __init__(self, dataset_length: int, batch_size: int, shuffle: bool = True): self.batch_size = batch_size self.data_index = np.arange(dataset_length) self.shuffle = shuffle def __iter__(self): num_batches = len(self) if self.shuffle: index = np.random.permutation(self.data_index) else: index = self.data_index output = np.array_split(index, num_batches) yield from output def __len__(self): return math.ceil(len(self.data_index) / self.batch_size) dataset = CustomDataset(range(32 * state.num_processes)) sampler = CustomBatchSampler(len(dataset), batch_size=8) dl = DataLoader(dataset, batch_sampler=sampler) dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index) # We need just ensure that `dl.batch_sampler` (or `dl.batch_sampler.batch_sampler` is indeed the old batch sampler if hasattr(dl.batch_sampler, "batch_sampler"): assert isinstance( dl.batch_sampler.batch_sampler, CustomBatchSampler ), "Custom sampler was changed after calling `prepare_data_loader`" else: assert isinstance( dl.batch_sampler, CustomBatchSampler ), "Custom sampler was changed after calling `prepare_data_loader`" def check_seedable_sampler(): # Set seed set_seed(42) train_set = RegressionDataset(length=10, seed=42) train_dl = DataLoader(train_set, batch_size=2, shuffle=True) config = DataLoaderConfiguration(use_seedable_sampler=True) accelerator = Accelerator(dataloader_config=config) train_dl = accelerator.prepare(train_dl) original_items = [] for _ in range(3): for batch in train_dl: original_items.append(batch["x"]) original_items = torch.cat(original_items) # Set seed again and the epoch set_seed(42) train_dl.set_epoch(0) new_items = [] for _ in range(3): for batch in train_dl: new_items.append(batch["x"]) new_items = torch.cat(new_items) assert torch.allclose(original_items, new_items), "Did not obtain the same items with the same seed and epoch." def check_seedable_sampler_in_batch_sampler_shard(): set_seed(42) config = DataLoaderConfiguration(use_seedable_sampler=True) accelerator = Accelerator(dataloader_config=config) assert accelerator.num_processes > 1, "This test requires more than one process." dataloader = DataLoader(list(range(10)), batch_size=1, shuffle=True) prepared_data_loader = prepare_data_loader( dataloader=dataloader, use_seedable_sampler=True, ) target_sampler = prepared_data_loader.batch_sampler.batch_sampler.sampler assert isinstance( target_sampler, SeedableRandomSampler ), "Sampler in BatchSamplerShard is not SeedableRandomSampler." 
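# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original checks: the user-facing pattern
# that the seedable-sampler checks above exercise. It only relies on names that
# are already imported at the top of this script; the epoch loop is an assumed
# typical training setup, not something the checks themselves run.
def _seedable_sampler_usage_sketch():
    config = DataLoaderConfiguration(use_seedable_sampler=True)
    accelerator = Accelerator(dataloader_config=config)
    train_set = RegressionDataset(length=10, seed=42)
    train_dl = accelerator.prepare(DataLoader(train_set, batch_size=2, shuffle=True))
    for epoch in range(2):
        # `set_epoch` reseeds the SeedableRandomSampler, so shuffling is
        # reproducible across restarts that resume at the same epoch.
        train_dl.set_epoch(epoch)
        for _ in train_dl:
            pass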
def check_seedable_sampler_with_data_seed(): # Set seed set_seed(42) data_seed = 42 train_set = RegressionDataset(length=10, seed=42) train_dl = DataLoader(train_set, batch_size=2, shuffle=True) config = DataLoaderConfiguration(use_seedable_sampler=True, data_seed=data_seed) accelerator = Accelerator(dataloader_config=config) prepared_dl = accelerator.prepare(train_dl) original_items = [] for _ in range(3): for batch in prepared_dl: original_items.append(batch["x"]) original_items = torch.cat(original_items) # Set new data seed config.data_seed = 43 accelerator = Accelerator(dataloader_config=config) prepared_dl = accelerator.prepare(train_dl) new_items = [] for _ in range(3): for batch in prepared_dl: new_items.append(batch["x"]) new_items = torch.cat(new_items) assert not torch.allclose(original_items, new_items), "Obtained the same items with different data seed." def mock_training(length, batch_size, generator, use_seedable_sampler=False): set_seed(42) generator.manual_seed(42) train_set = RegressionDataset(length=length, seed=42) train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) for epoch in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) loss.backward() optimizer.step() return train_set, model def training_check(use_seedable_sampler=False): state = AcceleratorState() generator = torch.Generator() batch_size = 8 length = batch_size * 4 * state.num_processes train_set, old_model = mock_training(length, batch_size * state.num_processes, generator, use_seedable_sampler) assert are_the_same_tensors(old_model.a), "Did not obtain the same model on both processes." assert are_the_same_tensors(old_model.b), "Did not obtain the same model on both processes." accelerator = Accelerator() train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." 
accelerator.print("Training yielded the same results on one CPU or distributed setup with no batch split.") dataloader_config = DataLoaderConfiguration(split_batches=True, use_seedable_sampler=use_seedable_sampler) accelerator = Accelerator(dataloader_config=dataloader_config) train_dl = generate_baseline_dataloader( train_set, generator, batch_size * state.num_processes, use_seedable_sampler ) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." accelerator.print("Training yielded the same results on one CPU or distributes setup with batch split.") if torch.cuda.is_available() or is_npu_available() or is_mlu_available() or is_musa_available(): # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16 print("FP16 training check.") AcceleratorState._reset_state() dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) accelerator = Accelerator(mixed_precision="fp16", dataloader_config=dataloader_config) train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." if torch.cuda.is_available(): # Mostly a test that model.forward will have autocast when running unwrap_model(model, keep_fp32_wrapper=True) print("Keep fp32 wrapper check.") AcceleratorState._reset_state() accelerator = Accelerator(mixed_precision="fp16") model = torch.nn.Linear(2, 4) model = accelerator.prepare(model) model_with_fp32_wrapper = accelerator.unwrap_model(model, keep_fp32_wrapper=True) # Run forward with fp16 as input. # When the model is with mixed precision wrapper, no error will be raised. 
input_tensor = torch.Tensor([1, 2]).to(dtype=torch.float16, device=accelerator.device) output = model_with_fp32_wrapper(input_tensor) # BF16 support is only for CPU + TPU, and some GPU if is_bf16_available(): # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16 print("BF16 training check.") AcceleratorState._reset_state() dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) accelerator = Accelerator(mixed_precision="bf16", dataloader_config=dataloader_config) train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." # IPEX support is only for CPU if is_ipex_available(): print("ipex BF16 training check.") AcceleratorState._reset_state() dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) accelerator = Accelerator(mixed_precision="bf16", cpu=True, dataloader_config=dataloader_config) train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." # XPU support is only for XPU if is_xpu_available(): print("xpu BF16 training check.") AcceleratorState._reset_state() dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler) accelerator = Accelerator(mixed_precision="bf16", cpu=False, dataloader_config=dataloader_config) train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) set_seed(42) generator.manual_seed(42) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() model = accelerator.unwrap_model(model).cpu() assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on XPU or distributed training." assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on XPU or distributed training." 
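# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original checks: the minimal mixed-precision
# training pattern that `training_check` verifies above. The bf16/no fallback and
# the tiny dataset are assumptions for illustration only; the sketch is never called.
def _mixed_precision_usage_sketch():
    accelerator = Accelerator(mixed_precision="bf16" if is_bf16_available() else "no")
    model = RegressionModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    train_dl = DataLoader(RegressionDataset(length=16, seed=42), batch_size=4)
    model, optimizer, train_dl = accelerator.prepare(model, optimizer, train_dl)
    for batch in train_dl:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
        # `backward` applies gradient scaling when fp16 grad scaling is active.
        accelerator.backward(loss)
        optimizer.step()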
def test_split_between_processes_dataset(datasets_Dataset): state = AcceleratorState() data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes)]) with state.split_between_processes(data, apply_padding=False) as results: assert ( len(results) == 2 ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}" data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes - 1)]) with state.split_between_processes(data, apply_padding=False) as results: if state.is_last_process: assert ( len(results) == 1 ), f"Last process did not receive a single item. Process index: {state.process_index}; Length: {len(results)}" else: assert ( len(results) == 2 ), f"One of the intermediate processes did not receive two items. Process index: {state.process_index}; Length: {len(results)}" data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes - 1)]) with state.split_between_processes(data, apply_padding=True) as results: if state.num_processes == 1: assert ( len(results) == 1 ), f"Single process did not receive a single item. Process index: {state.process_index}; Length: {len(results)}" else: assert ( len(results) == 2 ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}" state.wait_for_everyone() def test_split_between_processes_list(): state = AcceleratorState() data = list(range(0, 2 * state.num_processes)) with state.split_between_processes(data) as results: assert ( len(results) == 2 ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}" data = list(range(0, (3 * state.num_processes) - 1)) with state.split_between_processes(data, apply_padding=True) as results: if state.is_last_process: # Test that the last process gets the extra item(s) num_samples_per_device = math.ceil(len(data) / state.num_processes) assert ( len(results) == num_samples_per_device ), f"Last process did not get the extra item(s). 
Process index: {state.process_index}; Length: {len(results)}" state.wait_for_everyone() def test_split_between_processes_nested_dict(): state = AcceleratorState() a = [1, 2, 3, 4, 5, 6, 7, 8] b = ["a", "b", "c", "d", "e", "f", "g", "h"] c = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]) if state.num_processes in (1, 2, 4): data = {"a": a, "b": b, "c": c} data_copy = deepcopy(data) with state.split_between_processes(data) as results: if state.process_index == 0: assert results["a"] == data_copy["a"][: 8 // state.num_processes] elif state.num_processes == 2: assert results["a"] == data_copy["a"][4:] elif state.process_index == 3: # We return a list each time assert results["a"] == data_copy["a"][-2:], f'Expected: {data_copy["a"][-2]}, Actual: {results["a"]}' if state.process_index == 0: assert results["b"] == data_copy["b"][: 8 // state.num_processes] elif state.num_processes == 2: assert results["b"] == data_copy["b"][4:] elif state.process_index == 3: assert results["b"] == data_copy["b"][-2:] if state.process_index == 0: assert torch.allclose( results["c"], data_copy["c"][: 8 // state.num_processes] ), f"Did not obtain expected values on process 0, expected `{data['c'][:8 // state.num_processes]}`, received: {results['c']}" elif state.num_processes == 2: assert torch.allclose( results["c"], data_copy["c"][4:] ), f"Did not obtain expected values on process 2, expected `{data['c'][4:]}`, received: {results['c']}" elif state.process_index == 3: assert torch.allclose( results["c"], data_copy["c"][-2:] ), f"Did not obtain expected values on process 4, expected `{data['c'][-2:]}`, received: {results['c']}" state.wait_for_everyone() def test_split_between_processes_tensor(): state = AcceleratorState() if state.num_processes > 1: data = torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]).to(state.device) with state.split_between_processes(data) as results: if state.process_index == 0: assert torch.allclose(results, torch.tensor([0, 1, 2, 3]).to(state.device)) else: assert torch.allclose(results, torch.tensor([4, 5, 6, 7]).to(state.device)) state.wait_for_everyone() def test_split_between_processes_evenly(): state = AcceleratorState() if state.num_processes in (1, 2, 4, 8): data = list(range(17)) num_samples_per_process = len(data) // state.num_processes num_extras = len(data) % state.num_processes with state.split_between_processes(data) as results: if state.process_index < num_extras: assert ( len(results) == num_samples_per_process + 1 ), f"Each Process should have even elements. Expected: {num_samples_per_process + 1}, Actual: {len(results)}" else: assert ( len(results) == num_samples_per_process ), f"Each Process should have even elements. 
Expected: {num_samples_per_process}, Actual: {len(results)}" state.wait_for_everyone() def test_trigger(): accelerator = Accelerator() # should start with being false assert accelerator.check_trigger() is False # set a breakpoint on the main process if accelerator.is_main_process: accelerator.set_trigger() # check it's been activated across all processes # calls `all_reduce` and triggers a sync assert accelerator.check_trigger() is True # check it's been reset after the sync assert accelerator.check_trigger() is False def test_reinstantiated_state(): import pytest AcceleratorState._reset_state() simple_model = torch.nn.Linear(1, 1) # First define an accelerator accelerator = Accelerator() # Then call `reset_state`, breaking the state existing in the accelerator AcceleratorState._reset_state() # Now try and prepare a simple model, should raise the custom error early with pytest.raises(AttributeError) as cm: accelerator.prepare(simple_model) assert "`AcceleratorState` object has no attribute" in str(cm.value.args[0]) assert "This happens if `AcceleratorState._reset_state()`" in str(cm.value.args[0]) def main(): accelerator = Accelerator() state = accelerator.state if state.local_process_index == 0: print("**Initialization**") init_state_check() state.wait_for_everyone() if state.distributed_type == DistributedType.MULTI_GPU: num_processes_per_node = torch.cuda.device_count() else: num_processes_per_node = state.num_processes # We only run this test on non-multinode if num_processes_per_node == state.num_processes: if state.process_index == 0: print("\n**Test process execution**") process_execution_check() if state.process_index == 0: print("\n**Test split between processes as a list**") test_split_between_processes_list() if state.process_index == 0: print("\n**Test split between processes as a dict**") test_split_between_processes_nested_dict() if state.process_index == 0: print("\n**Test split between processes as a tensor**") test_split_between_processes_tensor() if state.process_index == 0: print("\n**Test split between processes evenly**") test_split_between_processes_evenly() if state.process_index == 0: print("\n**Test split between processes as a datasets.Dataset**") if is_datasets_available(): from datasets import Dataset as datasets_Dataset test_split_between_processes_dataset(datasets_Dataset) else: print("Skipped because Hugging Face datasets is not available") if state.local_process_index == 0: print("\n**Test random number generator synchronization**") rng_sync_check() if state.local_process_index == 0: print("\n**DataLoader integration test**") dl_preparation_check() if state.distributed_type != DistributedType.XLA: central_dl_preparation_check() custom_sampler_check() check_seedable_sampler() check_seedable_sampler_with_data_seed() if state.num_processes > 1: check_seedable_sampler_in_batch_sampler_shard() # Trainings are not exactly the same in DeepSpeed and CPU mode if state.distributed_type == DistributedType.DEEPSPEED: return if state.local_process_index == 0: print("\n**Training integration test**") training_check(use_seedable_sampler=False) training_check(use_seedable_sampler=True) if state.local_process_index == 0: print("\n**Breakpoint trigger test**") test_trigger() if is_pytest_available(): if state.local_process_index == 0: print("\n**Test reinstantiated state**") test_reinstantiated_state() state.destroy_process_group() if __name__ == "__main__": main()
accelerate/src/accelerate/test_utils/scripts/test_script.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/test_script.py", "repo_id": "accelerate", "token_count": 13893 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import gc import importlib import inspect import json import logging import os import re import shutil import tempfile import warnings from collections import OrderedDict, defaultdict from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from ..state import AcceleratorState from .constants import SAFE_WEIGHTS_NAME, WEIGHTS_NAME from .dataclasses import AutocastKwargs, CustomDtype, DistributedType from .imports import ( is_mlu_available, is_mps_available, is_musa_available, is_npu_available, is_peft_available, is_torch_xla_available, is_xpu_available, ) from .memory import clear_device_cache, get_xpu_available_memory from .offload import load_offloaded_weight, offload_weight, save_offload_index from .tqdm import is_tqdm_available, tqdm from .versions import compare_versions, is_torch_version if is_npu_available(check_device=False): import torch_npu # noqa: F401 if is_mlu_available(check_device=False): import torch_mlu # noqa: F401 if is_musa_available(check_device=False): import torch_musa # noqa: F401 from safetensors import safe_open from safetensors.torch import load_file as safe_load_file WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json" logger = logging.getLogger(__name__) def is_peft_model(model): from .other import extract_model_from_parallel if is_peft_available(): from peft import PeftModel return is_peft_available() and isinstance(extract_model_from_parallel(model), PeftModel) def check_device_same(first_device, second_device): """ Utility method to check if two `torch` devices are similar. When dealing with CUDA devices, torch throws `False` for `torch.device("cuda") == torch.device("cuda:0")` whereas they should be the same Args: first_device (`torch.device`): First device to check second_device (`torch.device`): Second device to check """ if first_device.type != second_device.type: return False if first_device.type == "cuda" and first_device.index is None: # In case the first_device is a cuda device and have # the index attribute set to `None`, default it to `0` first_device = torch.device("cuda", index=0) if second_device.type == "cuda" and second_device.index is None: # In case the second_device is a cuda device and have # the index attribute set to `None`, default it to `0` second_device = torch.device("cuda", index=0) return first_device == second_device def convert_file_size_to_int(size: Union[int, str]): """ Converts a size expressed as a string with digits an unit (like `"5MB"`) to an integer (in bytes). Args: size (`int` or `str`): The size to convert. Will be directly returned if an `int`. Example: ```py >>> convert_file_size_to_int("1MiB") 1048576 ``` """ mem_size = -1 err_msg = ( f"`size` {size} is not in a valid format. Use an integer for bytes, or a string with an unit (like '5.0GB')." 
) try: if isinstance(size, int): mem_size = size elif size.upper().endswith("GIB"): mem_size = int(float(size[:-3]) * (2**30)) elif size.upper().endswith("MIB"): mem_size = int(float(size[:-3]) * (2**20)) elif size.upper().endswith("KIB"): mem_size = int(float(size[:-3]) * (2**10)) elif size.upper().endswith("GB"): int_size = int(float(size[:-2]) * (10**9)) mem_size = int_size // 8 if size.endswith("b") else int_size elif size.upper().endswith("MB"): int_size = int(float(size[:-2]) * (10**6)) mem_size = int_size // 8 if size.endswith("b") else int_size elif size.upper().endswith("KB"): int_size = int(float(size[:-2]) * (10**3)) mem_size = int_size // 8 if size.endswith("b") else int_size except ValueError: raise ValueError(err_msg) if mem_size < 0: raise ValueError(err_msg) return mem_size def dtype_byte_size(dtype: torch.dtype): """ Returns the size (in bytes) occupied by one parameter of type `dtype`. Example: ```py >>> dtype_byte_size(torch.float32) 4 ``` """ if dtype == torch.bool: return 1 / 8 elif dtype == CustomDtype.INT2: return 1 / 4 elif dtype == CustomDtype.INT4: return 1 / 2 elif dtype == CustomDtype.FP8: return 1 elif is_torch_version(">=", "2.1.0") and dtype == torch.float8_e4m3fn: return 1 bit_search = re.search(r"[^\d](\d+)$", str(dtype)) if bit_search is None: raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") bit_size = int(bit_search.groups()[0]) return bit_size // 8 def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: """ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id. """ _SIZE = { torch.int64: 8, torch.float32: 4, torch.int32: 4, torch.bfloat16: 2, torch.float16: 2, torch.int16: 2, torch.uint8: 1, torch.int8: 1, torch.bool: 1, torch.float64: 8, } try: storage_ptr = tensor.untyped_storage().data_ptr() storage_size = tensor.untyped_storage().nbytes() except Exception: # Fallback for torch==1.10 try: storage_ptr = tensor.storage().data_ptr() storage_size = tensor.storage().size() * _SIZE[tensor.dtype] except NotImplementedError: # Fallback for meta storage storage_ptr = 0 # On torch >=2.0 this is the tensor size storage_size = tensor.nelement() * _SIZE[tensor.dtype] return tensor.device, storage_ptr, storage_size def set_module_tensor_to_device( module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None, dtype: Optional[Union[str, torch.dtype]] = None, fp16_statistics: Optional[torch.HalfTensor] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): """ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). Args: module (`torch.nn.Module`): The module in which the tensor we want to move lives. tensor_name (`str`): The full name of the parameter/buffer. device (`int`, `str` or `torch.device`): The device on which to set the tensor. value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any other device). dtype (`torch.dtype`, *optional*): If passed along the value of the parameter will be cast to this `dtype`. 
Otherwise, `value` will be cast to the dtype of the existing parameter in the model. fp16_statistics (`torch.HalfTensor`, *optional*): The list of fp16 statistics to set on the module, used for 8 bit model serialization. tied_params_map (Dict[int, Dict[torch.device, torch.Tensor]], *optional*, defaults to `None`): A map of current data pointers to dictionaries of devices to already dispatched tied weights. For a given execution device, this parameter is useful to reuse the first available pointer of a shared weight on the device for all others, instead of duplicating memory. """ # Recurse if needed if "." in tensor_name: splits = tensor_name.split(".") for split in splits[:-1]: new_module = getattr(module, split) if new_module is None: raise ValueError(f"{module} has no attribute {split}.") module = new_module tensor_name = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") is_buffer = tensor_name in module._buffers old_value = getattr(module, tensor_name) # Treat the case where old_value (or a custom `value`, typically offloaded to RAM/disk) belongs to a tied group, and one of the weight # in the tied group has already been dispatched to the device, by avoiding reallocating memory on the device and just copying the pointer. if ( value is not None and tied_params_map is not None and value.data_ptr() in tied_params_map and device in tied_params_map[value.data_ptr()] ): module._parameters[tensor_name] = tied_params_map[value.data_ptr()][device] return elif ( tied_params_map is not None and old_value.data_ptr() in tied_params_map and device in tied_params_map[old_value.data_ptr()] ): module._parameters[tensor_name] = tied_params_map[old_value.data_ptr()][device] return if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None: raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.") param = module._parameters[tensor_name] if tensor_name in module._parameters else None param_cls = type(param) if value is not None: # We can expect mismatches when using bnb 4bit since Params4bit will reshape and pack the weights. # In other cases, we want to make sure we're not loading checkpoints that do not match the config. if old_value.shape != value.shape and param_cls.__name__ != "Params4bit": raise ValueError( f'Trying to set a tensor of shape {value.shape} in "{tensor_name}" (which has shape {old_value.shape}), this looks incorrect.' ) if dtype is None: # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model value = value.to(old_value.dtype) elif not str(value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): value = value.to(dtype) device_quantization = None with torch.no_grad(): # leave it on cpu first before moving them to cuda # # fix the case where the device is meta, we don't want to put it on cpu because there is no data =0 if ( param is not None and param.device.type != "cuda" and torch.device(device).type == "cuda" and param_cls.__name__ in ["Int8Params", "FP4Params", "Params4bit"] ): device_quantization = device device = "cpu" # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). 
if isinstance(device, int): if is_npu_available(): device = f"npu:{device}" elif is_mlu_available(): device = f"mlu:{device}" elif is_musa_available(): device = f"musa:{device}" elif is_xpu_available(): device = f"xpu:{device}" if "xpu" in str(device) and not is_xpu_available(): raise ValueError(f'{device} is not available, you should use device="cpu" instead') if value is None: new_value = old_value.to(device) if dtype is not None and device in ["meta", torch.device("meta")]: if not str(old_value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): new_value = new_value.to(dtype) if not is_buffer: module._parameters[tensor_name] = param_cls(new_value, requires_grad=old_value.requires_grad) elif isinstance(value, torch.Tensor): new_value = value.to(device) else: new_value = torch.tensor(value, device=device) if device_quantization is not None: device = device_quantization if is_buffer: module._buffers[tensor_name] = new_value elif value is not None or not check_device_same(torch.device(device), module._parameters[tensor_name].device): param_cls = type(module._parameters[tensor_name]) kwargs = module._parameters[tensor_name].__dict__ if param_cls.__name__ in ["Int8Params", "FP4Params", "Params4bit"]: if param_cls.__name__ == "Int8Params" and new_value.dtype == torch.float32: # downcast to fp16 if any - needed for 8bit serialization new_value = new_value.to(torch.float16) # quantize module that are going to stay on the cpu so that we offload quantized weights if device == "cpu" and param_cls.__name__ == "Int8Params": new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(0).to("cpu") new_value.CB = new_value.CB.to("cpu") new_value.SCB = new_value.SCB.to("cpu") else: new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device) elif param_cls.__name__ in ["QTensor", "QBitsTensor"]: new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad).to(device) elif param_cls.__name__ in ["AffineQuantizedTensor"]: if importlib.util.find_spec("torchao") is not None and compare_versions("torchao", ">=", "0.7.0"): # TorchAO v0.7.0 made layout_tensor an internal private variable and exposed tensor_impl args = (new_value.tensor_impl,) else: args = (new_value.layout_tensor,) args += ( new_value.block_size, new_value.shape, new_value.quant_min, new_value.quant_max, new_value.zero_point_domain, ) new_value = torch.nn.Parameter(param_cls(*args), requires_grad=old_value.requires_grad).to(device) else: new_value = param_cls(new_value, requires_grad=old_value.requires_grad).to(device) module._parameters[tensor_name] = new_value if fp16_statistics is not None: module._parameters[tensor_name].SCB = fp16_statistics.to(device) del fp16_statistics # as we put the weight to meta, it doesn't have SCB attr anymore. 
make sure that it is not a meta weight if ( module.__class__.__name__ == "Linear8bitLt" and getattr(module.weight, "SCB", None) is None and str(module.weight.device) != "meta" ): # quantize only if necessary device_index = torch.device(device).index if torch.device(device).type == "cuda" else None if not getattr(module.weight, "SCB", None) and device_index is not None: if module.bias is not None and module.bias.device.type != "meta": # if a bias exists, we need to wait until the bias is set on the correct device module = module.cuda(device_index) elif module.bias is None: # if no bias exists, we can quantize right away module = module.cuda(device_index) elif ( module.__class__.__name__ == "Linear4bit" and getattr(module.weight, "quant_state", None) is None and str(module.weight.device) != "meta" ): # quantize only if necessary device_index = torch.device(device).index if torch.device(device).type == "cuda" else None if not getattr(module.weight, "quant_state", None) and device_index is not None: module.weight = module.weight.cuda(device_index) # clean pre and post foward hook if device != "cpu": clear_device_cache() # When handling tied weights, we update tied_params_map to keep track of the tied weights that have already been allocated on the device in # order to avoid duplicating memory, see above. if ( tied_params_map is not None and old_value.data_ptr() in tied_params_map and device not in tied_params_map[old_value.data_ptr()] ): tied_params_map[old_value.data_ptr()][device] = new_value elif ( value is not None and tied_params_map is not None and value.data_ptr() in tied_params_map and device not in tied_params_map[value.data_ptr()] ): tied_params_map[value.data_ptr()][device] = new_value def named_module_tensors( module: nn.Module, include_buffers: bool = True, recurse: bool = False, remove_non_persistent: bool = False ): """ A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True` it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`. Args: module (`torch.nn.Module`): The module we want the tensors on. include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result. recurse (`bool`, *optional`, defaults to `False`): Whether or not to go look in every submodule or just return the direct parameters and buffers. remove_non_persistent (`bool`, *optional*, defaults to `False`): Whether or not to remove the non persistent buffer from the buffers. Useful only when include_buffers = True """ yield from module.named_parameters(recurse=recurse) if include_buffers: non_persistent_buffers = set() if remove_non_persistent: non_persistent_buffers = get_non_persistent_buffers(module, recurse=recurse) for named_buffer in module.named_buffers(recurse=recurse): name, _ = named_buffer if name not in non_persistent_buffers: yield named_buffer def get_non_persistent_buffers(module: nn.Module, recurse: bool = False): """ Gather all non persistent buffers of a given modules into a set Args: module (`nn.Module`): The module we want the non persistent buffers on. recurse (`bool`, *optional*, defaults to `False`): Whether or not to go look in every submodule or just return the direct non persistent buffers. 
""" non_persistent_buffers_set = module._non_persistent_buffers_set if recurse: for _, m in module.named_modules(): non_persistent_buffers_set |= m._non_persistent_buffers_set return non_persistent_buffers_set class FindTiedParametersResult(list): """ This is a subclass of a list to handle backward compatibility for Transformers. Do not rely on the fact this is not a list or on the `values` method as in the future this will be removed. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def values(self): warnings.warn( "The 'values' method of FindTiedParametersResult is deprecated and will be removed in Accelerate v1.3.0. ", FutureWarning, ) return sum([x[1:] for x in self], []) def check_tied_parameters_in_config(model: nn.Module): """ Check if there is any indication in the given model that some weights should be tied. Args: model (`torch.nn.Module`): The model to inspect Returns: bool: True if the model needs to have tied weights """ # based on model.tie_weights() method has_tied_word_embedding = False has_tied_encoder_decoder = False has_tied_module = False if "PreTrainedModel" in [c.__name__ for c in inspect.getmro(model.__class__)]: has_tied_word_embedding = ( hasattr(model, "config") and getattr(model.config, "tie_word_embeddings", False) and model.get_output_embeddings() ) has_tied_encoder_decoder = ( hasattr(model, "config") and getattr(model.config, "is_encoder_decoder", False) and getattr(model.config, "tie_encoder_decoder", False) ) has_tied_module = any(hasattr(module, "_tie_weights") for module in model.modules()) return any([has_tied_word_embedding, has_tied_encoder_decoder, has_tied_module]) def _get_param_device(param, device_map): if param in device_map: return device_map[param] parent_param = ".".join(param.split(".")[:-1]) if parent_param == param: raise ValueError(f"The `device_map` does not contain the module {param}.") else: return _get_param_device(parent_param, device_map) def check_tied_parameters_on_same_device(tied_params, device_map): """ Check if tied parameters are on the same device Args: tied_params (`List[List[str]]`): A list of lists of parameter names being all tied together. device_map (`Dict[str, Union[int, str, torch.device]]`): A map that specifies where each submodule should go. """ for tie_param in tied_params: tie_param_devices = {} for param in tie_param: tie_param_devices[param] = _get_param_device(param, device_map) if len(set(tie_param_devices.values())) > 1: logger.warn( f"Tied parameters are on different devices: {tie_param_devices}. " "Please modify your custom device map or set `device_map='auto'`. " ) def find_tied_parameters(model: torch.nn.Module, **kwargs): """ Find the tied parameters in a given model. <Tip warning={true}> The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore them. </Tip> Args: model (`torch.nn.Module`): The model to inspect. Returns: List[List[str]]: A list of lists of parameter names being all tied together. 
Example: ```py >>> from collections import OrderedDict >>> import torch.nn as nn >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))])) >>> model.linear2.weight = model.linear1.weight >>> find_tied_parameters(model) [['linear1.weight', 'linear2.weight']] ``` """ # get ALL model parameters and thier names all_named_parameters = {name: param for name, param in model.named_parameters(remove_duplicate=False)} # get ONLY unique named parameters, # if parameter is tied and have multiple names, it will be included only once no_duplicate_named_parameters = {name: param for name, param in model.named_parameters(remove_duplicate=True)} # the difference of the two sets will give us the tied parameters tied_param_names = set(all_named_parameters.keys()) - set(no_duplicate_named_parameters.keys()) # 'tied_param_names' contains the names of parameters that are tied in the model, but we do not know # which names refer to the same parameter. To identify this, we need to group them together. tied_param_groups = {} for tied_param_name in tied_param_names: tied_param = all_named_parameters[tied_param_name] for param_name, param in no_duplicate_named_parameters.items(): # compare if parameters are the same, if so, group thier names together if param is tied_param: if param_name not in tied_param_groups: tied_param_groups[param_name] = [] tied_param_groups[param_name].append(tied_param_name) return FindTiedParametersResult([sorted([weight] + list(set(tied))) for weight, tied in tied_param_groups.items()]) def retie_parameters(model, tied_params): """ Reties tied parameters in a given model if the link was broken (for instance when adding hooks). Args: model (`torch.nn.Module`): The model in which to retie parameters. tied_params (`List[List[str]]`): A mapping parameter name to tied parameter name as obtained by `find_tied_parameters`. """ for tied_group in tied_params: param_to_tie = None # two loops : the first one to set param_to_tie , the second one to change the values of tied_group for param_name in tied_group: module = model splits = param_name.split(".") for split in splits[:-1]: module = getattr(module, split) param = getattr(module, splits[-1]) if param_to_tie is None and param.device != torch.device("meta"): param_to_tie = param break if param_to_tie is not None: for param_name in tied_group: module = model splits = param_name.split(".") for split in splits[:-1]: module = getattr(module, split) setattr(module, splits[-1], param_to_tie) def _get_proper_dtype(dtype: Union[str, torch.device]) -> torch.dtype: """ Just does torch.dtype(dtype) if necessary. """ if isinstance(dtype, str): # We accept "torch.float16" or just "float16" dtype = dtype.replace("torch.", "") dtype = getattr(torch, dtype) return dtype def compute_module_sizes( model: nn.Module, dtype: Optional[Union[str, torch.device]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, buffers_only: bool = False, ): """ Compute the size of each submodule of a given model. 
""" if dtype is not None: dtype = _get_proper_dtype(dtype) dtype_size = dtype_byte_size(dtype) if special_dtypes is not None: special_dtypes = {key: _get_proper_dtype(dtyp) for key, dtyp in special_dtypes.items()} special_dtypes_size = {key: dtype_byte_size(dtyp) for key, dtyp in special_dtypes.items()} module_sizes = defaultdict(int) module_list = [] if not buffers_only: module_list = named_module_tensors(model, recurse=True) else: module_list = model.named_buffers(recurse=True) for name, tensor in module_list: if special_dtypes is not None and name in special_dtypes: size = tensor.numel() * special_dtypes_size[name] elif dtype is None: size = tensor.numel() * dtype_byte_size(tensor.dtype) elif str(tensor.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): # According to the code in set_module_tensor_to_device, these types won't be converted # so use their original size here size = tensor.numel() * dtype_byte_size(tensor.dtype) else: size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype)) name_parts = name.split(".") for idx in range(len(name_parts) + 1): module_sizes[".".join(name_parts[:idx])] += size return module_sizes def compute_module_total_buffer_size( model: nn.Module, dtype: Optional[Union[str, torch.device]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, ): """ Compute the total size of buffers in each submodule of a given model. """ module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes, buffers_only=True) return module_sizes.get("", 0) def get_max_layer_size( modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str] ): """ Utility function that will scan a list of named modules and return the maximum size used by one full layer. The definition of a layer being: - a module with no direct children (just parameters and buffers) - a module whose class name is in the list `no_split_module_classes` Args: modules (`List[Tuple[str, torch.nn.Module]]`): The list of named modules where we want to determine the maximum layer size. module_sizes (`Dict[str, int]`): A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`). no_split_module_classes (`List[str]`): A list of class names for layers we don't want to be split. Returns: `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size. """ max_size = 0 layer_names = [] modules_to_treat = modules.copy() while len(modules_to_treat) > 0: module_name, module = modules_to_treat.pop(0) modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else [] if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes: # No splitting this one so we compare to the max_size size = module_sizes[module_name] if size > max_size: max_size = size layer_names = [module_name] elif size == max_size: layer_names.append(module_name) else: modules_to_treat = [(f"{module_name}.{n}", v) for n, v in modules_children] + modules_to_treat return max_size, layer_names def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None): """ Get the maximum memory available if nothing is passed, converts string to int otherwise. """ import psutil if max_memory is None: max_memory = {} # Make sure CUDA is initialized on each GPU to have the right memory info. 
if is_npu_available(): for i in range(torch.npu.device_count()): try: _ = torch.tensor(0, device=torch.device("npu", i)) max_memory[i] = torch.npu.mem_get_info(i)[0] except Exception: logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.") continue elif is_mlu_available(): for i in range(torch.mlu.device_count()): try: _ = torch.tensor(0, device=torch.device("mlu", i)) max_memory[i] = torch.mlu.mem_get_info(i)[0] except Exception: logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.") continue elif is_musa_available(): for i in range(torch.musa.device_count()): try: _ = torch.tensor(0, device=torch.device("musa", i)) max_memory[i] = torch.musa.mem_get_info(i)[0] except Exception: logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.") continue elif is_xpu_available(): for i in range(torch.xpu.device_count()): try: _ = torch.tensor(0, device=torch.device("xpu", i)) max_memory[i] = get_xpu_available_memory(i) except Exception: logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.") continue else: for i in range(torch.cuda.device_count()): try: _ = torch.tensor([0], device=i) max_memory[i] = torch.cuda.mem_get_info(i)[0] except Exception: logger.info(f"Device {i} seems unavailable, Proceeding to check subsequent devices.") continue # allocate everything in the mps device as the RAM is shared if is_mps_available(): max_memory["mps"] = psutil.virtual_memory().available else: max_memory["cpu"] = psutil.virtual_memory().available return max_memory for key in max_memory: if isinstance(max_memory[key], str): max_memory[key] = convert_file_size_to_int(max_memory[key]) # Need to sort the device by type to make sure that we allocate the gpu first. # As gpu/npu/xpu are represented by int, we need to sort them first. gpu_devices = [k for k in max_memory.keys() if isinstance(k, int)] gpu_devices.sort() # check if gpu/npu/xpu devices are available and if not, throw a warning if is_npu_available(): num_devices = torch.npu.device_count() elif is_mlu_available(): num_devices = torch.mlu.device_count() elif is_musa_available(): num_devices = torch.musa.device_count() elif is_xpu_available(): num_devices = torch.xpu.device_count() else: num_devices = torch.cuda.device_count() for device in gpu_devices: if device >= num_devices or device < 0: logger.warning(f"Device {device} is not available, available devices are {list(range(num_devices))}") # Add the other devices in the preset order if they are available all_devices = gpu_devices + [k for k in ["mps", "cpu", "disk"] if k in max_memory.keys()] # Raise an error if a device is not recognized for k in max_memory.keys(): if k not in all_devices: raise ValueError( f"Device {k} is not recognized, available devices are integers(for GPU/XPU), 'mps', 'cpu' and 'disk'" ) max_memory = {k: max_memory[k] for k in all_devices} return max_memory def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = ""): """ Cleans a device_map by grouping all submodules that go on the same device together. """ # Get the value of the current module and if there is only one split across several keys, regroup it. prefix = "" if module_name == "" else f"{module_name}." 
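    # e.g. {"block.0": 0, "block.1": 0, "head": 1} becomes {"block": 0, "head": 1}: both "block" entries share a
    # device, so the recursion below regroups them under their parent key.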
values = [v for k, v in device_map.items() if k.startswith(prefix)] if len(set(values)) == 1 and len(values) > 1: for k in [k for k in device_map if k.startswith(prefix)]: del device_map[k] device_map[module_name] = values[0] # Recurse over the children children_modules = [k for k in device_map.keys() if k.startswith(prefix) and len(k) > len(module_name)] idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1 children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules) for child in children_modules: clean_device_map(device_map, module_name=child) return device_map def load_offloaded_weights(model, index, offload_folder): """ Loads the weights from the offload folder into the model. Args: model (`torch.nn.Module`): The model to load the weights into. index (`dict`): A dictionary containing the parameter name and its metadata for each parameter that was offloaded from the model. offload_folder (`str`): The folder where the offloaded weights are stored. """ if index is None or len(index) == 0: # Nothing to do return for param_name, metadata in index.items(): if "SCB" in param_name: continue fp16_statistics = None if "weight" in param_name and param_name.replace("weight", "SCB") in index.keys(): weight_name = param_name.replace("weight", "SCB") fp16_statistics = load_offloaded_weight( os.path.join(offload_folder, f"{weight_name}.dat"), index[weight_name] ) tensor_file = os.path.join(offload_folder, f"{param_name}.dat") weight = load_offloaded_weight(tensor_file, metadata) set_module_tensor_to_device(model, param_name, "cpu", value=weight, fp16_statistics=fp16_statistics) def get_module_leaves(module_sizes): module_children = {} for module in module_sizes: if module == "" or "." not in module: continue parent = module.rsplit(".", 1)[0] module_children[parent] = module_children.get(parent, 0) + 1 leaves = [module for module in module_sizes if module_children.get(module, 0) == 0 and module != ""] return leaves def get_balanced_memory( model: nn.Module, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, no_split_module_classes: Optional[List[str]] = None, dtype: Optional[Union[str, torch.dtype]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, low_zero: bool = False, ): """ Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU. <Tip> All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the meta device (as it would if initialized within the `init_empty_weights` context manager). </Tip> Args: model (`torch.nn.Module`): The model to analyze. max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. Example: `max_memory={0: "1GB"}`. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*): If provided, special dtypes to consider for some specific weights (will override dtype used as default for all weights). low_zero (`bool`, *optional*): Minimizes the number of weights on GPU 0, which is convenient when it's used for other operations (like the Transformers generate function). 
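
    Example (an illustrative sketch; `MyBigModel` and `MyBlock` are hypothetical placeholders for your own model
    class and its no-split block class):

    ```py
    >>> from accelerate import infer_auto_device_map, init_empty_weights
    >>> from accelerate.utils import get_balanced_memory

    >>> with init_empty_weights():
    ...     model = MyBigModel()
    >>> max_memory = get_balanced_memory(model, no_split_module_classes=["MyBlock"])
    >>> device_map = infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["MyBlock"])
    ```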
""" # Get default / clean up max_memory user_not_set_max_memory = max_memory is None max_memory = get_max_memory(max_memory) if is_npu_available(): expected_device_type = "npu" elif is_mlu_available(): expected_device_type = "mlu" elif is_musa_available(): expected_device_type = "musa" elif is_xpu_available(): expected_device_type = "xpu" else: expected_device_type = "cuda" num_devices = len([d for d in max_memory if torch.device(d).type == expected_device_type and max_memory[d] > 0]) if num_devices == 0: return max_memory if num_devices == 1: # We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer low_zero = False # If user just asked us to handle memory usage, we should avoid OOM if user_not_set_max_memory: for key in max_memory.keys(): if isinstance(key, int): max_memory[key] *= 0.9 # 90% is a good compromise logger.info( f"We will use 90% of the memory on device {key} for storing the model, and 10% for the buffer to avoid OOM. " "You can set `max_memory` in to a higher value to use more memory (at your own risk)." ) break # only one device module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes) per_gpu = module_sizes[""] // (num_devices - 1 if low_zero else num_devices) # We can't just set the memory to model_size // num_devices as it will end being too small: each GPU will get # slightly less layers and some layers will end up offload at the end. So this function computes a buffer size to # add which is the biggest of: # - the size of no split block (if applicable) # - the mean of the layer sizes if no_split_module_classes is None: no_split_module_classes = [] elif not isinstance(no_split_module_classes, (list, tuple)): no_split_module_classes = [no_split_module_classes] # Identify the size of the no_split_block modules if len(no_split_module_classes) > 0: no_split_children = {} for name, size in module_sizes.items(): if name == "": continue submodule = model for submodule_name in name.split("."): submodule = getattr(submodule, submodule_name) class_name = submodule.__class__.__name__ if class_name in no_split_module_classes and class_name not in no_split_children: no_split_children[class_name] = size if set(no_split_children.keys()) == set(no_split_module_classes): break buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0 else: buffer = 0 # Compute mean of final modules. In the first dict of module sizes, leaves are the parameters leaves = get_module_leaves(module_sizes) module_sizes = {n: v for n, v in module_sizes.items() if n not in leaves} # Once removed, leaves are the final modules. leaves = get_module_leaves(module_sizes) mean_leaves = int(sum([module_sizes[n] for n in leaves]) / max(len(leaves), 1)) buffer = int(1.25 * max(buffer, mean_leaves)) per_gpu += buffer # Sorted list of GPUs id (we may have some gpu ids not included in the our max_memory list - let's ignore them) gpus_idx_list = list( sorted( device_id for device_id, device_mem in max_memory.items() if isinstance(device_id, int) and device_mem > 0 ) ) # The last device is left with max_memory just in case the buffer is not enough. 
for idx in gpus_idx_list[:-1]: max_memory[idx] = min(max_memory[0] if low_zero and idx == 0 else per_gpu, max_memory[idx]) if low_zero: min_zero = max(0, module_sizes[""] - sum([max_memory[i] for i in range(1, num_devices)])) max_memory[0] = min(min_zero, max_memory[0]) return max_memory def calculate_maximum_sizes(model: torch.nn.Module): "Computes the total size of the model and its largest layer" sizes = compute_module_sizes(model) # `transformers` models store this information for us no_split_modules = getattr(model, "_no_split_modules", None) if no_split_modules is None: no_split_modules = [] modules_to_treat = ( list(model.named_parameters(recurse=False)) + list(model.named_children()) + list(model.named_buffers(recurse=False)) ) largest_layer = get_max_layer_size(modules_to_treat, sizes, no_split_modules) total_size = sizes[""] return total_size, largest_layer def _init_infer_auto_device_map( model: nn.Module, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, no_split_module_classes: Optional[List[str]] = None, dtype: Optional[Union[str, torch.dtype]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, ) -> Tuple[ List[Union[int, str]], Dict[Union[int, str], Union[int, str]], List[Union[int, str]], List[int], Dict[str, int], List[List[str]], List[str], List[Tuple[str, nn.Module]], ]: """ Initialize variables required for computing the device map for model allocation. """ max_memory = get_max_memory(max_memory) if no_split_module_classes is None: no_split_module_classes = [] elif not isinstance(no_split_module_classes, (list, tuple)): no_split_module_classes = [no_split_module_classes] devices = list(max_memory.keys()) if "disk" not in devices: devices.append("disk") gpus = [device for device in devices if device not in ["cpu", "disk"]] # Devices that need to keep space for a potential offloaded layer. if "mps" in gpus: main_devices = ["mps"] elif len(gpus) > 0: main_devices = [gpus[0], "cpu"] else: main_devices = ["cpu"] module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes) tied_parameters = find_tied_parameters(model) if check_tied_parameters_in_config(model) and len(tied_parameters) == 0: logger.warn( "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function." ) # Direct submodules and parameters modules_to_treat = ( list(model.named_parameters(recurse=False)) + list(model.named_children()) + list(model.named_buffers(recurse=False)) ) return ( devices, max_memory, main_devices, gpus, module_sizes, tied_parameters, no_split_module_classes, modules_to_treat, ) def get_module_size_with_ties( tied_params, module_size, module_sizes, modules_to_treat, ) -> Tuple[int, List[str], List[nn.Module]]: """ Calculate the total size of a module, including its tied parameters. Args: tied_params (`List[str]`): The list of tied parameters. module_size (`int`): The size of the module without tied parameters. module_sizes (`Dict[str, int]`): A dictionary mapping each layer name to its size. modules_to_treat (`List[Tuple[str, nn.Module]]`): The list of named modules to treat. Returns: `Tuple[int, List[str], List[nn.Module]]`: The total size of the module, the names of the tied modules, and the tied modules. 
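
    For instance (a hypothetical sketch with made-up sizes, in bytes): if the current module has size 100 and is tied
    to a parameter `decoder.embed.weight` of size 40 that lives in a `decoder.embed` module of size 60, the total
    returned is 100 + (60 - 40) = 120, along with `["decoder.embed"]` and the matching module object.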
""" if len(tied_params) < 1: return module_size, [], [] tied_module_names = [] tied_modules = [] for tied_param in tied_params: tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if tied_param.startswith(n + ".")][0] tied_module_names.append(modules_to_treat[tied_module_index][0]) tied_modules.append(modules_to_treat[tied_module_index][1]) module_size_with_ties = module_size for tied_param, tied_module_name in zip(tied_params, tied_module_names): module_size_with_ties += module_sizes[tied_module_name] - module_sizes[tied_param] return module_size_with_ties, tied_module_names, tied_modules def fallback_allocate( modules: List[Tuple[str, nn.Module]], module_sizes: Dict[str, int], size_limit: Union[int, str], no_split_module_classes: Optional[List[str]] = None, tied_parameters: Optional[List[List[str]]] = None, ) -> Tuple[Optional[str], Optional[nn.Module], List[Tuple[str, nn.Module]]]: """ Find a module that fits in the size limit using BFS and return it with its name and the remaining modules. Args: modules (`List[Tuple[str, nn.Module]]`): The list of named modules to search in. module_sizes (`Dict[str, int]`): A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`). size_limit (`Union[int, str]`): The maximum size a module can have. no_split_module_classes (`Optional[List[str]]`, *optional*): A list of class names for layers we don't want to be split. tied_parameters (`Optional[List[List[str]]`, *optional*): A list of lists of parameter names being all tied together. Returns: `Tuple[Optional[str], Optional[nn.Module], List[Tuple[str, nn.Module]]]`: A tuple containing: - The name of the module that fits within the size limit. - The module itself. - The list of remaining modules after the found module is removed. """ try: size_limit = convert_file_size_to_int(size_limit) except ValueError: return None, None, modules if no_split_module_classes is None: no_split_module_classes = [] if tied_parameters is None: tied_parameters = [] modules_to_search = modules.copy() module_found = False while modules_to_search: name, module = modules_to_search.pop(0) tied_param_groups = [ tied_group for tied_group in tied_parameters if any(name + "." in k + "." for k in tied_group) and not all(name + "." in k + "." for k in tied_group) ] tied_params = sum( [[p for p in tied_group if name + "." not in p + "."] for tied_group in tied_param_groups], [] ) module_size_with_ties, _, _ = get_module_size_with_ties( tied_params, module_sizes[name], module_sizes, modules_to_search ) # If the module fits in the size limit, we found it. if module_size_with_ties <= size_limit: module_found = True break # The module is too big, we need to split it if possible. 
        modules_children = (
            []
            if isinstance(module, nn.Parameter) or isinstance(module, torch.Tensor)
            else list(module.named_children())
        )

        # Split fails, move to the next module
        if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:
            continue

        # split is possible, add the children to the list of modules to search
        modules_children = list(module.named_parameters(recurse=False)) + modules_children
        modules_to_search = [(f"{name}.{n}", v) for n, v in modules_children] + modules_to_search

    if not module_found:
        return None, None, modules

    # Prepare the module list for removal of the found module
    current_names = [n for n, _ in modules]
    dot_idx = [i for i, c in enumerate(name) if c == "."]

    for dot_index in dot_idx:
        parent_name = name[:dot_index]
        if parent_name in current_names:
            parent_module_idx = current_names.index(parent_name)
            _, parent_module = modules[parent_module_idx]
            module_children = list(parent_module.named_parameters(recurse=False)) + list(
                parent_module.named_children()
            )
            modules = (
                modules[:parent_module_idx]
                + [(f"{parent_name}.{n}", v) for n, v in module_children]
                + modules[parent_module_idx + 1 :]
            )
            current_names = [n for n, _ in modules]

    # Now the target module should be directly in the list
    target_idx = current_names.index(name)
    name, module = modules.pop(target_idx)

    return name, module, modules


def infer_auto_device_map(
    model: nn.Module,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    dtype: Optional[Union[str, torch.dtype]] = None,
    special_dtypes: Optional[Dict[str, Union[str, torch.dtype]]] = None,
    verbose: bool = False,
    clean_result: bool = True,
    offload_buffers: bool = False,
    fallback_allocation: bool = False,
):
    """
    Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,
    such that:
    - we don't exceed the memory available on any of the GPUs.
    - if offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that
      has the largest size.
    - if offload to the CPU is needed, we don't exceed the RAM available on the CPU.
    - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk
      that has the largest size.

    <Tip>

    All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
    meta device (as it would if initialized within the `init_empty_weights` context manager).

    </Tip>

    Args:
        model (`torch.nn.Module`):
            The model to analyze.
        max_memory (`Dict`, *optional*):
            A dictionary mapping device identifiers to their maximum memory. Will default to the maximum memory
            available if unset. Example: `max_memory={0: "1GB"}`.
        no_split_module_classes (`List[str]`, *optional*):
            A list of layer class names that should never be split across devices (for instance any layer that has a
            residual connection).
        dtype (`str` or `torch.dtype`, *optional*):
            If provided, the weights will be converted to that type when loaded.
        special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*):
            If provided, special dtypes to consider for some specific weights (will override dtype used as default
            for all weights).
        verbose (`bool`, *optional*, defaults to `False`):
            Whether or not to provide debugging statements as the function builds the device_map.
        clean_result (`bool`, *optional*, defaults to `True`):
            Clean the resulting device_map by grouping all submodules that go on the same device together.
offload_buffers (`bool`, *optional*, defaults to `False`): In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as well as the parameters. fallback_allocation (`bool`, *optional*, defaults to `False`): When regular allocation fails, try to allocate a module that fits in the size limit using BFS. """ # Initialize the variables ( devices, max_memory, main_devices, gpus, module_sizes, tied_parameters, no_split_module_classes, modules_to_treat, ) = _init_infer_auto_device_map(model, max_memory, no_split_module_classes, dtype, special_dtypes) device_map = OrderedDict() current_device = 0 device_memory_used = {device: 0 for device in devices} device_buffer_sizes = {} device_minimum_assignment_memory = {} # Initialize maximum largest layer, to know which space to keep in memory max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes) # Ready ? This is going to be a bit messy. while len(modules_to_treat) > 0: name, module = modules_to_treat.pop(0) if verbose: print(f"\nTreating module {name}.") # Max size in the remaining layers may have changed since we took one, so we maybe update it. max_layer_names = [n for n in max_layer_names if n != name and not n.startswith(name + ".")] if len(max_layer_names) == 0: max_layer_size, max_layer_names = get_max_layer_size( [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], module_sizes, no_split_module_classes, ) # Assess size needed module_size = module_sizes[name] # We keep relevant tied parameters only: one of the tied parameters in the group is inside the current module # and the other is not. # Note: If we are currently processing the name `compute.weight`, an other parameter named # e.g. `compute.weight_submodule.parameter` # needs to be considered outside the current module, hence the check with additional dots. tied_param_groups = [ tied_group for tied_group in tied_parameters if any(name + "." in k + "." for k in tied_group) and not all(name + "." in k + "." for k in tied_group) ] if verbose and len(tied_param_groups) > 0: print(f" Found the relevant tied param groups {tied_param_groups}") # Then we keep track of all the parameters that are tied to the current module, but not in the current module tied_params = sum( [[p for p in tied_group if name + "." not in p + "."] for tied_group in tied_param_groups], [] ) if verbose and len(tied_params) > 0: print(f" So those parameters need to be taken into account {tied_params}") device = devices[current_device] current_max_size = max_memory[device] if device != "disk" else None current_memory_reserved = 0 # Reduce max size available by the largest layer. if devices[current_device] in main_devices: current_max_size = current_max_size - max_layer_size current_memory_reserved = max_layer_size module_size_with_ties, tied_module_names, tied_modules = get_module_size_with_ties( tied_params, module_size, module_sizes, modules_to_treat ) # The module and its tied modules fit on the current device. if current_max_size is None or device_memory_used[device] + module_size_with_ties <= current_max_size: if verbose: output = f"Putting {name}" if tied_module_names: output += f" and {tied_module_names}" else: output += f" (size={module_size})" if current_max_size is not None: output += f" (available={current_max_size - device_memory_used[device]})" output += f" on {device}." print(output) device_memory_used[device] += module_size_with_ties # Assign the primary module to the device. 
device_map[name] = device # Assign tied modules if any. for tied_module_name in tied_module_names: if tied_module_name in [m[0] for m in modules_to_treat]: # Find the index of the tied module in the list tied_module_index = next(i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name) # Remove the tied module from the list to prevent reprocessing modules_to_treat.pop(tied_module_index) # Assign the tied module to the device device_map[tied_module_name] = device # Buffer Handling if not offload_buffers and isinstance(module, nn.Module): # Compute the total buffer size for the module current_buffer_size = compute_module_total_buffer_size( module, dtype=dtype, special_dtypes=special_dtypes ) # Update the buffer size on the device device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size continue # The current module itself fits, so we try to split the tied modules. if len(tied_params) > 0 and device_memory_used[device] + module_size <= current_max_size: # can we split one of the tied modules to make it smaller or do we need to go on the next device? if verbose: print( f"Not enough space on {devices[current_device]} to put {name} and {tied_module_names} (space " f"available {current_max_size - device_memory_used[device]}, needed size {module_size_with_ties})." ) split_happened = False for tied_module_name, tied_module in zip(tied_module_names, tied_modules): tied_module_children = list(tied_module.named_children()) if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes: # can't break this one. continue if verbose: print(f"Splitting {tied_module_name}.") tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children tied_module_children = [(f"{tied_module_name}.{n}", v) for n, v in tied_module_children] tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][0] modules_to_treat = ( [(name, module)] + modules_to_treat[:tied_module_index] + tied_module_children + modules_to_treat[tied_module_index + 1 :] ) # Update the max layer size. max_layer_size, max_layer_names = get_max_layer_size( [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], module_sizes, no_split_module_classes, ) split_happened = True break if split_happened: continue # If the tied module is not split, we go to the next device if verbose: print("None of the tied module can be split, going to the next device.") # The current module itself doesn't fit, so we have to split it or go to the next device. if device_memory_used[device] + module_size >= current_max_size: # Split or not split? modules_children = ( [] if isinstance(module, nn.Parameter) or isinstance(module, torch.Tensor) else list(module.named_children()) ) if verbose: print( f"Not enough space on {devices[current_device]} to put {name} (space available " f"{current_max_size - device_memory_used[device]}, module size {module_size})." ) if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes: # -> no split, we go to the next device if verbose: print("This module cannot be split, going to the next device.") else: # -> split, we replace the module studied by its children + parameters if verbose: print(f"Splitting {name}.") modules_children = list(module.named_parameters(recurse=False)) + modules_children modules_to_treat = [(f"{name}.{n}", v) for n, v in modules_children] + modules_to_treat # Update the max layer size. 
                max_layer_size, max_layer_names = get_max_layer_size(
                    [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
                    module_sizes,
                    no_split_module_classes,
                )
                continue

        # If no module is assigned to the current device, we attempt to allocate a fallback module
        # if fallback_allocation is enabled.
        if device_memory_used[device] == 0 and fallback_allocation and device != "disk":
            # We try to allocate a module that fits in the size limit using BFS.
            # Recompute the current max size as we need to consider the current module as well.
            current_max_size = max_memory[device] - max(max_layer_size, module_size_with_ties)
            fallback_module_name, fallback_module, remaining_modules = fallback_allocate(
                modules_to_treat,
                module_sizes,
                current_max_size - device_memory_used[device],
                no_split_module_classes,
                tied_parameters,
            )
            # use the next iteration to put the fallback module on the next device to avoid code duplication
            if fallback_module is not None:
                modules_to_treat = [(fallback_module_name, fallback_module)] + [(name, module)] + remaining_modules
                continue

        if device_memory_used[device] == 0:
            device_minimum_assignment_memory[device] = module_size_with_ties + current_memory_reserved

        # Neither the current module nor any tied modules can be split, so we move to the next device.
        device_memory_used[device] = device_memory_used[device] + current_memory_reserved
        current_device += 1
        modules_to_treat = [(name, module)] + modules_to_treat

    device_memory_used = {device: mem for device, mem in device_memory_used.items() if mem > 0}

    if clean_result:
        device_map = clean_device_map(device_map)

    non_gpu_buffer_size = device_buffer_sizes.get("cpu", 0) + device_buffer_sizes.get("disk", 0)
    if non_gpu_buffer_size > 0 and not offload_buffers:
        is_buffer_fit_any_gpu = False
        for gpu_device, gpu_max_memory in max_memory.items():
            if gpu_device == "cpu" or gpu_device == "disk":
                continue

            if not is_buffer_fit_any_gpu:
                gpu_memory_used = device_memory_used.get(gpu_device, 0)

                if gpu_max_memory >= non_gpu_buffer_size + gpu_memory_used:
                    is_buffer_fit_any_gpu = True

        if len(gpus) > 0 and not is_buffer_fit_any_gpu:
            warnings.warn(
                f"Current model requires {non_gpu_buffer_size} bytes of buffer for offloaded layers, which does not "
                f"seem to fit in any GPU's remaining memory. If you are experiencing an OOM later, please consider "
                f"using offload_buffers=True."
            )

    if device_minimum_assignment_memory:
        devices_info = "\n".join(
            f" - {device}: {mem} bytes required" for device, mem in device_minimum_assignment_memory.items()
        )
        logger.info(
            f"Based on the current allocation process, no modules could be assigned to the following devices due to "
            f"insufficient memory:\n"
            f"{devices_info}\n"
            f"These minimum requirements are specific to this allocation attempt and may vary. Consider increasing "
            f"the available memory for these devices to at least the specified minimum, or adjusting the model config."
        )
    return device_map


def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):
    """
    Checks that a device map covers everything in a given model.

    Args:
        model (`torch.nn.Module`): The model to check the device map against.
        device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.
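
    Example (a minimal sketch):

    ```py
    >>> import torch.nn as nn

    >>> model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4))
    >>> check_device_map(model, {"": "cpu"})  # every tensor is covered, no error
    >>> check_device_map(model, {"0": "cpu"})  # the second Linear has no device
    Traceback (most recent call last):
        ...
    ValueError: The device_map provided does not give any device for the following parameters: 1.weight, 1.bias
    ```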
""" all_model_tensors = [name for name, _ in model.state_dict().items()] for module_name in device_map.keys(): if module_name == "": all_model_tensors.clear() break else: all_model_tensors = [ name for name in all_model_tensors if not name == module_name and not name.startswith(module_name + ".") ] if len(all_model_tensors) > 0: non_covered_params = ", ".join(all_model_tensors) raise ValueError( f"The device_map provided does not give any device for the following parameters: {non_covered_params}" ) def load_state_dict(checkpoint_file, device_map=None): """ Load a checkpoint from a given file. If the checkpoint is in the safetensors format and a device map is passed, the weights can be fast-loaded directly on the GPU. Args: checkpoint_file (`str`): The path to the checkpoint to load. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. """ if checkpoint_file.endswith(".safetensors"): with safe_open(checkpoint_file, framework="pt") as f: metadata = f.metadata() weight_names = f.keys() if metadata is None: logger.warn( f"The safetensors archive passed at {checkpoint_file} does not contain metadata. " "Make sure to save your model with the `save_pretrained` method. Defaulting to 'pt' metadata." ) metadata = {"format": "pt"} if metadata.get("format") not in ["pt", "tf", "flax"]: raise OSError( f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure " "you save your model with the `save_pretrained` method." ) elif metadata["format"] != "pt": raise ValueError(f"The checkpoint passed was saved with {metadata['format']}, we need a the pt format.") if device_map is None: return safe_load_file(checkpoint_file) else: # if we only have one device we can load everything directly if len(set(device_map.values())) == 1: device = list(device_map.values())[0] target_device = device if isinstance(device, int): if is_xpu_available(): target_device = f"xpu:{device}" elif is_npu_available(): target_device = f"npu:{device}" return safe_load_file(checkpoint_file, device=target_device) devices = list(set(device_map.values()) - {"disk"}) # cpu device should always exist as fallback option if "cpu" not in devices: devices.append("cpu") # For each device, get the weights that go there device_weights = {device: [] for device in devices} for module_name, device in device_map.items(): if device in devices: device_weights[device].extend( [k for k in weight_names if k == module_name or k.startswith(module_name + ".")] ) # all weights that haven't defined a device should be loaded on CPU device_weights["cpu"].extend([k for k in weight_names if k not in sum(device_weights.values(), [])]) tensors = {} if is_tqdm_available(): progress_bar = tqdm( main_process_only=False, total=sum([len(device_weights[device]) for device in devices]), unit="w", smoothing=0, leave=False, ) else: progress_bar = None for device in devices: target_device = device if isinstance(device, int): if is_xpu_available(): target_device = f"xpu:{device}" elif is_npu_available(): target_device = f"npu:{device}" with safe_open(checkpoint_file, framework="pt", device=target_device) as f: for key in device_weights[device]: if progress_bar is not None: progress_bar.set_postfix(dev=device, refresh=False) progress_bar.set_description(key) tensors[key] = f.get_tensor(key) if progress_bar is not None: 
progress_bar.update() if progress_bar is not None: progress_bar.close() return tensors else: return torch.load(checkpoint_file, map_location=torch.device("cpu")) def get_state_dict_offloaded_model(model: nn.Module): """ Returns the state dictionary for an offloaded model via iterative onloading Args: model (`torch.nn.Module`): The offloaded model we want to save """ state_dict = {} placeholders = set() for name, module in model.named_modules(): if name == "": continue try: with align_module_device(module, "cpu"): module_state_dict = module.state_dict() except MemoryError: raise MemoryError("Offloaded module must fit in CPU memory to call save_model!") from None for key in module_state_dict: # ignore placeholder parameters that are still on the meta device if module_state_dict[key].device == torch.device("meta"): placeholders.add(name + f".{key}") continue params = module_state_dict[key] state_dict[name + f".{key}"] = params.to("cpu") # move buffers to cpu for key in placeholders.copy(): if key in state_dict: placeholders.remove(key) if placeholders: logger.warning(f"The following tensors were not saved because they were still on meta device: {placeholders}") return state_dict def get_state_dict_from_offload( module: nn.Module, module_name: str, state_dict: Dict[str, Union[str, torch.tensor]], device_to_put_offload: Union[int, str, torch.device] = "cpu", ): """ Retrieve the state dictionary (with parameters) from an offloaded module and load into a specified device (defaults to cpu). Args: module: (`torch.nn.Module`): The module we want to retrieve a state dictionary from module_name: (`str`): The name of the module of interest state_dict (`Dict[str, Union[int, str, torch.device]]`): Dictionary of {module names: parameters} device_to_put_offload (`Union[int, str, torch.device]`): Device to load offloaded parameters into, defaults to the cpu. """ root = module_name[: module_name.rfind(".")] # module name without .weight or .bias # do not move parameters if the module is not offloaded if not has_offloaded_params(module): device_to_put_offload = None # assign the device to which the offloaded parameters will be sent with align_module_device(module, device_to_put_offload): for m_key, params in module.state_dict().items(): if (root + f".{m_key}") in state_dict: state_dict[root + f".{m_key}"] = params return state_dict def load_checkpoint_in_model( model: nn.Module, checkpoint: Union[str, os.PathLike], device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, dtype: Optional[Union[str, torch.dtype]] = None, offload_state_dict: bool = False, offload_buffers: bool = False, keep_in_fp32_modules: List[str] = None, offload_8bit_bnb: bool = False, strict: bool = False, ): """ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are loaded. <Tip warning={true}> Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`]. </Tip> Args: model (`torch.nn.Module`): The model in which we want to load a checkpoint. checkpoint (`str` or `os.PathLike`): The folder checkpoint to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. 
- a path to a folder containing a unique pytorch_model.bin or a model.safetensors file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the buffers in the weights offloaded to disk. keep_in_fp32_modules(`List[str]`, *optional*): A list of the modules that we keep in `torch.float32` dtype. offload_8bit_bnb (`bool`, *optional*): Whether or not to enable offload of 8-bit modules on cpu/disk. strict (`bool`, *optional*, defaults to `False`): Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's state_dict. """ if offload_8bit_bnb: from .bnb import quantize_and_offload_8bit tied_params = find_tied_parameters(model) if check_tied_parameters_in_config(model) and len(tied_params) == 0: logger.warn( "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function." ) if device_map is not None: check_tied_parameters_on_same_device(tied_params, device_map) if offload_folder is None and device_map is not None and "disk" in device_map.values(): raise ValueError( "At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`." ) elif offload_folder is not None and device_map is not None and "disk" in device_map.values(): os.makedirs(offload_folder, exist_ok=True) if isinstance(dtype, str): # We accept "torch.float16" or just "float16" dtype = dtype.replace("torch.", "") dtype = getattr(torch, dtype) checkpoint_files = None index_filename = None if os.path.isfile(checkpoint): if str(checkpoint).endswith(".json"): index_filename = checkpoint else: checkpoint_files = [checkpoint] elif os.path.isdir(checkpoint): # check if the whole state dict is present potential_state_bin = [f for f in os.listdir(checkpoint) if f == WEIGHTS_NAME] potential_state_safetensor = [f for f in os.listdir(checkpoint) if f == SAFE_WEIGHTS_NAME] if len(potential_state_bin) == 1: checkpoint_files = [os.path.join(checkpoint, potential_state_bin[0])] elif len(potential_state_safetensor) == 1: checkpoint_files = [os.path.join(checkpoint, potential_state_safetensor[0])] else: # otherwise check for sharded checkpoints potential_index = [f for f in os.listdir(checkpoint) if f.endswith(".index.json")] if len(potential_index) == 0: raise ValueError( f"{checkpoint} is not a folder containing a `.index.json` file or a {WEIGHTS_NAME} or a {SAFE_WEIGHTS_NAME} file" ) elif len(potential_index) == 1: index_filename = os.path.join(checkpoint, potential_index[0]) else: raise ValueError( f"{checkpoint} containing more than one `.index.json` file, delete the irrelevant ones." 
) else: raise ValueError( "`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded " f"checkpoint, or a folder containing a sharded checkpoint or the whole state dict, but got {checkpoint}." ) if index_filename is not None: checkpoint_folder = os.path.split(index_filename)[0] with open(index_filename) as f: index = json.loads(f.read()) if "weight_map" in index: index = index["weight_map"] checkpoint_files = sorted(list(set(index.values()))) checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files] # Logic for missing/unexepected keys goes here. offload_index = {} if offload_state_dict: state_dict_folder = tempfile.mkdtemp() state_dict_index = {} unexpected_keys = set() model_keys = set(model.state_dict().keys()) buffer_names = [name for name, _ in model.named_buffers()] for checkpoint_file in checkpoint_files: loaded_checkpoint = load_state_dict(checkpoint_file, device_map=device_map) if device_map is None: model.load_state_dict(loaded_checkpoint, strict=strict) unexpected_keys.update(set(loaded_checkpoint.keys()) - model_keys) else: for param_name, param in loaded_checkpoint.items(): # skip SCB parameter (for 8-bit serialization) if "SCB" in param_name: continue if param_name not in model_keys: unexpected_keys.add(param_name) if not strict: continue # Skip loading this parameter. module_name = param_name while len(module_name) > 0 and module_name not in device_map: module_name = ".".join(module_name.split(".")[:-1]) if module_name == "" and "" not in device_map: # TODO: group all errors and raise at the end. raise ValueError(f"{param_name} doesn't have any device set.") param_device = device_map[module_name] new_dtype = dtype if dtype is not None and torch.is_floating_point(param): if keep_in_fp32_modules is not None and dtype == torch.float16: proceed = False for key in keep_in_fp32_modules: if ((key in param_name) and (key + "." in param_name)) or key == param_name: proceed = True break if proceed: new_dtype = torch.float32 if "weight" in param_name and param_name.replace("weight", "SCB") in loaded_checkpoint.keys(): if param.dtype == torch.int8: fp16_statistics = loaded_checkpoint[param_name.replace("weight", "SCB")] else: fp16_statistics = None if param_device == "disk": if offload_buffers or param_name not in buffer_names: if new_dtype is None: new_dtype = param.dtype if offload_8bit_bnb: quantize_and_offload_8bit( model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics ) continue else: set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) offload_weight(param, param_name, offload_folder, index=offload_index) elif param_device == "cpu" and offload_state_dict: if new_dtype is None: new_dtype = param.dtype if offload_8bit_bnb: quantize_and_offload_8bit( model, param, param_name, new_dtype, state_dict_folder, state_dict_index, fp16_statistics ) else: set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) offload_weight(param, param_name, state_dict_folder, index=state_dict_index) else: set_module_tensor_to_device( model, param_name, param_device, value=param, dtype=new_dtype, fp16_statistics=fp16_statistics, ) # Force Python to clean up. del loaded_checkpoint gc.collect() if not strict and len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint at {checkpoint} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}. 
This may or may not be an issue - make sure that the checkpoint does not have unnecessary parameters, or that the model definition correctly corresponds to the checkpoint." ) save_offload_index(offload_index, offload_folder) # Load back offloaded state dict on CPU if offload_state_dict: load_offloaded_weights(model, state_dict_index, state_dict_folder) shutil.rmtree(state_dict_folder) retie_parameters(model, tied_params) def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwargs: AutocastKwargs = None): """ Return a context manager for autocasting mixed precision Args: native_amp (`bool`, *optional*, defaults to False): Whether mixed precision is actually enabled. cache_enabled (`bool`, *optional*, defaults to True): Whether the weight cache inside autocast should be enabled. """ state = AcceleratorState() if autocast_kwargs is None: autocast_kwargs = {} else: autocast_kwargs = autocast_kwargs.to_kwargs() if native_amp: device_type = ( "cuda" if (state.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_gpu=True)) else state.device.type ) if state.mixed_precision == "fp16": return torch.autocast(device_type=device_type, dtype=torch.float16, **autocast_kwargs) elif state.mixed_precision in ["bf16", "fp8"] and state.distributed_type in [ DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.FSDP, DistributedType.XLA, ]: return torch.autocast(device_type=device_type, dtype=torch.bfloat16, **autocast_kwargs) else: return torch.autocast(device_type=device_type, **autocast_kwargs) else: return contextlib.nullcontext() def get_grad_scaler(distributed_type: DistributedType = None, **kwargs): """ A generic helper which will initialize the correct `GradScaler` implementation based on the environment and return it. Args: distributed_type (`DistributedType`, *optional*, defaults to None): The type of distributed environment. kwargs: Additional arguments for the utilized `GradScaler` constructor. """ if distributed_type == DistributedType.FSDP: from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler return ShardedGradScaler(**kwargs) if is_torch_xla_available(check_is_gpu=True): import torch_xla.amp as xamp return xamp.GradScaler(**kwargs) elif is_mlu_available(): return torch.mlu.amp.GradScaler(**kwargs) elif is_musa_available(): return torch.musa.amp.GradScaler(**kwargs) elif is_npu_available(): return torch.npu.amp.GradScaler(**kwargs) elif is_xpu_available(): return torch.amp.GradScaler("xpu", **kwargs) else: if is_torch_version(">=", "2.3"): return torch.amp.GradScaler("cuda", **kwargs) else: return torch.cuda.amp.GradScaler(**kwargs) def has_offloaded_params(module: torch.nn.Module) -> bool: """ Checks if a module has offloaded parameters by checking if the given module has a AlignDevicesHook attached with offloading enabled Args: module (`torch.nn.Module`): The module to check for an offload hook. Returns: bool: `True` if the module has an offload hook and offloading is enabled, `False` otherwise. """ from ..hooks import AlignDevicesHook # avoid circular import return hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload @contextlib.contextmanager def align_module_device(module: torch.nn.Module, execution_device: Optional[torch.device] = None): """ Context manager that moves a module's parameters to the specified execution device. 
    Args:
        module (`torch.nn.Module`):
            Module with parameters to align.
        execution_device (`torch.device`, *optional*):
            If provided, overrides the module's execution device within the context. Otherwise, the hook's execution
            device is used for offloaded modules, and the parameters are left in place when the module has no offload
            hook and no device is given.
    """
    if has_offloaded_params(module):
        if execution_device is not None:
            original_device = module._hf_hook.execution_device
            module._hf_hook.execution_device = execution_device

        try:
            module._hf_hook.pre_forward(module)
            yield
        finally:
            module._hf_hook.post_forward(module, None)
            if execution_device is not None:
                module._hf_hook.execution_device = original_device

    elif execution_device is not None:
        devices = {name: param.device for name, param in module.named_parameters(recurse=False)}
        try:
            for name in devices:
                set_module_tensor_to_device(module, name, execution_device)
            yield
        finally:
            for name, device in devices.items():
                set_module_tensor_to_device(module, name, device)

    else:
        yield
accelerate/src/accelerate/utils/modeling.py/0
{ "file_path": "accelerate/src/accelerate/utils/modeling.py", "repo_id": "accelerate", "token_count": 38832 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, get_launch_command, path_in_accelerate_package, require_multi_device, require_non_cpu, require_non_torch_xla, slow, ) from accelerate.utils import patch_environment from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.fsdp_utils import disable_fsdp_ram_efficient_loading, enable_fsdp_ram_efficient_loading set_seed(42) BERT_BASE_CASED = "bert-base-cased" LLAMA_TESTING = "hf-internal-testing/tiny-random-LlamaForCausalLM" FP16 = "fp16" BF16 = "bf16" dtypes = [FP16, BF16] @require_non_cpu @require_non_torch_xla class FSDPPluginIntegration(AccelerateTestCase): def setUp(self): super().setUp() self.dist_env = dict( MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1", ) self.fsdp_env = dict(ACCELERATE_USE_FSDP="true", **self.dist_env) def test_sharding_strategy(self): from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy # check that giving enums works fine for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): env = self.fsdp_env.copy() env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}" with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.sharding_strategy == ShardingStrategy(i + 1) fsdp_plugin = FullyShardedDataParallelPlugin(sharding_strategy=ShardingStrategy(i + 1)) assert fsdp_plugin.sharding_strategy == ShardingStrategy(i + 1) # check that giving names works fine for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): env = self.fsdp_env.copy() env["FSDP_SHARDING_STRATEGY"] = strategy with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.sharding_strategy == ShardingStrategy(i + 1) fsdp_plugin = FullyShardedDataParallelPlugin(sharding_strategy=strategy) assert fsdp_plugin.sharding_strategy == ShardingStrategy(i + 1) def test_backward_prefetch(self): from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH): expected_value = None if prefetch_policy == "NO_PREFETCH" else BackwardPrefetch(i + 1) env = self.fsdp_env.copy() env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert ( fsdp_plugin.backward_prefetch == expected_value ), f"Actual: {fsdp_plugin.backward_prefetch} != Expected: {expected_value}" # Check if torch enum works if prefetch_policy != "NO_PREFETCH": fsdp_plugin 
= FullyShardedDataParallelPlugin(backward_prefetch=BackwardPrefetch(i + 1)) assert fsdp_plugin.backward_prefetch == expected_value # Check if name works fsdp_plugin = FullyShardedDataParallelPlugin(backward_prefetch=prefetch_policy) assert fsdp_plugin.backward_prefetch == expected_value def test_state_dict_type(self): from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE): env = self.fsdp_env.copy() env["FSDP_STATE_DICT_TYPE"] = state_dict_type with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.state_dict_type == StateDictType(i + 1) if state_dict_type == "FULL_STATE_DICT": assert fsdp_plugin.state_dict_config.offload_to_cpu assert fsdp_plugin.state_dict_config.rank0_only fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_type=StateDictType(i + 1)) assert fsdp_plugin.state_dict_type == StateDictType(i + 1) if state_dict_type == "FULL_STATE_DICT": assert fsdp_plugin.state_dict_config.offload_to_cpu assert fsdp_plugin.state_dict_config.rank0_only # We can also override the state_dict_type, # typical case: user trains with sharded, but final save is with full fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_type="FULL_STATE_DICT") fsdp_plugin.set_state_dict_type("SHARDED_STATE_DICT") assert fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT def test_auto_wrap_policy(self): for model_name in [LLAMA_TESTING, BERT_BASE_CASED]: model = AutoModel.from_pretrained(model_name) layer_to_wrap = "LlamaDecoderLayer" if model_name == LLAMA_TESTING else "BertLayer" for policy in FSDP_AUTO_WRAP_POLICY: env = self.fsdp_env.copy() env["FSDP_AUTO_WRAP_POLICY"] = policy transformer_cls_to_wrap = None min_num_params = None env.pop("FSDP_TRANSFORMER_CLS_TO_WRAP", None) env.pop("FSDP_MIN_NUM_PARAMS", None) if policy == "TRANSFORMER_BASED_WRAP": env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = layer_to_wrap transformer_cls_to_wrap = layer_to_wrap elif policy == "SIZE_BASED_WRAP": env["FSDP_MIN_NUM_PARAMS"] = "2000" min_num_params = 2000 # First test via env with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(model) if policy == "NO_WRAP": assert fsdp_plugin.auto_wrap_policy is None else: assert isinstance(fsdp_plugin.auto_wrap_policy, functools.partial) # Then manually set the policy fsdp_plugin = FullyShardedDataParallelPlugin( auto_wrap_policy=policy, transformer_cls_names_to_wrap=transformer_cls_to_wrap, min_num_params=min_num_params, ) fsdp_plugin.set_auto_wrap_policy(model) if policy == "NO_WRAP": assert fsdp_plugin.auto_wrap_policy is None else: assert isinstance(fsdp_plugin.auto_wrap_policy, functools.partial) env = self.fsdp_env.copy() env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP" env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer" with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() with self.assertRaises(Exception) as cm: fsdp_plugin.set_auto_wrap_policy(model) assert "Could not find the transformer layer class T5Layer in the model." in str(cm.exception) fsdp_plugin = FullyShardedDataParallelPlugin( auto_wrap_policy="TRANSFORMER_BASED_WRAP", transformer_cls_names_to_wrap="T5Layer", ) with self.assertRaises(Exception) as cm: fsdp_plugin.set_auto_wrap_policy(model) assert "Could not find the transformer layer class T5Layer in the model." 
in str(cm.exception) env = self.fsdp_env.copy() env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP" env["FSDP_MIN_NUM_PARAMS"] = "0" with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(model) assert fsdp_plugin.auto_wrap_policy is None fsdp_plugin = FullyShardedDataParallelPlugin( auto_wrap_policy="SIZE_BASED_WRAP", min_num_params=0, ) fsdp_plugin.set_auto_wrap_policy(model) assert fsdp_plugin.auto_wrap_policy is None def test_mixed_precision(self): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: env = self.fsdp_env.copy() env["ACCELERATE_MIXED_PRECISION"] = mp_dtype with mockenv_context(**env): accelerator = Accelerator() if mp_dtype == "fp16": dtype = torch.float16 elif mp_dtype == "bf16": dtype = torch.bfloat16 mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype) assert accelerator.state.fsdp_plugin.mixed_precision_policy == mp_policy if mp_dtype == FP16: assert isinstance(accelerator.scaler, ShardedGradScaler) elif mp_dtype == BF16: assert accelerator.scaler is None AcceleratorState._reset_state(True) plugin = FullyShardedDataParallelPlugin( mixed_precision_policy={"param_dtype": dtype, "reduce_dtype": dtype, "buffer_dtype": dtype} ) assert plugin.mixed_precision_policy == mp_policy with mockenv_context(**self.dist_env): accelerator = Accelerator(fsdp_plugin=plugin) assert accelerator.state.fsdp_plugin.mixed_precision_policy == mp_policy AcceleratorState._reset_state(True) def test_mixed_precision_buffer_autocast_override(self): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: if mp_dtype == "fp16": dtype = torch.float16 elif mp_dtype == "bf16": dtype = torch.bfloat16 mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=torch.float32) env = self.fsdp_env.copy() env["ACCELERATE_MIXED_PRECISION"] = mp_dtype with mockenv_context(**env): accelerator = Accelerator() accelerator.state.fsdp_plugin.set_mixed_precision(dtype, buffer_autocast=True, override=True) assert accelerator.state.fsdp_plugin.mixed_precision_policy == mp_policy if mp_dtype == FP16: assert isinstance(accelerator.scaler, ShardedGradScaler) elif mp_dtype == BF16: assert accelerator.scaler is None AcceleratorState._reset_state(True) def test_cpu_offload(self): from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: env = self.fsdp_env.copy() env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower() with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.cpu_offload == CPUOffload(offload_params=flag) fsdp_plugin = FullyShardedDataParallelPlugin(cpu_offload=flag) assert fsdp_plugin.cpu_offload == CPUOffload(offload_params=flag) def test_cpu_ram_efficient_loading(self): enable_fsdp_ram_efficient_loading() fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.cpu_ram_efficient_loading is True assert os.environ.get("FSDP_CPU_RAM_EFFICIENT_LOADING") == "True" disable_fsdp_ram_efficient_loading() fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.cpu_ram_efficient_loading is False assert os.environ.get("FSDP_CPU_RAM_EFFICIENT_LOADING") == "False" # Skip this test when TorchXLA is available because accelerate.launch does not support TorchXLA FSDP. 
@require_non_torch_xla @require_multi_device @slow class FSDPIntegrationTest(TempDirTestCase): test_scripts_folder = path_in_accelerate_package("test_utils", "scripts", "external_deps") def setUp(self): super().setUp() self.performance_lower_bound = 0.82 self.performance_configs = [ "fsdp_shard_grad_op_transformer_based_wrap", "fsdp_full_shard_transformer_based_wrap", ] self.peak_memory_usage_upper_bound = { "multi_gpu_fp16": 3200, "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000, "fsdp_full_shard_transformer_based_wrap_fp16": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } self.n_train = 160 self.n_val = 160 def test_performance(self): self.test_file_path = self.test_scripts_folder / "test_performance.py" cmd = get_launch_command(num_processes=2, num_machines=1, machine_rank=0, use_fsdp=True) for config in self.performance_configs: cmd_config = cmd.copy() for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): if strategy.lower() in config: cmd_config.append(f"--fsdp_sharding_strategy={strategy}") break if "fp32" in config: cmd_config.append("--mixed_precision=no") else: cmd_config.append("--mixed_precision=fp16") if "cpu_offload" in config: cmd_config.append("--fsdp_offload_params=True") for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}") break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer") elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000") cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--performance_lower_bound={self.performance_lower_bound}", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config) def test_checkpointing(self): self.test_file_path = self.test_scripts_folder / "test_checkpointing.py" cmd = get_launch_command( num_processes=2, num_machines=1, machine_rank=0, use_fsdp=True, mixed_precision="fp16", fsdp_transformer_layer_cls_to_wrap="BertLayer", ) for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): cmd_config = cmd.copy() cmd_config.append(f"--fsdp_sharding_strategy={strategy}") if strategy != "FULL_SHARD": continue state_dict_config_index = len(cmd_config) for state_dict_type in FSDP_STATE_DICT_TYPE: # Todo: Currently failing for `LOCAL_STATE_DICT` with error # Unexpected key(s) in state_dict: "_fsdp_wrapped_module._flat_param". 
if state_dict_type == "LOCAL_STATE_DICT": continue cmd_config = cmd_config[:state_dict_config_index] cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}") cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", "--partial_train_epoch=1", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config) cmd_config = cmd_config[:-1] resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0") cmd_config.extend( [ f"--resume_from_checkpoint={resume_from_checkpoint}", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config) def test_peak_memory_usage(self): self.test_file_path = self.test_scripts_folder / "test_peak_memory_usage.py" cmd = get_launch_command(num_processes=2, num_machines=1, machine_rank=0) for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): cmd_config = cmd.copy() if "fp16" in spec: cmd_config.extend(["--mixed_precision=fp16"]) else: cmd_config.extend(["--mixed_precision=no"]) if "multi_gpu" in spec: continue else: cmd_config.extend(["--use_fsdp"]) for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): if strategy.lower() in spec: cmd_config.append(f"--fsdp_sharding_strategy={strategy}") break if "cpu_offload" in spec: cmd_config.append("--fsdp_offload_params=True") for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}") break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer") elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000") cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--peak_memory_upper_bound={peak_mem_upper_bound}", f"--n_train={self.n_train}", f"--n_val={self.n_val}", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config)
accelerate/tests/fsdp/test_fsdp.py/0
{ "file_path": "accelerate/tests/fsdp/test_fsdp.py", "repo_id": "accelerate", "token_count": 9529 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import torch import torch.nn as nn from torch.fx import symbolic_trace from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_device, torch_device torch_device = f"{torch_device}:0" if torch_device != "cpu" else "cpu" class ModelForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(3, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) class PreForwardHook(ModelHook): def pre_forward(self, module, *args, **kwargs): return (args[0] + 1,) + args[1:], kwargs class PostForwardHook(ModelHook): def post_forward(self, module, output): return output + 1 class HooksModelTester(unittest.TestCase): def test_add_and_remove_hooks(self): test_model = ModelForTest() test_hook = ModelHook() add_hook_to_module(test_model, test_hook) assert test_model._hf_hook == test_hook assert hasattr(test_model, "_old_forward") # Check adding the hook did not change the name or the signature assert test_model.forward.__name__ == "forward" assert list(inspect.signature(test_model.forward).parameters) == ["x"] remove_hook_from_module(test_model) assert not hasattr(test_model, "_hf_hook") assert not hasattr(test_model, "_old_forward") def test_append_and_remove_hooks(self): test_model = ModelForTest() test_hook = ModelHook() add_hook_to_module(test_model, test_hook) add_hook_to_module(test_model, test_hook, append=True) assert isinstance(test_model._hf_hook, SequentialHook) is True assert len(test_model._hf_hook.hooks) == 2 assert hasattr(test_model, "_old_forward") # Check adding the hook did not change the name or the signature assert test_model.forward.__name__ == "forward" assert list(inspect.signature(test_model.forward).parameters) == ["x"] remove_hook_from_module(test_model) assert not hasattr(test_model, "_hf_hook") assert not hasattr(test_model, "_old_forward") def test_pre_forward_hook_is_executed(self): test_model = ModelForTest() x = torch.randn(2, 3) expected = test_model(x + 1) expected2 = test_model(x + 2) test_hook = PreForwardHook() add_hook_to_module(test_model, test_hook) output1 = test_model(x) assert torch.allclose(output1, expected, atol=1e-5) # Attaching a hook to a model when it already has one replaces, does not chain test_hook = PreForwardHook() add_hook_to_module(test_model, test_hook) output1 = test_model(x) assert torch.allclose(output1, expected, atol=1e-5) # You need to use the sequential hook to chain two or more hooks test_hook = SequentialHook(PreForwardHook(), PreForwardHook()) add_hook_to_module(test_model, test_hook) output2 = test_model(x) assert torch.allclose(output2, expected2, atol=1e-5) def test_post_forward_hook_is_executed(self): test_model = ModelForTest() x = 
torch.randn(2, 3) output = test_model(x) test_hook = PostForwardHook() add_hook_to_module(test_model, test_hook) output1 = test_model(x) assert torch.allclose(output1, (output + 1), atol=1e-5) # Attaching a hook to a model when it already has one replaces, does not chain test_hook = PostForwardHook() add_hook_to_module(test_model, test_hook) output1 = test_model(x) assert torch.allclose(output1, (output + 1), atol=1e-5) # You need to use the sequential hook to chain two or more hooks test_hook = SequentialHook(PostForwardHook(), PostForwardHook()) add_hook_to_module(test_model, test_hook) output2 = test_model(x) assert torch.allclose(output2, output + 2, atol=1e-5) def test_no_grad_in_hook(self): test_model = ModelForTest() x = torch.randn(2, 3) output = test_model(x) test_hook = PostForwardHook() add_hook_to_module(test_model, test_hook) output1 = test_model(x) assert torch.allclose(output1, (output + 1)) assert output1.requires_grad test_hook.no_grad = True output1 = test_model(x) assert not output1.requires_grad @require_multi_device def test_align_devices_as_model_parallelism(self): model = ModelForTest() # Everything is on CPU assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # This will move each submodule on different devices add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0)) add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0)) add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1)) assert model.linear1.weight.device == torch.device(torch_device) assert model.batchnorm.weight.device == torch.device(torch_device) assert model.batchnorm.running_mean.device == torch.device(torch_device) assert model.linear2.weight.device == torch.device(torch_device.replace(":0", ":1")) # We can still make a forward pass. The input does not need to be on any particular device x = torch.randn(2, 3) output = model(x) assert output.device == torch.device(torch_device.replace(":0", ":1")) # We can add a general hook to put back output on same device as input. add_hook_to_module(model, AlignDevicesHook(io_same_device=True)) x = torch.randn(2, 3).to(torch_device) output = model(x) assert output.device == torch.device(torch_device) def test_align_devices_as_cpu_offload(self): model = ModelForTest() # Everything is on CPU assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # This will move each submodule on different devices hook_kwargs = {"execution_device": torch_device, "offload": True} add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs)) add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs)) add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs)) # Parameters have been offloaded, so on the meta device assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") # Buffers are not included in the offload by default, so are on the execution device device = torch.device(hook_kwargs["execution_device"]) assert model.batchnorm.running_mean.device == device x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.linear1) remove_hook_from_module(model.batchnorm) remove_hook_from_module(model.linear2) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # Now test with buffers included in the offload hook_kwargs = { "execution_device": torch_device, "offload": True, "offload_buffers": True, } add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs)) add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs)) add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs)) # Parameters have been offloaded, so on the meta device, buffers included assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") assert model.batchnorm.running_mean.device == torch.device("meta") x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. remove_hook_from_module(model.linear1) remove_hook_from_module(model.batchnorm) remove_hook_from_module(model.linear2) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") def test_attach_align_device_hook_as_cpu_offload(self): model = ModelForTest() # Everything is on CPU assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # This will move each submodule on different devices execution_device = torch_device attach_align_device_hook(model, execution_device=execution_device, offload=True) # Parameters have been offloaded, so on the meta device assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") # Buffers are not included in the offload by default, so are on the execution device device = torch.device(execution_device) assert model.batchnorm.running_mean.device == device x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. remove_hook_from_submodules(model) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # Now test with buffers included in the offload attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True) # Parameters have been offloaded, so on the meta device, buffers included assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") assert model.batchnorm.running_mean.device == torch.device("meta") x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(model) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self): model = ModelForTest() # Everything is on CPU assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # This will move each submodule on different devices execution_device = torch_device attach_align_device_hook( model, execution_device=execution_device, offload=True, weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") # Buffers are not included in the offload by default, so are on the execution device device = torch.device(execution_device) assert model.batchnorm.running_mean.device == device x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. remove_hook_from_submodules(model) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # Now test with buffers included in the offload attach_align_device_hook( model, execution_device=execution_device, offload=True, weights_map=model.state_dict(), offload_buffers=True, ) # Parameters have been offloaded, so on the meta device, buffers included assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") assert model.batchnorm.running_mean.device == torch.device("meta") x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. remove_hook_from_submodules(model) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") def test_add_remove_hook_fx_graph_module(self): with torch.no_grad(): test_model = ModelForTest() test_hook = ModelHook() x = torch.randn(2, 3) output1 = test_model(x) graph_model = symbolic_trace(test_model) output2 = graph_model(x) assert torch.allclose(output1, output2) add_hook_to_module(graph_model, test_hook) remove_hook_from_module(graph_model, recurse=True) # We want to make sure that `add_hook_to_module` and `remove_hook_from_module` yields back an fx.GraphModule # that behaves correctly (for example that is not frozen, see https://github.com/huggingface/accelerate/pull/2369). # For that, we add a sigmoid node to the FX graph and make sure that the new output (output3 below) is different than # the original model's output. 
            linear2_node = None
            for node in graph_model.graph.nodes:
                if node.name == "linear2":
                    linear2_node = node
            assert linear2_node is not None

            graph_model.graph.inserting_after(linear2_node)
            new_node = graph_model.graph.create_node(
                op="call_function", target=torch.sigmoid, args=(linear2_node,), name="sigmoid"
            )

            output_node = None
            for node in graph_model.graph.nodes:
                if node.name == "output":
                    output_node = node
            assert output_node is not None

            output_node.replace_input_with(linear2_node, new_node)

            graph_model.graph.lint()
            graph_model.recompile()

            output3 = graph_model(x)

            # Now the output is expected to be different since we modified the graph.
            assert not torch.allclose(output1, output3)
accelerate/tests/test_hooks.py/0
{ "file_path": "accelerate/tests/test_hooks.py", "repo_id": "accelerate", "token_count": 6577 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import logging import os import random import shutil import tempfile import unittest import uuid from contextlib import contextmanager import pytest import torch from parameterized import parameterized_class from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import ( DEFAULT_LAUNCH_COMMAND, execute_subprocess_async, require_non_cpu, require_non_torch_xla, ) from accelerate.utils import DistributedType, ProjectConfiguration, set_seed logger = logging.getLogger(__name__) def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2): "Generates a tuple of dummy DataLoaders to test with" def get_dataset(n_batches): x = torch.randn(batch_size * n_batches, 1) return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1)) train_dataset = get_dataset(n_train_batches) valid_dataset = get_dataset(n_valid_batches) train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4) return (train_dataloader, valid_dataloader) def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None): "Trains for `num_epochs`" rands = [] for epoch in range(num_epochs): # Train quickly model.train() for batch in dataloader: x, y = batch outputs = model(x) loss = torch.nn.functional.mse_loss(outputs, y) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() rands.append(random.random()) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class DummyModel(nn.Module): "Simple model to do y=mx+b" def __init__(self): super().__init__() self.a = nn.Parameter(torch.randn(1)) self.b = nn.Parameter(torch.randn(1)) def forward(self, x): return x * self.a + self.b def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = "use_safetensors" if param["use_safetensors"] is True else "use_pytorch" return f"{func.__name__}_{param_based_name}" @parameterized_class(("use_safetensors",), [[True], [False]], class_name_func=parameterized_custom_name_func) class CheckpointTest(unittest.TestCase): def check_adam_state(self, state1, state2, distributed_type): # For DistributedType.XLA, the `accelerator.save_state` function calls `xm._maybe_convert_to_cpu` before saving. # As a result, all tuple values are converted to lists. Therefore, we need to convert them back here. # Remove this code once Torch XLA fixes this issue. 
if distributed_type == DistributedType.XLA: state1["param_groups"][0]["betas"] = tuple(state1["param_groups"][0]["betas"]) state2["param_groups"][0]["betas"] = tuple(state2["param_groups"][0]["betas"]) assert state1 == state2 def test_with_save_limit(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial accelerator.save_state(safe_serialization=self.use_safetensors) # Save second state accelerator.save_state(safe_serialization=self.use_safetensors) assert len(os.listdir(accelerator.project_dir)) == 1 def test_can_resume_training_with_folder(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() # Train baseline accelerator = Accelerator() model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial initial = os.path.join(tmpdir, "initial") accelerator.save_state(initial, safe_serialization=self.use_safetensors) (a, b) = model.a.item(), model.b.item() opt_state = optimizer.state_dict() ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator) (a1, b1) = model.a.item(), model.b.item() opt_state1 = optimizer.state_dict() # Train partially set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() accelerator = Accelerator() model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) accelerator.load_state(initial) (a2, b2) = model.a.item(), model.b.item() opt_state2 = optimizer.state_dict() self.assertEqual(a, a2) self.assertEqual(b, b2) assert a == a2 assert b == b2 self.check_adam_state(opt_state, opt_state2, accelerator.distributed_type) test_rands = train(2, model, train_dataloader, optimizer, accelerator) # Save everything checkpoint = os.path.join(tmpdir, "checkpoint") accelerator.save_state(checkpoint, safe_serialization=self.use_safetensors) # Load everything back in and make sure all states work accelerator.load_state(checkpoint) test_rands += train(1, model, train_dataloader, optimizer, accelerator) (a3, b3) = model.a.item(), model.b.item() opt_state3 = optimizer.state_dict() assert a1 == a3 assert b1 == b3 self.check_adam_state(opt_state1, opt_state3, accelerator.distributed_type) assert ground_truth_rands == test_rands def test_can_resume_training(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial 
accelerator.save_state(safe_serialization=self.use_safetensors) (a, b) = model.a.item(), model.b.item() opt_state = optimizer.state_dict() ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator) (a1, b1) = model.a.item(), model.b.item() opt_state1 = optimizer.state_dict() # Train partially set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True) accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0")) (a2, b2) = model.a.item(), model.b.item() opt_state2 = optimizer.state_dict() assert a == a2 assert b == b2 self.check_adam_state(opt_state, opt_state2, accelerator.distributed_type) test_rands = train(2, model, train_dataloader, optimizer, accelerator) # Save everything accelerator.save_state(safe_serialization=self.use_safetensors) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1")) test_rands += train(1, model, train_dataloader, optimizer, accelerator) (a3, b3) = model.a.item(), model.b.item() opt_state3 = optimizer.state_dict() assert a1 == a3 assert b1 == b3 self.check_adam_state(opt_state1, opt_state3, accelerator.distributed_type) assert ground_truth_rands == test_rands def test_can_resume_training_checkpoints_relative_path(self): # See #1983 # This test is like test_can_resume_training but uses a relative path for the checkpoint and automatically # infers the checkpoint path when loading. 
@contextmanager def temporary_relative_directory(): # This is equivalent to tempfile.TemporaryDirectory() except that it returns a relative path rand_dir = f"test_path_{uuid.uuid4()}" os.mkdir(rand_dir) try: yield rand_dir finally: shutil.rmtree(rand_dir) with temporary_relative_directory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial accelerator.save_state(safe_serialization=self.use_safetensors) (a, b) = model.a.item(), model.b.item() opt_state = optimizer.state_dict() ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator) (a1, b1) = model.a.item(), model.b.item() opt_state1 = optimizer.state_dict() # Train partially set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True) accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) accelerator.load_state() # <= infer the directory automatically (a2, b2) = model.a.item(), model.b.item() opt_state2 = optimizer.state_dict() assert a == a2 assert b == b2 self.check_adam_state(opt_state, opt_state2, accelerator.distributed_type) assert opt_state == opt_state2 test_rands = train(2, model, train_dataloader, optimizer, accelerator) # Save everything accelerator.save_state(safe_serialization=self.use_safetensors) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1")) test_rands += train(1, model, train_dataloader, optimizer, accelerator) (a3, b3) = model.a.item(), model.b.item() opt_state3 = optimizer.state_dict() assert a1 == a3 assert b1 == b3 self.check_adam_state(opt_state1, opt_state3, accelerator.distributed_type) assert ground_truth_rands == test_rands def test_invalid_registration(self): t = torch.tensor([1, 2, 3]) t1 = torch.tensor([2, 3, 4]) net = DummyModel() opt = torch.optim.Adam(net.parameters()) accelerator = Accelerator() with self.assertRaises(ValueError) as ve: accelerator.register_for_checkpointing(t, t1, net, opt) message = str(ve.exception) assert "Item at index 0" in message assert "Item at index 1" in message assert "Item at index 2" not in message assert "Item at index 3" not in message def test_with_scheduler(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) # Save initial 
accelerator.save_state(safe_serialization=self.use_safetensors) scheduler_state = scheduler.state_dict() train(3, model, train_dataloader, optimizer, accelerator, scheduler) assert scheduler_state != scheduler.state_dict() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0")) assert scheduler_state == scheduler.state_dict() def test_automatic_loading(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) # Save initial accelerator.save_state(safe_serialization=self.use_safetensors) train(2, model, train_dataloader, optimizer, accelerator, scheduler) (a2, b2) = model.a.item(), model.b.item() # Save a first time accelerator.save_state(safe_serialization=self.use_safetensors) train(1, model, train_dataloader, optimizer, accelerator, scheduler) (a3, b3) = model.a.item(), model.b.item() # Load back in the last saved checkpoint, should point to a2, b2 accelerator.load_state() assert a3 != model.a.item() assert b3 != model.b.item() assert a2 == model.a.item() assert b2 == model.b.item() def test_checkpoint_deletion(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model = accelerator.prepare(model) # Save 3 states: for _ in range(11): accelerator.save_state(safe_serialization=self.use_safetensors) assert not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")) assert os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")) assert os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")) @require_non_cpu @require_non_torch_xla def test_map_location(self): cmd = DEFAULT_LAUNCH_COMMAND + [inspect.getfile(self.__class__)] execute_subprocess_async( cmd, env={ **os.environ, "USE_SAFETENSORS": str(self.use_safetensors), "OMP_NUM_THREADS": "1", }, ) if __name__ == "__main__": use_safetensors = os.environ.get("USE_SAFETENSORS", "False") == "True" savedir = "/tmp/accelerate/state_checkpointing" model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) model, optimizer = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for 
group in optimizer.param_groups: param_device = group["params"][0].device break assert param_device.type == accelerator.device.type model = model.cpu() accelerator.wait_for_everyone() accelerator.save_state(safe_serialization=use_safetensors) accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu") for group in optimizer.param_groups: param_device = group["params"][0].device break assert ( param_device.type == torch.device("cpu").type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device") for group in optimizer.param_groups: param_device = group["params"][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="Unsupported optimizer map location passed"): accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
accelerate/tests/test_state_checkpointing.py/0
{ "file_path": "accelerate/tests/test_state_checkpointing.py", "repo_id": "accelerate", "token_count": 8972 }
# Changelog
This documents the main changes to the `candle` crate.

## v0.3.1 - Unreleased

### Added

### Modified

## v0.3.0 - 2023-10-01

### Added
- Added the Mistral 7b v0.1 model [983](https://github.com/huggingface/candle/pull/983).
- Quantized version of the Mistral model [1009](https://github.com/huggingface/candle/pull/1009).
- Add the gelu-erf op and activation function [969](https://github.com/huggingface/candle/pull/969).
- Add the mixformer/phi-v1.5 model [930](https://github.com/huggingface/candle/pull/930).
- Add the slice-scatter op [927](https://github.com/huggingface/candle/pull/927).
- Add the Wuerstchen diffusion model [911](https://github.com/huggingface/candle/pull/911).

### Modified
- Support for simd128 intrinsics in some quantized vecdots [982](https://github.com/huggingface/candle/pull/982).
- Optimize the index-select cuda kernel [976](https://github.com/huggingface/candle/pull/976).
- Self-contained safetensor wrappers [946](https://github.com/huggingface/candle/pull/946).

## v0.2.2 - 2023-09-18

### Added
- Support for `top_p` sampling [819](https://github.com/huggingface/candle/pull/819).
- T5 model including decoding [864](https://github.com/huggingface/candle/pull/864).
- 1-d upsampling [839](https://github.com/huggingface/candle/pull/839).

### Modified
- Bugfix for conv2d [820](https://github.com/huggingface/candle/pull/820).
- Support tensor based indexing using `.i` [842](https://github.com/huggingface/candle/pull/842).

## v0.2.1 - 2023-09-11

### Added
- Add some RNNs (GRU and LSTM) in `candle-nn` [674](https://github.com/huggingface/candle/pull/674), [688](https://github.com/huggingface/candle/pull/688).
- gguf v2 support [725](https://github.com/huggingface/candle/pull/725).
- Quantized llama example in Python using the pyo3 api [716](https://github.com/huggingface/candle/pull/716).
- `candle-nn` layer for conv2d-transposed [760](https://github.com/huggingface/candle/pull/760).
- Add the Segment-Anything Model (SAM) as an example [773](https://github.com/huggingface/candle/pull/773).
- TinyViT backbone for the segment anything example [787](https://github.com/huggingface/candle/pull/787).
- Shape with holes support [770](https://github.com/huggingface/candle/pull/770).

### Modified
- Dilations are now supported in conv-transpose2d. [671](https://github.com/huggingface/candle/pull/671).
- Interactive mode for the quantized model [690](https://github.com/huggingface/candle/pull/690).
- Faster softmax operation [747](https://github.com/huggingface/candle/pull/747).
- Faster convolution operations on CPU and CUDA via im2col [802](https://github.com/huggingface/candle/pull/802).
- Moving some models to a more central location [796](https://github.com/huggingface/candle/pull/796).

## v0.2.0 - 2023-08-30

### Added
- Add the powf op [664](https://github.com/huggingface/candle/pull/664).
- Stable Diffusion XL support [647](https://github.com/huggingface/candle/pull/647).
- Add the conv-transpose2d op [635](https://github.com/huggingface/candle/pull/635).
- Refactor the VarBuilder api [627](https://github.com/huggingface/candle/pull/627).
- Add some quantization command [625](https://github.com/huggingface/candle/pull/625).
- Support more quantized types, e.g. Q2K, Q4K, Q5K... [586](https://github.com/huggingface/candle/pull/586).
- Add pose estimation to the yolo example [589](https://github.com/huggingface/candle/pull/589).
- Api to write GGUF files [585](https://github.com/huggingface/candle/pull/585).
- Support more quantization types [580](https://github.com/huggingface/candle/pull/580). - Add EfficientNet as an example Computer Vision model [572](https://github.com/huggingface/candle/pull/572). - Add a group parameter to convolutions [566](https://github.com/huggingface/candle/pull/566). - New dtype: int64 [563](https://github.com/huggingface/candle/pull/563). - Handling of the GGUF file format. [559](https://github.com/huggingface/candle/pull/559). ## v0.1.2 - 2023-08-21
candle/CHANGELOG.md/0
{ "file_path": "candle/CHANGELOG.md", "repo_id": "candle", "token_count": 1525 }
# MNIST

Now that we have downloaded the MNIST parquet files, let's put them into a simple struct.

```rust,ignore
{{#include ../lib.rs:book_training_3}}
```

Parsing the files and packing them into single tensors requires the whole dataset to fit in memory.
This approach is quite rudimentary, but it is simple enough for a small dataset like MNIST.
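Since the `{{#include}}` directive above pulls the actual definition from `lib.rs` and is not rendered here, the following is a minimal sketch of what such a struct could look like. The type name, field names, shapes, and dtypes are illustrative assumptions, not the book's actual code.

```rust,ignore
use candle_core::{Result, Tensor};
// (The book may alias the crate as `candle`; adjust the import accordingly.)

/// Hypothetical container holding the whole MNIST dataset in memory at once.
pub struct MnistDataset {
    pub train_images: Tensor, // e.g. (60_000, 784) f32, pixel values scaled to [0, 1]
    pub train_labels: Tensor, // e.g. (60_000,) u32 class ids in 0..10
    pub test_images: Tensor,  // e.g. (10_000, 784) f32
    pub test_labels: Tensor,  // e.g. (10_000,) u32
}

impl MnistDataset {
    /// Group the tensors parsed from the parquet files into a single value.
    pub fn new(
        train_images: Tensor,
        train_labels: Tensor,
        test_images: Tensor,
        test_labels: Tensor,
    ) -> Result<Self> {
        Ok(Self {
            train_images,
            train_labels,
            test_images,
            test_labels,
        })
    }
}
```

Keeping everything as plain `Tensor` fields means the full dataset lives in RAM at once, which is exactly the limitation mentioned above.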
candle/candle-book/src/training/mnist.md/0
{ "file_path": "candle/candle-book/src/training/mnist.md", "repo_id": "candle", "token_count": 93 }
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle_core::{DType, Device, Tensor}; use criterion::{black_box, criterion_group, Criterion, Throughput}; use std::time::Instant; fn run(a: &Tensor, b: &Tensor, c: &Tensor) { a.where_cond(b, c).unwrap(); } const fn create_cond_arr<const N: usize>() -> [u8; N] { let mut arr = [0u8; N]; let mut i = 0; while i < N { arr[i] = (i % 2) as u8; i += 1; } arr } const B: usize = 1; const M: usize = 1024; const K: usize = 1024; const SIZE: usize = B * M * K; const DATA: [u8; SIZE] = create_cond_arr::<SIZE>(); fn run_where_cond_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) { let tensor = Tensor::from_slice(DATA.as_slice(), (B, M, K), device).unwrap(); let on_true = Tensor::ones((B, M, K), dtype, device).unwrap(); let on_false = Tensor::zeros((B, M, K), dtype, device).unwrap(); let elements = B * M * K; // E.g. 2 f32 tensors + 1 u8 tensor let flops = (2 * elements * dtype.size_in_bytes()) + elements; let mut group = c.benchmark_group(device.bench_name(name)); group.throughput(Throughput::Bytes(flops as u64)); group.bench_function("iter", move |b| { b.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { run( black_box(&tensor), black_box(&on_true), black_box(&on_false), ); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); } fn criterion_benchmark(c: &mut Criterion) { let device = BenchDeviceHandler::new().unwrap(); for d in device.devices { run_where_cond_benchmark(c, &d, DType::F32, "where_cond_f32"); run_where_cond_benchmark(c, &d, DType::BF16, "where_cond_bf16"); run_where_cond_benchmark(c, &d, DType::F16, "where_cond_f16"); } } criterion_group!(benches, criterion_benchmark);
candle/candle-core/benches/benchmarks/where_cond.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/where_cond.rs", "repo_id": "candle", "token_count": 939 }
//! Implementation of Backend Fns for CPU use crate::backend::{BackendDevice, BackendStorage}; use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{DType, Error, IntDType, Layout, Result, Shape, WithDType}; use half::{bf16, f16}; use rayon::prelude::*; mod utils; pub use utils::{ binary_map, binary_map_vec, unary_map, unary_map_vec, Map1, Map1Any, Map2, Map2U8, }; const USE_IM2COL_CONV1D: bool = true; const USE_COL2IM_CONV1D_TR: bool = true; const USE_IM2COL_CONV2D: bool = true; // TODO: Maybe we should not implement [Clone] here and instead have an explicit allocator + // intercept the oom errors to avoid panicking and provide a proper error. #[derive(Debug, Clone)] pub enum CpuStorage { U8(Vec<u8>), U32(Vec<u32>), I64(Vec<i64>), BF16(Vec<bf16>), F16(Vec<f16>), F32(Vec<f32>), F64(Vec<f64>), } #[derive(Debug, Clone)] pub enum CpuStorageRef<'a> { U8(&'a [u8]), U32(&'a [u32]), I64(&'a [i64]), BF16(&'a [bf16]), F16(&'a [f16]), F32(&'a [f32]), F64(&'a [f64]), } #[derive(Debug, Clone)] pub struct CpuDevice; struct Cmp(CmpOp); impl Map2U8 for Cmp { const OP: &'static str = "cmp"; #[inline(always)] fn f<T: WithDType>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<u8>> { let dst = match self.0 { CmpOp::Eq => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x == y)), CmpOp::Ne => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x != y)), CmpOp::Lt => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x < y)), CmpOp::Le => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x <= y)), CmpOp::Gt => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x > y)), CmpOp::Ge => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x >= y)), }; Ok(dst) } } struct WCond<'a, T: IntDType>(&'a [T], &'a Layout); impl<I: IntDType> Map2 for WCond<'_, I> { const OP: &'static str = "where"; #[inline(always)] fn f<T: WithDType>(&self, t: &[T], t_l: &Layout, f: &[T], f_l: &Layout) -> Result<Vec<T>> { let vs = match ( self.1.contiguous_offsets(), t_l.contiguous_offsets(), f_l.contiguous_offsets(), ) { (Some((o1, o2)), Some((o_t1, o_t2)), Some((o_f1, o_f2))) => { let pred = &self.0[o1..o2]; let t = &t[o_t1..o_t2]; let f = &f[o_f1..o_f2]; pred.iter() .zip(t.iter().zip(f.iter())) .map(|(p, (&t, &f))| if p.is_true() { t } else { f }) .collect::<Vec<_>>() } _ => self .1 .strided_index() .zip(t_l.strided_index().zip(f_l.strided_index())) .map(|(i_p, (i_t, i_f))| { if self.0[i_p].is_true() { t[i_t] } else { f[i_f] } }) .collect::<Vec<_>>(), }; Ok(vs) } } struct ReduceIndex { reduce_dim_index: usize, use_min: bool, return_index: bool, } impl ReduceIndex { // The value gets replaced if f(s[current_acc], s[i]) returns true. 
#[inline(always)] fn fold_impl<T, U, F, G>(&self, src: &[T], src_l: &Layout, f: F, g: G) -> Result<Vec<U>> where T: Clone + Copy, U: Clone + Copy, F: Fn(T, T) -> bool, G: Fn(T, usize) -> U, { let reduce_dim_size = src_l.dims()[self.reduce_dim_index]; let reduce_dim_stride = src_l.stride()[self.reduce_dim_index]; let dst_len = src_l.shape().elem_count() / reduce_dim_size; let mut dst: Vec<U> = Vec::with_capacity(dst_len); let dst_to_set = dst.spare_capacity_mut(); let dst_to_set = unsafe { std::mem::transmute::<&mut [std::mem::MaybeUninit<U>], &mut [U]>(dst_to_set) }; match src_l.contiguous_offsets() { Some((o1, o2)) => { let src = &src[o1..o2]; if reduce_dim_stride == 1 { for (start_src_i, dst_v) in dst_to_set.iter_mut().enumerate() { let start_src_i = start_src_i * reduce_dim_size; let src = &src[start_src_i..start_src_i + reduce_dim_size]; let mut acc = 0; let mut val = src[0]; for (src_i, &s) in src.iter().enumerate() { if f(val, s) { acc = src_i; val = s } } *dst_v = g(val, acc) } } else { for (start_src_i, dst_v) in dst_to_set.iter_mut().enumerate() { let (p, q) = ( start_src_i / reduce_dim_stride, start_src_i % reduce_dim_stride, ); // start_src_i = p * reduce_dim_stride + q let start_src_i = p * reduce_dim_stride * reduce_dim_size + q; let src = &src[start_src_i..]; let mut acc = 0; let mut val = src[0]; for src_i in 0..reduce_dim_size { let s = src[src_i * reduce_dim_stride]; if f(val, s) { acc = src_i; val = s } } *dst_v = g(val, acc) } } } None => { let l = src_l.narrow(self.reduce_dim_index, 0, 1)?; for (unstr_index, src_index) in l.strided_index().enumerate() { let src = &src[src_index..]; let mut acc = 0; let mut val = src[0]; for src_i in 0..reduce_dim_size { let s = src[src_i * reduce_dim_stride]; if f(val, s) { acc = src_i; val = s } } dst_to_set[unstr_index] = g(val, acc) } } } unsafe { dst.set_len(dst_len) }; Ok(dst) } } impl Map1Any for ReduceIndex { #[inline(always)] fn f<T: WithDType, W: Fn(Vec<T>) -> CpuStorage>( &self, src: &[T], src_l: &Layout, wrap: W, ) -> Result<CpuStorage> { if src_l.shape().elem_count() == 0 { Err(Error::EmptyTensor { op: "reduce" }.bt())? } let dst = match (self.return_index, self.use_min) { (false, true) => wrap(self.fold_impl(src, src_l, |x, y| x > y, |v, _i| v)?), (false, false) => wrap(self.fold_impl(src, src_l, |x, y| x < y, |v, _i| v)?), (true, true) => { CpuStorage::U32(self.fold_impl(src, src_l, |x, y| x > y, |_v, i| i as u32)?) } (true, false) => { CpuStorage::U32(self.fold_impl(src, src_l, |x, y| x < y, |_v, i| i as u32)?) } }; Ok(dst) } } struct ReduceSum<'a> { dst_shape: &'a Shape, reduce_dims: &'a [usize], reduce_dims_and_stride: Vec<(usize, usize)>, } impl ReduceSum<'_> { #[inline(always)] fn fold_impl<T>(&self, src: &[T], src_l: &Layout, start_elt: T) -> Result<Vec<T>> where T: WithDType, { let mut dst = vec![start_elt; self.dst_shape.elem_count()]; match src_l.contiguous_offsets() { Some((o1, o2)) => { let src = &src[o1..o2]; // Handle the case where we reduce over the last dimensions separately as it is // fairly common and easy to optimize. This rely on the layout being contiguous! // reduce_dims is sorted, check if it is ranging from a to n-1. 
let reduce_over_last_dims = self .reduce_dims .iter() .rev() .enumerate() .all(|(i, &v)| v == src_l.shape().rank() - 1 - i); if reduce_over_last_dims { let reduce_sz = self .reduce_dims_and_stride .iter() .map(|(u, _)| u) .product::<usize>(); for (dst_i, dst_v) in dst.iter_mut().enumerate() { let src_i = dst_i * reduce_sz; unsafe { T::vec_reduce_sum( src[src_i..src_i + reduce_sz].as_ptr(), dst_v, reduce_sz, ) }; } return Ok(dst); }; for (unstr_index, &src) in src.iter().enumerate() { let mut dst_index = unstr_index; // Set the reduce_dims indexes to 0. for &(dim, stride) in self.reduce_dims_and_stride.iter() { // The compiler is able to optimize the following in a single divmod op. let (pre, post) = (dst_index / stride, dst_index % stride); dst_index = (pre / dim) * stride + post; } dst[dst_index] += src; } } None => { for (unstr_index, src_index) in src_l.strided_index().enumerate() { let mut dst_index = unstr_index; // Set the reduce_dims indexes to 0. for &(dim, stride) in self.reduce_dims_and_stride.iter() { // The compiler is able to optimize the following in a single divmod op. let (pre, post) = (dst_index / stride, dst_index % stride); dst_index = (pre / dim) * stride + post; } dst[dst_index] += src[src_index]; } } } Ok(dst) } } impl Map1 for ReduceSum<'_> { #[inline(always)] fn f<T: WithDType>(&self, src: &[T], src_l: &Layout) -> Result<Vec<T>> { self.fold_impl(src, src_l, T::zero()) } } struct Affine(f64, f64); impl Map1 for Affine { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>> { let mul = T::from_f64(self.0); let add = T::from_f64(self.1); Ok(unary_map(vs, layout, |v| v * mul + add)) } } struct AvgPool2D((usize, usize), (usize, usize)); impl Map1 for AvgPool2D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // https://pytorch.org/docs/stable/generated/torch.nn.AvgPool2d.html let (k_h, k_w) = self.0; let (s_h, s_w) = self.1; let (b_sz, c, h, w) = layout.shape().dims4()?; let stride = layout.stride(); let (stride_h, stride_w) = (stride[2], stride[3]); let h_out = (h - k_h) / s_h + 1; let w_out = (w - k_w) / s_w + 1; let src_index = layout.start_offset(); let mut dst = vec![T::zero(); b_sz * c * h_out * w_out]; let scale = 1f64 / (k_h * k_w) as f64; let scale = T::from_f64(scale); for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * h_out * w_out..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * h_out * w_out..]; let src_index = src_index + c_idx * stride[1]; for h_idx in 0..h_out { for w_idx in 0..w_out { let mut sum = T::zero(); for m in 0..k_h { for n in 0..k_w { let m = s_h * h_idx + m; let n = s_w * w_idx + n; sum += src[src_index + m * stride_h + n * stride_w] } } dst[h_idx * w_out + w_idx] = sum * scale; } } } } Ok(dst) } } struct MaxPool2D((usize, usize), (usize, usize)); impl Map1 for MaxPool2D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html let (k_h, k_w) = self.0; let (s_h, s_w) = self.1; let (b_sz, c, h, w) = layout.shape().dims4()?; let stride = layout.stride(); let (stride_h, stride_w) = (stride[2], stride[3]); let h_out = (h - k_h) / s_h + 1; let w_out = (w - k_w) / s_w + 1; let src_index = layout.start_offset(); let mut dst = vec![T::zero(); b_sz * c * h_out * w_out]; for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * h_out * w_out..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * h_out * w_out..]; let src_index = 
src_index + c_idx * stride[1]; for h_idx in 0..h_out { for w_idx in 0..w_out { let mut largest = src[src_index + s_h * h_idx * stride_h + s_w * w_idx * stride_w]; for m in 0..k_h { for n in 0..k_w { let m = s_h * h_idx + m; let n = s_w * w_idx + n; if largest < src[src_index + m * stride_h + n * stride_w] { largest = src[src_index + m * stride_h + n * stride_w] } } } dst[h_idx * w_out + w_idx] = largest; } } } } Ok(dst) } } struct UpsampleNearest1D(usize); impl Map1 for UpsampleNearest1D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // TODO: Specialized implementation for the case 2*sz? let dst_sz = self.0; let (b_sz, c, src_sz) = layout.shape().dims3()?; let stride = layout.stride(); let stride_sz = stride[2]; let src_index = layout.start_offset(); let scale_sz = src_sz as f64 / dst_sz as f64; let mut dst = vec![T::zero(); b_sz * c * dst_sz]; let src_idxs = (0..dst_sz) .map(|idx| usize::min(src_sz - 1, (idx as f64 * scale_sz) as usize)) .collect::<Vec<_>>(); for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * dst_sz..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * dst_sz..]; let src_index = src_index + c_idx * stride[1]; for (idx, src_idx) in src_idxs.iter().enumerate() { dst[idx] = src[src_index + src_idx * stride_sz] } } } Ok(dst) } } struct UpsampleNearest2D(usize, usize); impl Map1 for UpsampleNearest2D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // TODO: Specialized implementation for the case 2*h, 2*w? let (dst_h, dst_w) = (self.0, self.1); let (b_sz, c, src_h, src_w) = layout.shape().dims4()?; let stride = layout.stride(); let (stride_h, stride_w) = (stride[2], stride[3]); let src_index = layout.start_offset(); let scale_h = src_h as f64 / dst_h as f64; let scale_w = src_w as f64 / dst_w as f64; let mut dst = vec![T::zero(); b_sz * c * dst_h * dst_w]; let src_h_idxs = (0..dst_h) .map(|h_idx| usize::min(src_h - 1, (h_idx as f64 * scale_h) as usize)) .collect::<Vec<_>>(); let src_w_idxs = (0..dst_w) .map(|w_idx| usize::min(src_w - 1, (w_idx as f64 * scale_w) as usize)) .collect::<Vec<_>>(); for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * dst_h * dst_w..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * dst_h * dst_w..]; let src_index = src_index + c_idx * stride[1]; for (h_idx, src_h_idx) in src_h_idxs.iter().enumerate() { for (w_idx, src_w_idx) in src_w_idxs.iter().enumerate() { let src_index = src_index + src_h_idx * stride_h + src_w_idx * stride_w; dst[h_idx * dst_w + w_idx] = src[src_index] } } } } Ok(dst) } } struct Gather<'a, I: IntDType> { ids: &'a [I], ids_l: &'a Layout, dim: usize, } impl<I: IntDType> Map1 for Gather<'_, I> { fn f<T: WithDType>(&self, src: &[T], src_l: &Layout) -> Result<Vec<T>> { let ids = match self.ids_l.contiguous_offsets() { Some((a, b)) => &self.ids[a..b], None => Err(Error::RequiresContiguous { op: "gather" }.bt())?, }; let src = match src_l.contiguous_offsets() { Some((a, b)) => &src[a..b], None => Err(Error::RequiresContiguous { op: "gather" }.bt())?, }; let dim = self.dim; let ids_dims = self.ids_l.dims(); let src_dims = src_l.dims(); let dst_len: usize = ids_dims.iter().product(); let dst_left_len: usize = ids_dims[..dim].iter().product(); let dst_dim_len = ids_dims[dim]; let dst_right_len: usize = ids_dims[dim + 1..].iter().product(); let src_dim_len = src_dims[dim]; let src_right_len: usize = src_dims[dim + 1..].iter().product(); let mut dst = vec![T::zero(); dst_len]; for left_i in 
0..dst_left_len { let start_src_idx = left_i * src_right_len * src_dim_len; let start_dst_idx = left_i * dst_right_len * dst_dim_len; for i in 0..dst_dim_len { let start_dst_idx = start_dst_idx + i * dst_right_len; for right_i in 0..dst_right_len { let dst_idx = start_dst_idx + right_i; let index = ids[dst_idx].as_usize(); if index >= src_dim_len { Err(Error::InvalidIndex { index, size: src_dim_len, op: "gather", } .bt())? } let src_idx = start_src_idx + index * src_right_len + right_i; dst[dst_idx] = src[src_idx] } } } Ok(dst) } } struct IndexSelect<'a, T: IntDType> { ids: &'a [T], ids_l: &'a Layout, dim: usize, } impl<I: IntDType> Map1 for IndexSelect<'_, I> { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { let src = match layout.contiguous_offsets() { Some((a, b)) => &src[a..b], None => Err(Error::RequiresContiguous { op: "index-select" }.bt())?, }; let dim = self.dim; let n_ids = match self.ids_l.dims() { [n_ids] => *n_ids, d => Err(Error::UnexpectedNumberOfDims { expected: 1, got: d.len(), shape: self.ids_l.shape().clone(), } .bt())?, }; let stride_ids = self.ids_l.stride()[0]; let mut dst_dims = layout.dims().to_vec(); let src_dim = dst_dims[dim]; dst_dims[dim] = n_ids; let dst_len: usize = dst_dims.iter().product(); let left_len: usize = dst_dims[..dim].iter().product(); let right_len: usize = dst_dims[dim + 1..].iter().product(); let mut dst = vec![T::zero(); dst_len]; for left_i in 0..left_len { let start_src_idx = left_i * right_len * src_dim; let start_dst_idx = left_i * right_len * n_ids; for i in 0..n_ids { let index = self.ids[self.ids_l.start_offset() + stride_ids * i].as_usize(); if index >= src_dim { Err(Error::InvalidIndex { index, size: src_dim, op: "index-select", } .bt())? } let start_src_idx = start_src_idx + index * right_len; let start_dst_idx = start_dst_idx + i * right_len; dst[start_dst_idx..start_dst_idx + right_len] .copy_from_slice(&src[start_src_idx..start_src_idx + right_len]) } } Ok(dst) } } struct ScatterAdd<'a, I: IntDType> { ids: &'a [I], ids_l: &'a Layout, dim: usize, } impl<I: IntDType> Map2 for ScatterAdd<'_, I> { const OP: &'static str = "scatter-add"; fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, src: &[T], src_l: &Layout) -> Result<Vec<T>> { let dst_len = l1.shape().elem_count(); let mut dst = vec![T::zero(); dst_len]; copy_strided_src_(v1, &mut dst, 0, l1); let src = match src_l.contiguous_offsets() { None => Err(Error::RequiresContiguous { op: "scatter-add" }.bt())?, Some((o1, o2)) => &src[o1..o2], }; let dim = self.dim; let ids_dims = self.ids_l.dims(); let dst_dims = l1.dims(); let dst_dim_len = dst_dims[dim]; let dst_right_len: usize = dst_dims[dim + 1..].iter().product(); let ids_left_len: usize = ids_dims[..dim].iter().product(); let ids_dim_len = ids_dims[dim]; let ids_right_len: usize = ids_dims[dim + 1..].iter().product(); let ids = match self.ids_l.contiguous_offsets() { Some((a, b)) => &self.ids[a..b], None => Err(Error::RequiresContiguous { op: "gather" }.bt())?, }; for left_i in 0..ids_left_len { let start_ids_idx = left_i * ids_right_len * ids_dim_len; let start_dst_idx = left_i * dst_right_len * dst_dim_len; for i in 0..ids_dim_len { let start_ids_idx = start_ids_idx + i * ids_right_len; for right_i in 0..dst_right_len { let ids_idx = start_ids_idx + right_i; let index = ids[ids_idx].as_usize(); if index >= dst_dim_len { Err(Error::InvalidIndex { index, size: dst_dim_len, op: "gather", } .bt())? 
} let dst_idx = start_dst_idx + index * dst_right_len + right_i; dst[dst_idx] += src[ids_idx] } } } Ok(dst) } } struct IndexAdd<'a, I: IntDType> { ids: &'a [I], dim: usize, } impl<I: IntDType> Map2 for IndexAdd<'_, I> { const OP: &'static str = "index-add"; // https://pytorch.org/docs/stable/generated/torch.Tensor.index_add_.html#torch.Tensor.index_add_ // v1, l1 -> self fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, src: &[T], src_l: &Layout) -> Result<Vec<T>> { let dst_len = l1.shape().elem_count(); let mut dst = vec![T::zero(); dst_len]; copy_strided_src_(v1, &mut dst, 0, l1); let src = match src_l.contiguous_offsets() { None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, Some((o1, o2)) => &src[o1..o2], }; let dim = self.dim; let max_idx = l1.dims()[dim]; let pre_dim = src_l.dims()[..dim].iter().product::<usize>(); let src_dim_sz = src_l.dims()[dim]; let post_dim = src_l.dims()[dim + 1..].iter().product::<usize>(); if dim == 0 { for (src_idx, dst_idx) in self.ids.iter().enumerate() { let dst_idx = dst_idx.as_usize(); if dst_idx >= max_idx { Err(Error::InvalidIndex { index: dst_idx, op: "index-add", size: max_idx, })? } let src_idx = src_idx * post_dim; let dst_idx = dst_idx * post_dim; let src = &src[src_idx..src_idx + post_dim]; let dst = &mut dst[dst_idx..dst_idx + post_dim]; for (d, &s) in dst.iter_mut().zip(src.iter()) { *d += s } } } else { for (src_idx, dst_idx) in self.ids.iter().enumerate() { let dst_idx = dst_idx.as_usize(); if dst_idx >= max_idx { Err(Error::InvalidIndex { index: dst_idx, op: "index-add", size: max_idx, })? } for pre_i in 0..pre_dim { let pre_src_i = (pre_i * src_dim_sz + src_idx) * post_dim; let pre_dst_i = (pre_i * max_idx + dst_idx) * post_dim; let src = &src[pre_src_i..pre_src_i + post_dim]; let dst = &mut dst[pre_dst_i..pre_dst_i + post_dim]; for (d, &s) in dst.iter_mut().zip(src.iter()) { *d += s } } } } Ok(dst) } } #[allow(clippy::too_many_arguments)] fn copy2d_<T: Copy>( src: &[T], dst: &mut [T], d1: usize, d2: usize, src_stride1: usize, dst_stride1: usize, src_offset: usize, dst_offset: usize, ) { for i1 in 0..d1 { let dst_idx = i1 * dst_stride1 + dst_offset; let src_idx = i1 * src_stride1 + src_offset; let dst = &mut dst[dst_idx..dst_idx + d2]; let src = &src[src_idx..src_idx + d2]; dst.copy_from_slice(src) } } fn copy_strided_src_<T: Copy>(src: &[T], dst: &mut [T], dst_offset: usize, src_l: &Layout) { match src_l.strided_blocks() { crate::StridedBlocks::SingleBlock { start_offset, len } => { let to_copy = (dst.len() - dst_offset).min(len); dst[dst_offset..dst_offset + to_copy] .copy_from_slice(&src[start_offset..start_offset + to_copy]) } crate::StridedBlocks::MultipleBlocks { block_start_index, block_len: 1, } => { for (dst_index, src_index) in block_start_index.enumerate() { let dst_index = dst_index + dst_offset; if dst_index >= dst.len() { break; } dst[dst_index] = src[src_index] } } crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { let mut dst_index = dst_offset; for src_index in block_start_index { let next_dst_index = dst_index + block_len; if dst_index >= dst.len() { break; } let to_copy = usize::min(block_len, dst.len() - dst_index); dst[dst_index..dst_index + to_copy] .copy_from_slice(&src[src_index..src_index + to_copy]); dst_index = next_dst_index } } } } struct Conv1D<'a>(&'a crate::conv::ParamsConv1D); impl Map2 for Conv1D<'_> { const OP: &'static str = "conv1d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = 
&inp[inp_l.start_offset()..]; let k = &k[k_l.start_offset()..]; let (inp_s0, inp_s1, inp_s2) = crate::shape::dims3(inp_l.stride())?; let (k_s0, k_s1, k_s2) = crate::shape::dims3(k_l.stride())?; let l_out = p.l_out(); let dst_elems = p.c_out * l_out * p.b_size; // The output shape is [b_size, c_out, l_out] let dst = vec![T::zero(); dst_elems]; // TODO: Avoid making this copy if `inp` already has the appropriate layout. let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.l_in]; for b_idx in 0..p.b_size { for src_l in 0..p.l_in { for src_c_idx in 0..p.c_in { let inp_idx = b_idx * inp_s0 + src_c_idx * inp_s1 + src_l * inp_s2; inp_cont[b_idx * p.l_in * p.c_in + src_l * p.c_in + src_c_idx] = inp[inp_idx] } } } for offset in 0..p.k_size { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let dst_idx = dst_c_idx * l_out; let k_cont = (0..p.c_in) .map(|c_in_idx| k[dst_c_idx * k_s0 + c_in_idx * k_s1 + offset * k_s2]) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { let dst_idx = dst_idx + b_idx * p.c_out * l_out; for dst_l in 0..l_out { let dst_idx = dst_idx + dst_l; let src_l = p.stride * dst_l + offset * p.dilation; if src_l < p.padding || src_l >= p.padding + p.l_in { continue; } let src_l = src_l - p.padding; let inp_cont = &inp_cont[b_idx * p.l_in * p.c_in + src_l * p.c_in..]; assert!(inp_cont.len() >= p.c_in); assert!(k_cont.len() >= p.c_in); let mut d = T::zero(); unsafe { T::vec_dot(inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to parallelise // the different tasks so no two threads can try to write at the same // location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } }) } Ok(dst) } } struct Im2Col1D { l_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col1D { fn l_out(&self, l: usize) -> usize { (l + 2 * self.padding - self.dilation * (self.l_k - 1) - 1) / self.stride + 1 } } impl Map1 for Im2Col1D { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>> { let &Self { l_k, stride, dilation, padding, } = self; let (b, c, l) = layout.shape().dims3()?; let l_out = self.l_out(l); let src = &vs[layout.start_offset()..]; let mut dst = vec![T::zero(); b * l_out * c * l_k]; let (src_s0, src_s1, src_s2) = { let s = layout.stride(); (s[0], s[1], s[2]) }; // TODO: provide specialized kernels for the common use cases. 
// - l_k = 1 // - padding = 0 // - stride = 1 // - dilation = 1 for b_idx in 0..b { let src_idx = b_idx * src_s0; let dst_idx = b_idx * l_out * c * l_k; for l_idx in 0..l_out { let dst_idx = dst_idx + l_idx * c * l_k; for c_idx in 0..c { let dst_idx = dst_idx + c_idx * l_k; let src_idx = c_idx * src_s1 + src_idx; for l_k_idx in 0..l_k { let src_l = l_idx * stride + l_k_idx * dilation; if padding != 0 && (src_l < padding || src_l >= l + padding) { continue; } let src_l = src_l - padding; let src_idx = src_idx + src_l * src_s2; let dst_idx = dst_idx + l_k_idx; dst[dst_idx] = src[src_idx] } } } } Ok(dst) } } struct Im2Col { h_k: usize, w_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col { fn hw_out(&self, h: usize, w: usize) -> (usize, usize) { let h_out = (h + 2 * self.padding - self.dilation * (self.h_k - 1) - 1) / self.stride + 1; let w_out = (w + 2 * self.padding - self.dilation * (self.w_k - 1) - 1) / self.stride + 1; (h_out, w_out) } } impl Map1 for Im2Col { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>> { let &Self { h_k, w_k, stride, dilation, padding, } = self; let (b, c, h, w) = layout.shape().dims4()?; let (h_out, w_out) = self.hw_out(h, w); let src = &vs[layout.start_offset()..]; let mut dst = vec![T::zero(); b * h_out * w_out * c * h_k * w_k]; let (src_s0, src_s1, src_s2, src_s3) = { let s = layout.stride(); (s[0], s[1], s[2], s[3]) }; // TODO: provide specialized kernels for the common use cases. // - h_k = w_k = 1 // - padding = 0 // - stride = 1 // - dilation = 1 for b_idx in 0..b { let src_idx = b_idx * src_s0; let dst_idx = b_idx * h_out * w_out * c * h_k * w_k; for h_idx in 0..h_out { let dst_idx = dst_idx + h_idx * w_out * c * h_k * w_k; for w_idx in 0..w_out { let dst_idx = dst_idx + w_idx * c * h_k * w_k; for c_idx in 0..c { let dst_idx = dst_idx + c_idx * h_k * w_k; let src_idx = c_idx * src_s1 + src_idx; for h_k_idx in 0..h_k { let src_h = h_idx * stride + h_k_idx * dilation; if padding != 0 && (src_h < padding || src_h >= h + padding) { continue; } let src_h = src_h - padding; let src_idx = src_idx + src_h * src_s2; let dst_idx = dst_idx + h_k_idx * w_k; for w_k_idx in 0..w_k { let src_w = w_idx * stride + w_k_idx * dilation; if padding != 0 && (src_w < padding || src_w >= w + padding) { continue; } let src_w = src_w - padding; let src_idx = src_idx + src_w * src_s3; let dst_idx = dst_idx + w_k_idx; dst[dst_idx] = src[src_idx] } } } } } } Ok(dst) } } struct Col2Im1D { stride: usize, } impl Map1 for Col2Im1D { fn f<T: WithDType>(&self, col: &[T], l: &Layout) -> Result<Vec<T>> { let (b_size, l_in, c_out, k_size) = l.shape().dims4()?; let stride = self.stride; let l_out = (l_in - 1) * stride + k_size; let mut im = vec![T::zero(); b_size * c_out * l_out]; let (dst_s0, dst_s1) = (c_out * l_out, l_out); let (src_s0, src_s1, src_s2) = (c_out * k_size * l_in, c_out * k_size, k_size); for l_in_i in 0..l_in { for k_i in 0..k_size { let l_out_i = l_in_i * stride + k_i; for b_i in 0..b_size { for c_i in 0..c_out { let dst_idx = b_i * dst_s0 + c_i * dst_s1 + l_out_i; let src_idx = b_i * src_s0 + l_in_i * src_s1 + c_i * src_s2 + k_i; im[dst_idx] += col[src_idx] } } } } Ok(im) } } struct ConvTranspose1D<'a>(&'a crate::conv::ParamsConvTranspose1D); impl Map2 for ConvTranspose1D<'_> { const OP: &'static str = "conv_transpose1d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = &inp[inp_l.start_offset()..]; let k = &k[k_l.start_offset()..]; let (inp_s0, inp_s1, inp_s2) 
= crate::shape::dims3(inp_l.stride())?; let (k_s0, k_s1, k_s2) = crate::shape::dims3(k_l.stride())?; let l_out = p.l_out(); // Output shape: [b_size, c_out, l_out]. let dst_elems = p.c_out * l_out * p.b_size; let dst = vec![T::zero(); dst_elems]; let dst_s0 = p.c_out * l_out; let dst_s1 = l_out; let dst_s2 = 1; // TODO: Avoid making this copy if `inp` already has the appropriate layout. let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.l_in]; let cont_s0 = p.l_in * p.c_in; let cont_s1 = p.c_in; for b_idx in 0..p.b_size { for l_idx in 0..p.l_in { for c_idx in 0..p.c_in { let src_idx = b_idx * inp_s0 + c_idx * inp_s1 + l_idx * inp_s2; let dst_idx = b_idx * cont_s0 + l_idx * cont_s1 + c_idx; inp_cont[dst_idx] = inp[src_idx] } } } for k_idx in 0..p.k_size { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let k_cont = (0..p.c_in) .map(|c_in_idx| k[c_in_idx * k_s0 + dst_c_idx * k_s1 + k_idx * k_s2]) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { for l_idx in 0..p.l_in { let out_idx = l_idx * p.stride + k_idx * p.dilation; if out_idx < p.padding { continue; } let out_idx = out_idx - p.padding; if out_idx < l_out { let inp_cont = &inp_cont[b_idx * cont_s0 + l_idx * cont_s1..]; let dst_idx = b_idx * dst_s0 + out_idx * dst_s2 + dst_c_idx * dst_s1; let mut d = T::zero(); unsafe { T::vec_dot(inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to // parallelise the different tasks so no two threads can try to // write at the same location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } } }) } Ok(dst) } } struct Conv2D<'a>(&'a crate::conv::ParamsConv2D); impl Map2 for Conv2D<'_> { const OP: &'static str = "conv2d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = &inp[inp_l.start_offset()..]; let (inp_s0, inp_s1, inp_s2, inp_s3) = crate::shape::dims4(inp_l.stride())?; let k = &k[k_l.start_offset()..]; let (k_s0, k_s1, k_s2, k_s3) = crate::shape::dims4(k_l.stride())?; let (out_h, out_w) = (p.out_h(), p.out_w()); // Output shape: [b_size, c_out, out_h, out_w]. let dst = vec![T::zero(); p.b_size * p.c_out * out_h * out_w]; // TODO: Avoid making this copy if `inp` already has the appropriate layout. 
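// Descriptive note (editorial): the input is repacked below into a contiguous [b, h, w, c_in] (channels-last) buffer so that the per-output-pixel dot product over c_in further down can read both operands contiguously via `T::vec_dot`.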
let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.i_h * p.i_w]; let cont_s0 = p.i_h * p.i_w * p.c_in; let cont_s1 = p.i_w * p.c_in; let cont_s2 = p.c_in; for b_idx in 0..p.b_size { for h_idx in 0..p.i_h { for w_idx in 0..p.i_w { for c_idx in 0..p.c_in { let src_idx = b_idx * inp_s0 + c_idx * inp_s1 + h_idx * inp_s2 + w_idx * inp_s3; let dst_idx = b_idx * cont_s0 + h_idx * cont_s1 + w_idx * cont_s2 + c_idx; inp_cont[dst_idx] = inp[src_idx] } } } } for offset_h in 0..p.k_h { for offset_w in 0..p.k_w { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let dst_idx = dst_c_idx * out_w * out_h; let k_cont = (0..p.c_in) .map(|c_in_idx| { k[dst_c_idx * k_s0 + c_in_idx * k_s1 + offset_h * k_s2 + offset_w * k_s3] }) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { let dst_idx = dst_idx + b_idx * p.c_out * out_h * out_w; for dst_h in 0..out_h { let dst_idx = dst_idx + dst_h * out_w; let src_h = p.stride * dst_h + offset_h * p.dilation; if src_h < p.padding || src_h >= p.i_h + p.padding { continue; } let src_h = src_h - p.padding; for dst_w in 0..out_w { let dst_idx = dst_idx + dst_w; let src_w = p.stride * dst_w + offset_w * p.dilation; if src_w < p.padding || src_w >= p.i_w + p.padding { continue; } let src_w = src_w - p.padding; let inp_cont = &inp_cont [b_idx * cont_s0 + src_h * cont_s1 + src_w * cont_s2..]; assert!(inp_cont.len() >= p.c_in); assert!(k_cont.len() >= p.c_in); let mut d = T::zero(); unsafe { T::vec_dot(inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to parallelise // the different tasks so no two threads can try to write at the same // location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } } }); } } Ok(dst) } } struct ConvTranspose2D<'a>(&'a crate::conv::ParamsConvTranspose2D); impl Map2 for ConvTranspose2D<'_> { const OP: &'static str = "conv_transpose2d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = &inp[inp_l.start_offset()..]; let (inp_s0, inp_s1, inp_s2, inp_s3) = crate::shape::dims4(inp_l.stride())?; let k = &k[k_l.start_offset()..]; let (k_s0, k_s1, k_s2, k_s3) = crate::shape::dims4(k_l.stride())?; let (out_h, out_w) = (p.out_h(), p.out_w()); // Output shape: [b_size, c_out, out_h, out_w]. let dst = vec![T::zero(); p.b_size * p.c_out * out_h * out_w]; let dst_s0 = p.c_out * out_h * out_w; let dst_s1 = out_h * out_w; let dst_s2 = out_w; let dst_s3 = 1; // TODO: Avoid making this copy if `inp` already has the appropriate layout. 
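// Descriptive note (editorial): same channels-last repacking as in Conv2D above; for the transposed convolution each input position then accumulates its contribution into the output location `inp * stride + k * dilation - padding`.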
let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.i_h * p.i_w]; let cont_s0 = p.i_h * p.i_w * p.c_in; let cont_s1 = p.i_w * p.c_in; let cont_s2 = p.c_in; for b_idx in 0..p.b_size { for h_idx in 0..p.i_h { for w_idx in 0..p.i_w { for c_idx in 0..p.c_in { let src_idx = b_idx * inp_s0 + c_idx * inp_s1 + h_idx * inp_s2 + w_idx * inp_s3; let dst_idx = b_idx * cont_s0 + h_idx * cont_s1 + w_idx * cont_s2 + c_idx; inp_cont[dst_idx] = inp[src_idx] } } } } for k_y in 0..p.k_h { for k_x in 0..p.k_w { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let k_cont = (0..p.c_in) .map(|c_in_idx| { k[c_in_idx * k_s0 + dst_c_idx * k_s1 + k_y * k_s2 + k_x * k_s3] }) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { for inp_y in 0..p.i_h { for inp_x in 0..p.i_w { let out_x = inp_x * p.stride + k_x * p.dilation; let out_y = inp_y * p.stride + k_y * p.dilation; if out_x < p.padding || out_y < p.padding { continue; } let out_x = out_x - p.padding; let out_y = out_y - p.padding; if out_x < out_w && out_y < out_h { let inp_cont = &inp_cont [b_idx * cont_s0 + inp_y * cont_s1 + inp_x * cont_s2..]; let dst_idx = b_idx * dst_s0 + out_y * dst_s2 + out_x * dst_s3 + dst_c_idx * dst_s1; let mut d = T::zero(); unsafe { T::vec_dot( inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in, ) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to // parallelise the different tasks so no two threads can try to // write at the same location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } } } }) } } Ok(dst) } } struct MatMul((usize, usize, usize, usize)); impl MatMul { fn striding_error(&self, lhs_l: &Layout, rhs_l: &Layout, msg: &'static str) -> Error { Error::MatMulUnexpectedStriding(Box::new(crate::error::MatMulUnexpectedStriding { lhs_l: lhs_l.clone(), rhs_l: rhs_l.clone(), bmnk: self.0, msg, })) .bt() } fn ab_skip(&self, lhs_l: &Layout, rhs_l: &Layout) -> Result<(usize, usize)> { let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let rank = lhs_stride.len(); let (_b, m, n, k) = self.0; let a_skip: usize = match lhs_stride[..rank - 2] { [s1, stride] if s1 == stride * lhs_l.dims()[1] => stride, [_, stride] if lhs_l.dims()[0] == 1 => stride, [stride, _] if lhs_l.dims()[1] == 1 => stride, [stride] => stride, [] => m * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))?, }; let b_skip: usize = match rhs_stride[..rank - 2] { [s1, stride] if s1 == stride * rhs_l.dims()[1] => stride, [_, stride] if rhs_l.dims()[0] == 1 => stride, [stride, _] if rhs_l.dims()[1] == 1 => stride, [stride] => stride, [] => n * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))?, }; Ok((a_skip, b_skip)) } } impl Map2 for MatMul { const OP: &'static str = "mat_mul"; #[cfg(all(not(feature = "mkl"), not(feature = "accelerate")))] fn f<T: 'static + WithDType + num_traits::Num + Copy>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<T>> { use gemm::{gemm, Parallelism}; match T::DTYPE { DType::F16 | DType::F32 | DType::F64 => {} _ => Err(Error::UnsupportedDTypeForOp(T::DTYPE, "matmul").bt())?, } let (b, m, n, k) = self.0; let lhs = &lhs[lhs_l.start_offset()..]; let rhs = &rhs[rhs_l.start_offset()..]; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let rank = lhs_stride.len(); let lhs_cs = lhs_stride[rank - 1]; let lhs_rs = lhs_stride[rank - 2]; let rhs_cs = rhs_stride[rank - 1]; let rhs_rs = rhs_stride[rank - 2]; let (a_skip, b_skip) = self.ab_skip(lhs_l, rhs_l)?; let c_skip: usize = m * n; let dst_shape: Shape 
= (m, n).into(); let dst_strides = dst_shape.stride_contiguous(); let dst_rs = dst_strides[0]; let dst_cs = dst_strides[1]; let mut dst = vec![T::zero(); b * m * n]; let num_threads = crate::utils::get_num_threads(); let parallelism = if num_threads > 1 { Parallelism::Rayon(num_threads) } else { Parallelism::None }; for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { gemm( /* m: usize = */ m, /* n: usize = */ n, /* k: usize = */ k, /* dst: *mut T = */ dst_p.as_mut_ptr(), /* dst_cs: isize = */ dst_cs as isize, /* dst_rs: isize = */ dst_rs as isize, /* read_dst: bool = */ false, /* lhs: *const T = */ lhs_p.as_ptr(), /* lhs_cs: isize = */ lhs_cs as isize, /* lhs_rs: isize = */ lhs_rs as isize, /* rhs: *const T = */ rhs_p.as_ptr(), /* rhs_cs: isize = */ rhs_cs as isize, /* rhs_rs: isize = */ rhs_rs as isize, /* alpha: T = */ T::zero(), /* beta: T = */ T::one(), /* conj_dst: bool = */ false, /* conj_lhs: bool = */ false, /* conj_rhs: bool = */ false, parallelism, ) } } Ok(dst) } #[cfg(feature = "accelerate")] fn f<T: 'static + WithDType + num_traits::Num + Copy>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<T>> { let (b, m, n, k) = self.0; let lhs = &lhs[lhs_l.start_offset()..]; let rhs = &rhs[rhs_l.start_offset()..]; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let (a_skip, b_skip) = self.ab_skip(lhs_l, rhs_l)?; let c_skip: usize = m * n; let rhs_m1 = rhs_stride[rhs_stride.len() - 1]; let rhs_m2 = rhs_stride[rhs_stride.len() - 2]; let lhs_m1 = lhs_stride[lhs_stride.len() - 1]; let lhs_m2 = lhs_stride[lhs_stride.len() - 2]; let (lda, transa) = if (rhs_m1 == 1 || n == 1) && (rhs_m2 == n || k == 1) { (n as i32, b'N') } else if rhs_m1 == k && rhs_m2 == 1 { (k as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))? }; // The b tensor has dims batching, m, k (lhs) let (ldb, transb) = if (lhs_m1 == 1 || k == 1) && (lhs_m2 == k || m == 1) { (k as i32, b'N') } else if lhs_m1 == m && lhs_m2 == 1 { (m as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))? 
}; let mut dst = vec![T::zero(); b * m * n]; match T::DTYPE { DType::F16 => { crate::bail!("the accelerate backend does not support f16 matmul") } DType::F32 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f32; let b = lhs_p.as_ptr() as *const f32; let c = dst_p.as_mut_ptr() as *mut f32; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::accelerate::sgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } DType::F64 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f64; let b = lhs_p.as_ptr() as *const f64; let c = dst_p.as_mut_ptr() as *mut f64; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::accelerate::dgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } dtype => Err(Error::UnsupportedDTypeForOp(dtype, "matmul").bt())?, } Ok(dst) } #[cfg(feature = "mkl")] fn f<T: 'static + WithDType + num_traits::Num + Copy>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<T>> { let (b, m, n, k) = self.0; let lhs = &lhs[lhs_l.start_offset()..]; let rhs = &rhs[rhs_l.start_offset()..]; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let (a_skip, b_skip) = self.ab_skip(lhs_l, rhs_l)?; let c_skip: usize = m * n; let rhs_m1 = rhs_stride[rhs_stride.len() - 1]; let rhs_m2 = rhs_stride[rhs_stride.len() - 2]; let lhs_m1 = lhs_stride[lhs_stride.len() - 1]; let lhs_m2 = lhs_stride[lhs_stride.len() - 2]; let (lda, transa) = if (rhs_m1 == 1 || n == 1) && (rhs_m2 == n || k == 1) { (n as i32, b'N') } else if rhs_m1 == k && rhs_m2 == 1 { (k as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))? }; // The b tensor has dims batching, m, k (lhs) let (ldb, transb) = if (lhs_m1 == 1 || k == 1) && (lhs_m2 == k || m == 1) { (k as i32, b'N') } else if lhs_m1 == m && lhs_m2 == 1 { (m as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))? 
}; let mut dst = vec![T::zero(); b * m * n]; match T::DTYPE { DType::F16 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f16; let b = lhs_p.as_ptr() as *const f16; let c = dst_p.as_mut_ptr() as *mut f16; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::mkl::hgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ f16::ONE, /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ f16::ZERO, /* c= */ c, /* ldc= */ n as i32, ) } } } DType::F32 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f32; let b = lhs_p.as_ptr() as *const f32; let c = dst_p.as_mut_ptr() as *mut f32; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::mkl::sgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } DType::F64 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f64; let b = lhs_p.as_ptr() as *const f64; let c = dst_p.as_mut_ptr() as *mut f64; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::mkl::dgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } dtype => Err(Error::UnsupportedDTypeForOp(dtype, "matmul").bt())?, } Ok(dst) } } fn elu<T: num_traits::Float>(v: T, alpha: T) -> T { if v.is_sign_positive() { v } else { (v.exp() - T::one()) * alpha } } impl CpuStorage { pub fn as_slice<D: WithDType>(&self) -> Result<&[D]> { D::cpu_storage_as_slice(self) } pub fn concat(storages: &[CpuStorage]) -> Result<CpuStorage> { let storage0 = &storages[0]; let s = match storage0 { Self::U8(_) => { let storages = storages .iter() .map(|s| match s { Self::U8(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::U8(storages) } Self::U32(_) => { let storages = storages .iter() .map(|s| match s { Self::U32(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::U32(storages) } Self::I64(_) => { let storages = storages .iter() .map(|s| match s { Self::I64(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::I64(storages) } Self::BF16(_) => { let storages = storages .iter() .map(|s| match s { Self::BF16(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::BF16(storages) } Self::F16(_) => { let storages = storages .iter() .map(|s| match s { Self::F16(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? 
.concat(); Self::F16(storages) } Self::F32(_) => { let storages = storages .iter() .map(|s| match s { Self::F32(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::F32(storages) } Self::F64(_) => { let storages = storages .iter() .map(|s| match s { Self::F64(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::F64(storages) } }; Ok(s) } } impl BackendStorage for CpuStorage { type Device = CpuDevice; fn dtype(&self) -> DType { match self { Self::U8(_) => DType::U8, Self::U32(_) => DType::U32, Self::I64(_) => DType::I64, Self::BF16(_) => DType::BF16, Self::F16(_) => DType::F16, Self::F32(_) => DType::F32, Self::F64(_) => DType::F64, } } fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { // TODO: find a way around the quadratic number of cases below. match (self, dtype) { (Self::U8(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v as f32)); Ok(Self::BF16(data)) } (Self::U32(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v as f32)); Ok(Self::BF16(data)) } (Self::I64(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v as f32)); Ok(Self::BF16(data)) } (Self::BF16(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| v); Ok(Self::BF16(data)) } (Self::F16(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v.to_f32())); Ok(Self::BF16(data)) } (Self::F32(storage), DType::BF16) => { let data = unary_map(storage, layout, bf16::from_f32); Ok(Self::BF16(data)) } (Self::F64(storage), DType::BF16) => { let data = unary_map(storage, layout, bf16::from_f64); Ok(Self::BF16(data)) } (Self::U8(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v as f32)); Ok(Self::F16(data)) } (Self::U32(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v as f32)); Ok(Self::F16(data)) } (Self::I64(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v as f32)); Ok(Self::F16(data)) } (Self::BF16(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v.to_f32())); Ok(Self::F16(data)) } (Self::F16(storage), DType::F16) => { let data = unary_map(storage, layout, |v| v); Ok(Self::F16(data)) } (Self::F32(storage), DType::F16) => { let data = unary_map(storage, layout, f16::from_f32); Ok(Self::F16(data)) } (Self::F64(storage), DType::F16) => { let data = unary_map(storage, layout, f16::from_f64); Ok(Self::F16(data)) } (Self::U8(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::U32(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::I64(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::BF16(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v.to_f32()); Ok(Self::F32(data)) } (Self::F16(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v.to_f32()); Ok(Self::F32(data)) } (Self::F32(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v); Ok(Self::F32(data)) } (Self::F64(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::U8(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v); Ok(Self::U8(data)) } (Self::BF16(storage), DType::U8) 
=> { let data = unary_map(storage, layout, |v| v.to_f32() as u8); Ok(Self::U8(data)) } (Self::F16(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v.to_f32() as u8); Ok(Self::U8(data)) } (Self::F32(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::F64(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::U32(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::I64(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::U8(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::U32(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v); Ok(Self::U32(data)) } (Self::I64(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::BF16(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v.to_f32() as u32); Ok(Self::U32(data)) } (Self::F16(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v.to_f32() as u32); Ok(Self::U32(data)) } (Self::F32(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::F64(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::U8(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::U32(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::I64(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v); Ok(Self::I64(data)) } (Self::BF16(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v.to_f32() as i64); Ok(Self::I64(data)) } (Self::F16(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v.to_f32() as i64); Ok(Self::I64(data)) } (Self::F32(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::F64(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::U8(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::U32(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::I64(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::BF16(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v.to_f64()); Ok(Self::F64(data)) } (Self::F16(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v.to_f64()); Ok(Self::F64(data)) } (Self::F32(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::F64(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v); Ok(Self::F64(data)) } } } fn reduce_op(&self, op: ReduceOp, layout: &Layout, reduce_dims: &[usize]) -> Result<Self> { match op { ReduceOp::Sum => { let src_dims = layout.dims(); let mut dst_dims = src_dims.to_vec(); for &dim in reduce_dims.iter() { dst_dims[dim] = 1; } let dst_shape = Shape::from(dst_dims); let mut reduce_dims = reduce_dims.to_vec(); // Sort the reduce_dims as they have to be processed from left to right when converting the // indexes. 
reduce_dims.sort(); let reduce_dims_and_stride: Vec<_> = reduce_dims .iter() .map(|&d| (src_dims[d], src_dims[d + 1..].iter().product::<usize>())) .collect(); ReduceSum { dst_shape: &dst_shape, reduce_dims: &reduce_dims, reduce_dims_and_stride, } .map(self, layout) } ReduceOp::Min | ReduceOp::ArgMin | ReduceOp::Max | ReduceOp::ArgMax => { let reduce_dim_index = match reduce_dims { [reduce_dim_index] => *reduce_dim_index, _ => { let op = match op { ReduceOp::Min => "min", ReduceOp::ArgMin => "argmin", ReduceOp::Max => "max", ReduceOp::ArgMax => "argmax", _ => unreachable!(), }; let dims = reduce_dims.to_vec(); Err(Error::OnlySingleDimension { op, dims })? } }; let (use_min, return_index) = match op { ReduceOp::Min => (true, false), ReduceOp::ArgMin => (true, true), ReduceOp::Max => (false, false), ReduceOp::ArgMax => (false, true), _ => unreachable!(), }; ReduceIndex { reduce_dim_index, use_min, return_index, } .map(self, layout) } } } fn cmp(&self, op: CmpOp, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout) -> Result<Self> { Cmp(op).map(self, lhs_l, rhs, rhs_l) } fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> { Affine(mul, add).map(self, layout) } fn avg_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { AvgPool2D(kernel_size, stride).map(self, layout) } fn max_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { MaxPool2D(kernel_size, stride).map(self, layout) } fn upsample_nearest1d(&self, layout: &Layout, sz: usize) -> Result<Self> { UpsampleNearest1D(sz).map(self, layout) } fn upsample_nearest2d(&self, layout: &Layout, h: usize, w: usize) -> Result<Self> { UpsampleNearest2D(h, w).map(self, layout) } fn powf(&self, layout: &Layout, e: f64) -> Result<Self> { use num_traits::Float; // TODO: Have some generic map for functions that apply on num_traits::Float elements. match self { Self::BF16(storage) => { let data = unary_map(storage, layout, |v| v.powf(bf16::from_f64(e))); Ok(Self::BF16(data)) } Self::F16(storage) => { let data = unary_map(storage, layout, |v| v.powf(f16::from_f64(e))); Ok(Self::F16(data)) } Self::F32(storage) => { let data = unary_map(storage, layout, |v| v.powf(e as f32)); Ok(Self::F32(data)) } Self::F64(storage) => { let data = unary_map(storage, layout, |v| v.powf(e)); Ok(Self::F64(data)) } Self::U8(_) => Err(Error::UnsupportedDTypeForOp(DType::U8, "elu").bt()), Self::U32(_) => Err(Error::UnsupportedDTypeForOp(DType::U32, "elu").bt()), Self::I64(_) => Err(Error::UnsupportedDTypeForOp(DType::I64, "elu").bt()), } } fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> { // TODO: Have some generic map for functions that apply on num_traits::Float elements. 
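// Descriptive note (editorial): `elu` (defined earlier in this file) is the identity for values with a positive sign and `alpha * (exp(v) - 1)` otherwise; integer dtypes are rejected in the match below.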
match self { Self::BF16(storage) => { let data = unary_map(storage, layout, |v| elu(v, bf16::from_f64(alpha))); Ok(Self::BF16(data)) } Self::F16(storage) => { let data = unary_map(storage, layout, |v| elu(v, f16::from_f64(alpha))); Ok(Self::F16(data)) } Self::F32(storage) => { let data = unary_map(storage, layout, |v| elu(v, f32::from_f64(alpha))); Ok(Self::F32(data)) } Self::F64(storage) => { let data = unary_map(storage, layout, |v| elu(v, alpha)); Ok(Self::F64(data)) } Self::U8(_) => Err(Error::UnsupportedDTypeForOp(DType::U8, "elu").bt()), Self::U32(_) => Err(Error::UnsupportedDTypeForOp(DType::U32, "elu").bt()), Self::I64(_) => Err(Error::UnsupportedDTypeForOp(DType::I64, "elu").bt()), } } fn unary_impl<B: UnaryOpT>(&self, layout: &Layout) -> Result<Self> { match self { Self::BF16(storage) => { if B::BF16_VEC { let data = unary_map_vec(storage, layout, B::bf16, B::bf16_vec); Ok(Self::BF16(data)) } else { let data = unary_map(storage, layout, B::bf16); Ok(Self::BF16(data)) } } Self::F16(storage) => { if B::F16_VEC { let data = unary_map_vec(storage, layout, B::f16, B::f16_vec); Ok(Self::F16(data)) } else { let data = unary_map(storage, layout, B::f16); Ok(Self::F16(data)) } } Self::F32(storage) => { if B::F32_VEC { let data = unary_map_vec(storage, layout, B::f32, B::f32_vec); Ok(Self::F32(data)) } else { let data = unary_map(storage, layout, B::f32); Ok(Self::F32(data)) } } Self::F64(storage) => { if B::F64_VEC { let data = unary_map_vec(storage, layout, B::f64, B::f64_vec); Ok(Self::F64(data)) } else { let data = unary_map(storage, layout, B::f64); Ok(Self::F64(data)) } } Self::U8(storage) => { let data = unary_map(storage, layout, B::u8); Ok(Self::U8(data)) } Self::U32(storage) => { let data = unary_map(storage, layout, B::u32); Ok(Self::U32(data)) } Self::I64(storage) => { let data = unary_map(storage, layout, B::i64); Ok(Self::I64(data)) } } } fn binary_impl<B: BinaryOpT>( &self, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { match (self, rhs) { (Self::BF16(lhs), Self::BF16(rhs)) => { let data = if B::BF16_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::bf16, B::bf16_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::bf16) }; Ok(Self::BF16(data)) } (Self::F16(lhs), Self::F16(rhs)) => { let data = if B::F16_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::f16, B::f16_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::f16) }; Ok(Self::F16(data)) } (Self::F32(lhs), Self::F32(rhs)) => { let data = if B::F32_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::f32, B::f32_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::f32) }; Ok(Self::F32(data)) } (Self::F64(lhs), Self::F64(rhs)) => { let data = if B::F64_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::f64, B::f64_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::f64) }; Ok(Self::F64(data)) } (Self::U32(lhs), Self::U32(rhs)) => { let data = if B::U32_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::u32, B::u32_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::u32) }; Ok(Self::U32(data)) } (Self::I64(lhs), Self::I64(rhs)) => { let data = if B::I64_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::i64, B::i64_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::i64) }; Ok(Self::I64(data)) } (Self::U8(lhs), Self::U8(rhs)) => { let data = if B::U8_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::u8, B::u8_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::u8) }; Ok(Self::U8(data)) } _ => { // This should be covered by the dtype check above. 
Err(Error::DTypeMismatchBinaryOp { lhs: self.dtype(), rhs: rhs.dtype(), op: B::NAME, } .bt()) } } } fn copy2d( &self, dst: &mut Self, d1: usize, d2: usize, src_s: usize, dst_s: usize, src_o: usize, dst_o: usize, ) -> Result<()> { match (self, dst) { (Self::U8(src), Self::U8(dst)) => copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o), (Self::U32(src), Self::U32(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (Self::I64(src), Self::I64(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (Self::BF16(src), Self::BF16(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (Self::F16(src), Self::F16(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (Self::F32(src), Self::F32(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (Self::F64(src), Self::F64(dst)) => { copy2d_(src, dst, d1, d2, src_s, dst_s, src_o, dst_o) } (_, dst) => { return Err(Error::DTypeMismatchBinaryOp { lhs: self.dtype(), rhs: dst.dtype(), op: "copy2d", } .bt()); } } Ok(()) } fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> { match (self, dst) { (Self::U8(src), Self::U8(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::U32(src), Self::U32(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::I64(src), Self::I64(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::BF16(src), Self::BF16(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::F16(src), Self::F16(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::F32(src), Self::F32(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::F64(src), Self::F64(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (_, dst) => { // This should be covered by the dtype check above. return Err(Error::DTypeMismatchBinaryOp { lhs: self.dtype(), rhs: dst.dtype(), op: "copy_strided", } .bt()); } } Ok(()) } fn where_cond( &self, layout: &Layout, t: &Self, t_l: &Layout, f: &Self, f_l: &Layout, ) -> Result<Self> { match self { Self::U8(pred) => WCond(pred, layout).map(t, t_l, f, f_l), Self::U32(pred) => WCond(pred, layout).map(t, t_l, f, f_l), Self::I64(pred) => WCond(pred, layout).map(t, t_l, f, f_l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "where-cond")), } } fn conv1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv1D, ) -> Result<Self> { if !USE_IM2COL_CONV1D { return Conv1D(params).map(self, l, kernel, kernel_l); } let op = Im2Col1D { l_k: params.k_size, padding: params.padding, stride: params.stride, dilation: params.dilation, }; let col = op.map(self, l)?; let b = params.b_size; let n = params.c_out; let l_out = params.l_out(); let k = op.l_k * params.c_in; let m = l_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = unsafe { self.device() .alloc_uninit(kernel_l.shape(), kernel.dtype())? }; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? 
}; let res_l = Layout::contiguous((b, l_out, params.c_out)).transpose(1, 2)?; let mut res_t = unsafe { self.device().alloc_uninit(res_l.shape(), res.dtype())? }; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } fn conv_transpose1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { let can_use_col2im = kernel_l.is_contiguous() && params.dilation == 1 && params.padding == 0 && params.output_padding == 0; if USE_COL2IM_CONV1D_TR && can_use_col2im { let (b_size, c_in, l_in) = l.shape().dims3()?; let (c_in2, c_out, k_size) = kernel_l.shape().dims3()?; if !kernel_l.is_contiguous() { crate::bail!( "convtr1d: the second argument (kernel) has to be contiguous {kernel_l:?}" ) } if c_in != c_in2 { crate::bail!( "convtr1d: shape mismatch on c_in {:?} {:?}", l.shape(), kernel_l.shape() ) } let col = { // This merges the last two dimensions of the kernel together. let kernel_l_mm = Layout::new( (b_size, c_in, k_size * c_out).into(), vec![0, k_size * c_out, 1], kernel_l.start_offset(), ); self.matmul( kernel, ( b_size, /* m */ l_in, /* n */ c_out * k_size, /* k */ c_in, ), &l.transpose(1, 2)?, &kernel_l_mm, )? }; let col_l = Layout::contiguous((b_size, l_in, c_out, k_size)); Col2Im1D { stride: params.stride, } .map(&col, &col_l) } else { ConvTranspose1D(params).map(self, l, kernel, kernel_l) } } fn conv2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv2D, ) -> Result<Self> { if !USE_IM2COL_CONV2D { return Conv2D(params).map(self, l, kernel, kernel_l); } let op = Im2Col { h_k: params.k_h, w_k: params.k_w, padding: params.padding, stride: params.stride, dilation: params.dilation, }; let col = op.map(self, l)?; let b = params.b_size; let n = params.c_out; let (h_out, w_out) = (params.out_h(), params.out_w()); let k = op.h_k * op.w_k * params.c_in; let m = h_out * w_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = unsafe { self.device() .alloc_uninit(kernel_l.shape(), kernel.dtype())? }; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? }; let res_l = Layout::contiguous((b, h_out, w_out, params.c_out)) .transpose(1, 2)? .transpose(1, 3)?; let mut res_t = unsafe { self.device().alloc_uninit(res_l.shape(), res.dtype())? 
}; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } fn conv_transpose2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { ConvTranspose2D(params).map(self, l, kernel, kernel_l) } fn index_select(&self, ids: &Self, l: &Layout, ids_l: &Layout, dim: usize) -> Result<Self> { match ids { Self::U8(ids) => IndexSelect { ids, ids_l, dim }.map(self, l), Self::U32(ids) => IndexSelect { ids, ids_l, dim }.map(self, l), Self::I64(ids) => IndexSelect { ids, ids_l, dim }.map(self, l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "index-select").bt()), } } fn gather(&self, l: &Layout, ids: &Self, ids_l: &Layout, dim: usize) -> Result<Self> { match ids { Self::U8(ids) => Gather { ids, ids_l, dim }.map(self, l), Self::U32(ids) => Gather { ids, ids_l, dim }.map(self, l), Self::I64(ids) => Gather { ids, ids_l, dim }.map(self, l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "gather").bt()), } } fn scatter_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { match ids { Self::U8(ids) => ScatterAdd { ids, ids_l, dim }.map(self, l, src, src_l), Self::U32(ids) => ScatterAdd { ids, ids_l, dim }.map(self, l, src, src_l), Self::I64(ids) => ScatterAdd { ids, ids_l, dim }.map(self, l, src, src_l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "scatter-add").bt()), } } fn index_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { match ids { Self::U8(ids) => { let ids = match ids_l.contiguous_offsets() { Some((a, b)) => &ids[a..b], None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, }; IndexAdd { ids, dim }.map(self, l, src, src_l) } Self::U32(ids) => { let ids = match ids_l.contiguous_offsets() { Some((a, b)) => &ids[a..b], None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, }; IndexAdd { ids, dim }.map(self, l, src, src_l) } Self::I64(ids) => { let ids = match ids_l.contiguous_offsets() { Some((a, b)) => &ids[a..b], None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, }; IndexAdd { ids, dim }.map(self, l, src, src_l) } _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "index-add").bt()), } } fn matmul( &self, rhs: &Self, bmnk: (usize, usize, usize, usize), lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { MatMul(bmnk).map(self, lhs_l, rhs, rhs_l) } fn device(&self) -> &Self::Device { &CpuDevice } fn try_clone(&self, _: &Layout) -> Result<Self> { Ok(self.clone()) } fn to_cpu_storage(&self) -> Result<CpuStorage> { Ok(self.clone()) } } impl BackendDevice for CpuDevice { type Storage = CpuStorage; fn location(&self) -> crate::DeviceLocation { crate::DeviceLocation::Cpu } fn same_device(&self, _: &Self) -> bool { true } fn storage_from_slice<T: crate::WithDType>(&self, s: &[T]) -> Result<Self::Storage> { Ok(T::to_cpu_storage(s)) } fn storage_from_cpu_storage(&self, s: &CpuStorage) -> Result<Self::Storage> { Ok(s.clone()) } fn storage_from_cpu_storage_owned(&self, s: CpuStorage) -> Result<Self::Storage> { Ok(s) } fn new(_: usize) -> Result<Self> { Ok(Self) } fn set_seed(&self, _seed: u64) -> Result<()> { crate::bail!("cannot seed the CPU rng with set_seed") } fn rand_uniform(&self, shape: &Shape, dtype: DType, min: f64, max: f64) -> Result<CpuStorage> { use rand::prelude::*; let elem_count = shape.elem_count(); let mut rng = rand::thread_rng(); match dtype { DType::U8 | DType::U32 | DType::I64 => { Err(Error::UnsupportedDTypeForOp(dtype, "rand_uniform").bt()) } 
DType::BF16 => { let mut data = Vec::with_capacity(elem_count); let uniform = rand::distributions::Uniform::new(bf16::from_f64(min), bf16::from_f64(max)); for _i in 0..elem_count { data.push(rng.sample::<bf16, _>(uniform)) } Ok(CpuStorage::BF16(data)) } DType::F16 => { let mut data = Vec::with_capacity(elem_count); let uniform = rand::distributions::Uniform::new(f16::from_f64(min), f16::from_f64(max)); for _i in 0..elem_count { data.push(rng.sample::<f16, _>(uniform)) } Ok(CpuStorage::F16(data)) } DType::F32 => { let mut data = Vec::with_capacity(elem_count); let uniform = rand::distributions::Uniform::new(min as f32, max as f32); for _i in 0..elem_count { data.push(rng.sample::<f32, _>(uniform)) } Ok(CpuStorage::F32(data)) } DType::F64 => { let mut data = Vec::with_capacity(elem_count); let uniform = rand::distributions::Uniform::new(min, max); for _i in 0..elem_count { data.push(rng.sample::<f64, _>(uniform)) } Ok(CpuStorage::F64(data)) } } } fn rand_normal(&self, shape: &Shape, dtype: DType, mean: f64, std: f64) -> Result<CpuStorage> { use rand::prelude::*; let elem_count = shape.elem_count(); let mut rng = rand::thread_rng(); match dtype { DType::U8 | DType::U32 | DType::I64 => { Err(Error::UnsupportedDTypeForOp(dtype, "rand_normal").bt()) } DType::BF16 => { let mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(bf16::from_f64(mean), bf16::from_f64(std)) .map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::BF16(data)) } DType::F16 => { let mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(f16::from_f64(mean), f16::from_f64(std)) .map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::F16(data)) } DType::F32 => { let mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(mean as f32, std as f32).map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::F32(data)) } DType::F64 => { let mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(mean, std).map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::F64(data)) } } } #[allow(clippy::uninit_vec)] unsafe fn alloc_uninit(&self, shape: &Shape, dtype: DType) -> Result<CpuStorage> { let elem_count = shape.elem_count(); // The code below is highly unsafe but hopefully not directly unsound as we only consider // types that are Copy, not Drop, and for which all bit patterns are proper values. 
// It's still pretty risky, see the following for more details: // https://github.com/rust-lang/rust-clippy/issues/4483 let storage = match dtype { DType::U8 => { let mut v = Vec::with_capacity(elem_count); v.set_len(elem_count); CpuStorage::U8(v) } DType::U32 => { let mut v = Vec::with_capacity(elem_count); v.set_len(elem_count); CpuStorage::U32(v) } DType::I64 => { let mut v = Vec::with_capacity(elem_count); v.set_len(elem_count); CpuStorage::I64(v) } DType::BF16 => { let mut v = Vec::with_capacity(elem_count); v.set_len(elem_count); CpuStorage::BF16(v) } DType::F16 => { let mut v = Vec::with_capacity(elem_count); v.set_len(elem_count); CpuStorage::F16(v) } DType::F32 => { let mut v = Vec::with_capacity(elem_count); v.set_len(elem_count); CpuStorage::F32(v) } DType::F64 => { let mut v = Vec::with_capacity(elem_count); v.set_len(elem_count); CpuStorage::F64(v) } }; Ok(storage) } fn ones_impl(&self, shape: &Shape, dtype: DType) -> Result<CpuStorage> { let elem_count = shape.elem_count(); let storage = match dtype { DType::U8 => CpuStorage::U8(vec![1u8; elem_count]), DType::U32 => CpuStorage::U32(vec![1u32; elem_count]), DType::I64 => CpuStorage::I64(vec![1i64; elem_count]), DType::BF16 => CpuStorage::BF16(vec![bf16::ONE; elem_count]), DType::F16 => CpuStorage::F16(vec![f16::ONE; elem_count]), DType::F32 => CpuStorage::F32(vec![1f32; elem_count]), DType::F64 => CpuStorage::F64(vec![1f64; elem_count]), }; Ok(storage) } fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<CpuStorage> { let elem_count = shape.elem_count(); let storage = match dtype { DType::U8 => CpuStorage::U8(vec![0u8; elem_count]), DType::U32 => CpuStorage::U32(vec![0u32; elem_count]), DType::I64 => CpuStorage::I64(vec![0i64; elem_count]), DType::BF16 => CpuStorage::BF16(vec![bf16::ZERO; elem_count]), DType::F16 => CpuStorage::F16(vec![f16::ZERO; elem_count]), DType::F32 => CpuStorage::F32(vec![0f32; elem_count]), DType::F64 => CpuStorage::F64(vec![0f64; elem_count]), }; Ok(storage) } fn synchronize(&self) -> Result<()> { Ok(()) } } #[macro_export] macro_rules! map_dtype { ($name:expr, $storage:ident, $fn:expr, ($($dtypes:ident),+)) => { match $storage { $(CpuStorage::$dtypes(__e) => CpuStorage::$dtypes($fn(__e)),)* s => Err(Error::UnsupportedDTypeForOp(s.dtype(), $name).bt())?, } }; }
candle/candle-core/src/cpu_backend/mod.rs/0
{ "file_path": "candle/candle-core/src/cpu_backend/mod.rs", "repo_id": "candle", "token_count": 64089 }
//! ML framework for Rust //! //! ```rust //! use candle_core::{Tensor, DType, Device}; //! # use candle_core::Error; //! # fn main() -> Result<(), Error>{ //! //! let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?; //! let b = Tensor::arange(0f32, 12f32, &Device::Cpu)?.reshape((3, 4))?; //! let c = a.matmul(&b)?; //! //! # Ok(())} //! ``` //! //! ## Features //! //! - Simple syntax (looks and feels like PyTorch) //! - CPU and CUDA backends (and M1 support) //! - Serverless (CPU) deployments that are small and fast //! - Model training //! - Distributed computing (NCCL) //! - Models out of the box (Llama, Whisper, Falcon, ...) //! //! ## FAQ //! //! - Why Candle? //! //! Candle stems from the need to reduce binary size in order to *enable serverless* deployments, //! which is possible because the whole engine is much smaller than the very large PyTorch library. //! //! And simply *removing Python* from production workloads. //! Python can really add overhead in more complex workflows and the [GIL](https://www.backblaze.com/blog/the-python-gil-past-present-and-future/) is a notorious source of headaches. //! //! Rust is cool, and a lot of the HF ecosystem already has Rust crates, such as [safetensors](https://github.com/huggingface/safetensors) and [tokenizers](https://github.com/huggingface/tokenizers). //! //! ## Other Crates //! //! Candle consists of a number of crates. This crate holds the core data structures, but you may wish //! to look at the docs for the other crates, which can be found here: //! //! - [candle-core](https://docs.rs/candle-core/). Core data structures and data types. //! - [candle-nn](https://docs.rs/candle-nn/). Building blocks for neural nets. //! - [candle-datasets](https://docs.rs/candle-datasets/). Rust access to commonly used datasets like MNIST. //! - [candle-examples](https://docs.rs/candle-examples/). Examples of Candle in use. //! - [candle-onnx](https://docs.rs/candle-onnx/). Loading and using ONNX models. //! - [candle-pyo3](https://docs.rs/candle-pyo3/). Access to Candle from Python. //! - [candle-transformers](https://docs.rs/candle-transformers/). Candle implementations of many published transformer models. //!
#[cfg(feature = "accelerate")] mod accelerate; pub mod backend; pub mod backprop; pub mod conv; mod convert; pub mod cpu; pub mod cpu_backend; #[cfg(feature = "cuda")] pub mod cuda_backend; mod custom_op; mod device; pub mod display; mod dtype; pub mod dummy_cuda_backend; mod dummy_metal_backend; pub mod error; mod indexer; pub mod layout; #[cfg(feature = "metal")] pub mod metal_backend; #[cfg(feature = "mkl")] mod mkl; pub mod npy; pub mod op; pub mod pickle; pub mod quantized; pub mod safetensors; pub mod scalar; pub mod shape; mod sort; mod storage; pub mod streaming; mod strided_index; mod tensor; mod tensor_cat; pub mod test_utils; pub mod utils; mod variable; #[cfg(feature = "cudnn")] pub use cuda_backend::cudnn; pub use cpu_backend::{CpuStorage, CpuStorageRef}; pub use custom_op::{CustomOp1, CustomOp2, CustomOp3, InplaceOp1, InplaceOp2, InplaceOp3, UgIOp1}; pub use device::{Device, DeviceLocation, NdArray}; pub use dtype::{DType, DTypeParseError, FloatDType, IntDType, WithDType}; pub use error::{Context, Error, Result}; pub use indexer::{IndexOp, TensorIndexer}; pub use layout::Layout; pub use shape::{Shape, D}; pub use storage::Storage; pub use streaming::{StreamTensor, StreamingBinOp, StreamingModule}; pub use strided_index::{StridedBlocks, StridedIndex}; pub use tensor::{Tensor, TensorId}; pub use variable::Var; #[cfg(feature = "cuda")] pub use cuda_backend as cuda; #[cfg(not(feature = "cuda"))] pub use dummy_cuda_backend as cuda; pub use cuda::{CudaDevice, CudaStorage}; #[cfg(feature = "metal")] pub use metal_backend::{MetalDevice, MetalError, MetalStorage}; #[cfg(not(feature = "metal"))] pub use dummy_metal_backend::{MetalDevice, MetalError, MetalStorage}; #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; pub trait ToUsize2 { fn to_usize2(self) -> (usize, usize); } impl ToUsize2 for usize { fn to_usize2(self) -> (usize, usize) { (self, self) } } impl ToUsize2 for (usize, usize) { fn to_usize2(self) -> (usize, usize) { self } } /// Defining a module with forward method using a single argument. pub trait Module { fn forward(&self, xs: &Tensor) -> Result<Tensor>; } impl<T: Fn(&Tensor) -> Result<Tensor>> Module for T { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self(xs) } } impl<M: Module> Module for Option<&M> { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { None => Ok(xs.clone()), Some(m) => m.forward(xs), } } } /// A single forward method using a single single tensor argument and a flag to /// separate the training and evaluation behaviors. pub trait ModuleT { fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor>; } impl<M: Module> ModuleT for M { fn forward_t(&self, xs: &Tensor, _train: bool) -> Result<Tensor> { self.forward(xs) } }
candle/candle-core/src/lib.rs/0
{ "file_path": "candle/candle-core/src/lib.rs", "repo_id": "candle", "token_count": 1894 }
use super::k_quants::{ BlockQ2K, BlockQ3K, BlockQ4K, BlockQ4_0, BlockQ5K, BlockQ6K, BlockQ8K, BlockQ8_0, QK8_0, QK_K, }; use crate::Result; use byteorder::{ByteOrder, LittleEndian}; #[allow(unused_imports)] #[cfg(target_arch = "arm")] use core::arch::arm::*; #[allow(unused_imports)] #[cfg(target_arch = "aarch64")] use core::arch::aarch64::*; #[inline(always)] unsafe fn vdotq_s32(a: int8x16_t, b: int8x16_t) -> int32x4_t { // TODO: dotprod let p0 = vmull_s8(vget_low_s8(a), vget_low_s8(b)); let p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b)); vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)) } #[inline(always)] pub(crate) fn vec_dot_q4_0_q8_0(n: usize, xs: &[BlockQ4_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; let nb = n / qk; if n % QK8_0 != 0 { crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut sumv0 = vdupq_n_f32(0.0f32); for i in 0..nb { let x0 = &xs[i]; let y0 = &ys[i]; let m4b = vdupq_n_u8(0x0F); let s8b = vdupq_n_s8(0x8); let v0_0 = vld1q_u8(x0.qs.as_ptr()); // 4-bit -> 8-bit let v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b)); let v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); // sub 8 let v0_0ls = vsubq_s8(v0_0l, s8b); let v0_0hs = vsubq_s8(v0_0h, s8b); // load y let v1_0l = vld1q_s8(y0.qs.as_ptr()); let v1_0h = vld1q_s8(y0.qs.as_ptr().add(16)); let pl0 = vdotq_s32(v0_0ls, v1_0l); let ph0 = vdotq_s32(v0_0hs, v1_0h); sumv0 = vmlaq_n_f32( sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0.d.to_f32() * y0.d.to_f32(), ); } Ok(vaddvq_f32(sumv0)) } } #[inline(always)] pub(crate) fn vec_dot_q8_0_q8_0(n: usize, xs: &[BlockQ8_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}") } let nb = n / QK8_0; unsafe { let mut sumv0 = vdupq_n_f32(0.0f32); for i in 0..nb { let x0 = &xs[i]; let y0 = &ys[i]; let x0_0 = vld1q_s8(x0.qs.as_ptr()); let x0_1 = vld1q_s8(x0.qs.as_ptr().add(16)); // load y let y0_0 = vld1q_s8(y0.qs.as_ptr()); let y0_1 = vld1q_s8(y0.qs.as_ptr().add(16)); let p0 = vdotq_s32(x0_0, y0_0); let p1 = vdotq_s32(x0_1, y0_1); sumv0 = vmlaq_n_f32( sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), x0.d.to_f32() * y0.d.to_f32(), ); } Ok(vaddvq_f32(sumv0)) } } #[inline(always)] pub(crate) fn vec_dot_q8k_q8k(n: usize, xs: &[BlockQ8K], ys: &[BlockQ8K]) -> Result<f32> { let qk = QK_K; if n % QK_K != 0 { crate::bail!("vec_dot_q8k_q8k: {n} is not divisible by {qk}") } let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { unsafe { let mut sum_i = vdupq_n_s32(0); let scale = xs.d * ys.d; let xs = xs.qs.as_ptr(); let ys = ys.qs.as_ptr(); for i in (0..QK_K).step_by(16) { let xs = vld1q_s8(xs.add(i)); let ys = vld1q_s8(ys.add(i)); let xy = vdotq_s32(xs, ys); sum_i = vaddq_s32(sum_i, xy) } sumf += vaddvq_s32(sum_i) as f32 * scale } } Ok(sumf) } #[inline(always)] pub(crate) fn vec_dot_q6k_q8k(n: usize, xs: &[BlockQ6K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q6k_q8k: {n} is not divisible by {QK_K}") } let mut sum = 0f32; unsafe { let m4b = vdupq_n_u8(0xF); let mone = vdupq_n_u8(3); for (x, y) in xs.iter().zip(ys.iter()) { let d_all = x.d.to_f32(); let mut q6 = x.ql.as_ptr(); let mut qh = x.qh.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut scale = x.scales.as_ptr(); let q8sums = vld1q_s16_x2(y.bsums.as_ptr()); let scales = vld1q_s8(scale); let q6scales = int16x8x2_t( vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales)), ); let prod = vaddq_s32( vaddq_s32( vmull_s16(vget_low_s16(q8sums.0), vget_low_s16(q6scales.0)), 
vmull_s16(vget_high_s16(q8sums.0), vget_high_s16(q6scales.0)), ), vaddq_s32( vmull_s16(vget_low_s16(q8sums.1), vget_low_s16(q6scales.1)), vmull_s16(vget_high_s16(q8sums.1), vget_high_s16(q6scales.1)), ), ); let isum_mins = vaddvq_s32(prod); let mut isum = 0i32; for _j in 0..QK_K / 128 { let qhbits = vld1q_u8_x2(qh); qh = qh.add(32); let q6bits = vld1q_u8_x4(q6); q6 = q6.add(64); let q8bytes = vld1q_s8_x4(q8); q8 = q8.add(64); let q6h_0 = vshlq_n_u8(vandq_u8(mone, qhbits.0), 4); let q6h_1 = vshlq_n_u8(vandq_u8(mone, qhbits.1), 4); let shifted = vshrq_n_u8(qhbits.0, 2); let q6h_2 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.1, 2); let q6h_3 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let q6bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.0, m4b), q6h_0)); let q6bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.1, m4b), q6h_1)); let q6bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.2, m4b), q6h_2)); let q6bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.3, m4b), q6h_3)); let p0 = vdotq_s32(q6bytes_0, q8bytes.0); let p1 = vdotq_s32(q6bytes_1, q8bytes.1); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s32(p0) * scale0 + vaddvq_s32(p1) * scale1; scale = scale.add(2); let p2 = vdotq_s32(q6bytes_2, q8bytes.2); let p3 = vdotq_s32(q6bytes_3, q8bytes.3); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s32(p2) * scale0 + vaddvq_s32(p3) * scale1; scale = scale.add(2); let q8bytes = vld1q_s8_x4(q8); q8 = q8.add(64); let shifted = vshrq_n_u8(qhbits.0, 4); let q6h_0 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.1, 4); let q6h_1 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.0, 6); let q6h_2 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.1, 6); let q6h_3 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let q6bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.0, 4), q6h_0)); let q6bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.1, 4), q6h_1)); let q6bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.2, 4), q6h_2)); let q6bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.3, 4), q6h_3)); let p0 = vdotq_s32(q6bytes_0, q8bytes.0); let p1 = vdotq_s32(q6bytes_1, q8bytes.1); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s32(p0) * scale0 + vaddvq_s32(p1) * scale1; scale = scale.add(2); let p2 = vdotq_s32(q6bytes_2, q8bytes.2); let p3 = vdotq_s32(q6bytes_3, q8bytes.3); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s32(p2) * scale0 + vaddvq_s32(p3) * scale1; scale = scale.add(2); } sum += d_all * y.d * ((isum - 32 * isum_mins) as f32); } } Ok(sum) } #[inline(always)] pub(crate) fn vec_dot_q5k_q8k(n: usize, xs: &[BlockQ5K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q5k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut utmp = [0u32; 4]; const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; unsafe { let m4b = vdupq_n_u8(0xF); let mone = vdupq_n_u8(1); let mtwo = vdupq_n_u8(2); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let q8sums = vpaddq_s16( vld1q_s16(y.bsums.as_ptr()), vld1q_s16(y.bsums.as_ptr().add(8)), ); LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | 
(((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; let mins8 = vld1_u8((utmp.as_ptr() as *const u8).add(8)); let mins = vreinterpretq_s16_u16(vmovl_u8(mins8)); let prod = vaddq_s32( vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)), vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)), ); let sumi_mins = vaddvq_s32(prod); let mut scales = utmp.as_ptr() as *const u8; let mut q5 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut qhbits = vld1q_u8_x2(x.qh.as_ptr()); let mut sumi = 0i32; for _j in 0..QK_K / 64 { let q5bits = vld1q_u8_x2(q5); q5 = q5.add(32); let q8bytes = vld1q_s8_x4(q8); q8 = q8.add(64); let q5h_0 = vshlq_n_u8(vandq_u8(mone, qhbits.0), 4); let q5h_1 = vshlq_n_u8(vandq_u8(mone, qhbits.1), 4); let q5h_2 = vshlq_n_u8(vandq_u8(mtwo, qhbits.0), 3); let q5h_3 = vshlq_n_u8(vandq_u8(mtwo, qhbits.1), 3); qhbits.0 = vshrq_n_u8(qhbits.0, 2); qhbits.1 = vshrq_n_u8(qhbits.1, 2); let q5bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.0, m4b), q5h_0)); let q5bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.1, m4b), q5h_1)); let q5bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.0, 4), q5h_2)); let q5bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.1, 4), q5h_3)); let p0 = vdotq_s32(q5bytes_0, q8bytes.0); let p1 = vdotq_s32(q5bytes_1, q8bytes.1); sumi += vaddvq_s32(vaddq_s32(p0, p1)) * *scales as i32; scales = scales.add(1); let p2 = vdotq_s32(q5bytes_2, q8bytes.2); let p3 = vdotq_s32(q5bytes_3, q8bytes.3); sumi += vaddvq_s32(vaddq_s32(p2, p3)) * *scales as i32; scales = scales.add(1); } sumf += d * sumi as f32 - dmin * sumi_mins as f32; } } Ok(sumf) } #[inline(always)] pub(crate) fn vec_dot_q4k_q8k(n: usize, xs: &[BlockQ4K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut utmp = [0u32; 4]; let mut scales = [0u8; 16]; const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; unsafe { let m4b = vdupq_n_u8(0xF); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let q8sums = vpaddq_s16( vld1q_s16(y.bsums.as_ptr()), vld1q_s16(y.bsums.as_ptr().add(8)), ); LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); let mins8 = vld1_u32( [ utmp[1] & KMASK1, ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4), ] .as_ptr(), ); utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[0] &= KMASK1; let mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); let prod = vaddq_s32( vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)), vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)), ); sumf -= dmin * vaddvq_s32(prod) as f32; LittleEndian::write_u32_into(&utmp, &mut scales); let mut q4 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut sumi1 = 0i32; let mut sumi2 = 0i32; for j in 0..QK_K / 64 { let q4bits = vld1q_u8_x2(q4); q4 = q4.add(32); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); let q4bytes = int8x16x2_t( vreinterpretq_s8_u8(vandq_u8(q4bits.0, m4b)), vreinterpretq_s8_u8(vandq_u8(q4bits.1, m4b)), ); let p0 = vdotq_s32(q4bytes.0, q8bytes.0); let p1 = vdotq_s32(q4bytes.1, q8bytes.1); sumi1 += vaddvq_s32(vaddq_s32(p0, p1)) * scales[2 * j] as i32; let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); let q4bytes = int8x16x2_t( vreinterpretq_s8_u8(vshrq_n_u8(q4bits.0, 4)), vreinterpretq_s8_u8(vshrq_n_u8(q4bits.1, 4)), ); let p2 = vdotq_s32(q4bytes.0, q8bytes.0); let p3 = vdotq_s32(q4bytes.1, q8bytes.1); sumi2 += 
vaddvq_s32(vaddq_s32(p2, p3)) * scales[2 * j + 1] as i32; } sumf += d * (sumi1 + sumi2) as f32; } } Ok(sumf) } #[inline(always)] pub(crate) fn vec_dot_q3k_q8k(n: usize, xs: &[BlockQ3K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q3k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut utmp = [0u32; 4]; let mut aux = [0u32; 3]; const KMASK1: u32 = 0x03030303; const KMASK2: u32 = 0x0f0f0f0f; unsafe { let m3b = vdupq_n_u8(0x3); let m0 = vdupq_n_u8(1); let m1 = vshlq_n_u8(m0, 1); let m2 = vshlq_n_u8(m0, 2); let m3 = vshlq_n_u8(m0, 3); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let mut q3 = x.qs.as_ptr(); let qh = x.hmask.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut qhbits = vld1q_u8_x2(qh); let mut isum = 0i32; // Set up scales LittleEndian::read_u32_into(&x.scales, &mut aux); utmp[3] = ((aux[1] >> 4) & KMASK2) | (((aux[2] >> 6) & KMASK1) << 4); utmp[2] = ((aux[0] >> 4) & KMASK2) | (((aux[2] >> 4) & KMASK1) << 4); utmp[1] = (aux[1] & KMASK2) | (((aux[2] >> 2) & KMASK1) << 4); utmp[0] = (aux[0] & KMASK2) | ((aux[2] & KMASK1) << 4); let mut scale = utmp.as_mut_ptr() as *mut i8; for j in 0..16 { *scale.add(j) -= 32i8 } for j in 0..QK_K / 128 { let q3bits = vld1q_u8_x2(q3); q3 = q3.add(32); let q8bytes_1 = vld1q_s8_x4(q8); q8 = q8.add(64); let q8bytes_2 = vld1q_s8_x4(q8); q8 = q8.add(64); let q3h_0 = vshlq_n_u8(vbicq_u8(m0, qhbits.0), 2); let q3h_1 = vshlq_n_u8(vbicq_u8(m0, qhbits.1), 2); let q3h_2 = vshlq_n_u8(vbicq_u8(m1, qhbits.0), 1); let q3h_3 = vshlq_n_u8(vbicq_u8(m1, qhbits.1), 1); let q3bytes_0 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(q3bits.0, m3b)), vreinterpretq_s8_u8(q3h_0), ); let q3bytes_1 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(q3bits.1, m3b)), vreinterpretq_s8_u8(q3h_1), ); let q3bytes_2 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 2), m3b)), vreinterpretq_s8_u8(q3h_2), ); let q3bytes_3 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 2), m3b)), vreinterpretq_s8_u8(q3h_3), ); let p0 = vdotq_s32(q3bytes_0, q8bytes_1.0); let p1 = vdotq_s32(q3bytes_1, q8bytes_1.1); let p2 = vdotq_s32(q3bytes_2, q8bytes_1.2); let p3 = vdotq_s32(q3bytes_3, q8bytes_1.3); isum += vaddvq_s32(p0) * *scale as i32 + vaddvq_s32(p1) * *scale.add(1) as i32 + vaddvq_s32(p2) * *scale.add(2) as i32 + vaddvq_s32(p3) * *scale.add(3) as i32; scale = scale.add(4); let q3h_0 = vbicq_u8(m2, qhbits.0); let q3h_1 = vbicq_u8(m2, qhbits.1); let q3h_2 = vshrq_n_u8(vbicq_u8(m3, qhbits.0), 1); let q3h_3 = vshrq_n_u8(vbicq_u8(m3, qhbits.1), 1); let q3bytes_0 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 4), m3b)), vreinterpretq_s8_u8(q3h_0), ); let q3bytes_1 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 4), m3b)), vreinterpretq_s8_u8(q3h_1), ); let q3bytes_2 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 6), m3b)), vreinterpretq_s8_u8(q3h_2), ); let q3bytes_3 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 6), m3b)), vreinterpretq_s8_u8(q3h_3), ); let p0 = vdotq_s32(q3bytes_0, q8bytes_2.0); let p1 = vdotq_s32(q3bytes_1, q8bytes_2.1); let p2 = vdotq_s32(q3bytes_2, q8bytes_2.2); let p3 = vdotq_s32(q3bytes_3, q8bytes_2.3); isum += vaddvq_s32(p0) * *scale as i32 + vaddvq_s32(p1) * *scale.add(1) as i32 + vaddvq_s32(p2) * *scale.add(2) as i32 + vaddvq_s32(p3) * *scale.add(3) as i32; scale = scale.add(4); if j == 0 { qhbits.0 = vshrq_n_u8(qhbits.0, 4); qhbits.1 = vshrq_n_u8(qhbits.1, 4); } } sumf += d * isum as f32; } } Ok(sumf) } #[inline(always)] pub(crate) fn 
vec_dot_q2k_q8k(n: usize, xs: &[BlockQ2K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut aux = [0u8; 16]; unsafe { let m3 = vdupq_n_u8(0x3); let m4 = vdupq_n_u8(0xF); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = -y.d * x.dmin.to_f32(); let mut q2 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let sc = x.scales.as_ptr(); let mins_and_scales = vld1q_u8(sc); let scales = vandq_u8(mins_and_scales, m4); vst1q_u8(aux.as_mut_ptr(), scales); let mins = vshrq_n_u8(mins_and_scales, 4); let q8sums = vld1q_s16_x2(y.bsums.as_ptr()); let mins16 = int16x8x2_t( vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins))), ); let s0 = vaddq_s32( vmull_s16(vget_low_s16(mins16.0), vget_low_s16(q8sums.0)), vmull_s16(vget_high_s16(mins16.0), vget_high_s16(q8sums.0)), ); let s1 = vaddq_s32( vmull_s16(vget_low_s16(mins16.1), vget_low_s16(q8sums.1)), vmull_s16(vget_high_s16(mins16.1), vget_high_s16(q8sums.1)), ); sumf += dmin * vaddvq_s32(vaddq_s32(s0, s1)) as f32; let mut isum = 0i32; let mut is = 0usize; // TODO: dotprod for _j in 0..QK_K / 128 { let q2bits = vld1q_u8_x2(q2); q2 = q2.add(32); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); let mut q2bytes = int8x16x2_t( vreinterpretq_s8_u8(vandq_u8(q2bits.0, m3)), vreinterpretq_s8_u8(vandq_u8(q2bits.1, m3)), ); isum += multiply_accum_with_scale(&aux, is, 0, q2bytes, q8bytes); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 2), m3)); q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 2), m3)); isum += multiply_accum_with_scale(&aux, is, 2, q2bytes, q8bytes); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 4), m3)); q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 4), m3)); isum += multiply_accum_with_scale(&aux, is, 4, q2bytes, q8bytes); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 6), m3)); q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 6), m3)); isum += multiply_accum_with_scale(&aux, is, 6, q2bytes, q8bytes); is += 8; } sumf += d * isum as f32; } } Ok(sumf) } #[inline(always)] unsafe fn multiply_accum_with_scale( aux: &[u8; 16], is: usize, index: usize, q2bytes: int8x16x2_t, q8bytes: int8x16x2_t, ) -> i32 { let p1 = vdotq_s32(q2bytes.0, q8bytes.0); let p2 = vdotq_s32(q2bytes.1, q8bytes.1); vaddvq_s32(p1) * aux[is + index] as i32 + vaddvq_s32(p2) * aux[is + 1 + index] as i32 }
candle/candle-core/src/quantized/neon.rs/0
{ "file_path": "candle/candle-core/src/quantized/neon.rs", "repo_id": "candle", "token_count": 15290 }
use candle_core::backend::BackendStorage; use candle_core::cpu_backend; use candle_core::test_utils::to_vec1_round; use candle_core::{CpuStorage, CustomOp1, DType, Device, Error, Layout, Result, Shape, Tensor}; fn fwd<T: num_traits::Float>(v: T, alpha: f64) -> T { if v.is_sign_positive() { v } else { let alpha = T::from(alpha).unwrap_or(T::nan()); (v.exp() - T::one()) * alpha } } struct Elu { alpha: f64, } impl CustomOp1 for Elu { fn name(&self) -> &'static str { "elu" } fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> { let storage = candle_core::map_dtype!( "elu", s, |s| cpu_backend::unary_map(s, l, |v| fwd(v, self.alpha)), (BF16, F16, F32, F64) ); Ok((storage, l.shape().clone())) } } #[test] fn custom_op1_no_backward() -> Result<()> { let cpu = &Device::Cpu; let t = Tensor::arange(0u32, 12u32, cpu)?.to_dtype(DType::F32)?; let t = (t - 5.)?; let elu_t = t.apply_op1_no_bwd(&Elu { alpha: 1. })?; assert_eq!( to_vec1_round(&elu_t, 4)?, &[-0.9933, -0.9817, -0.9502, -0.8647, -0.6321, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0] ); Ok(()) } // Define a similar struct as Elu but with backward support. fn bwd<T: num_traits::Float>(v: T, alpha: f64) -> T { if v.is_sign_positive() { T::one() } else { let alpha = T::from(alpha).unwrap_or(T::nan()); v.exp() * alpha } } struct EluBackward { alpha: f64, } impl CustomOp1 for EluBackward { fn name(&self) -> &'static str { "elu-bwd" } fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> { let storage = candle_core::map_dtype!( "elu-bwd", s, |s| cpu_backend::unary_map(s, l, |v| bwd(v, self.alpha)), (BF16, F16, F32, F64) ); Ok((storage, l.shape().clone())) } } struct EluWithBackward(Elu); impl EluWithBackward { fn new(alpha: f64) -> Self { Self(Elu { alpha }) } } impl CustomOp1 for EluWithBackward { fn name(&self) -> &'static str { "elu" } fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> { self.0.cpu_fwd(s, l) } fn bwd(&self, arg: &Tensor, _res: &Tensor, grad_res: &Tensor) -> Result<Option<Tensor>> { let alpha = self.0.alpha; let bwd = arg.apply_op1(EluBackward { alpha })?; Ok(Some(grad_res.mul(&bwd)?)) } } #[test] fn custom_op1_with_backward() -> Result<()> { let cpu = &Device::Cpu; let t = candle_core::Var::new(&[-2f32, 0f32, 2f32], cpu)?; let elu_t = t.apply_op1(EluWithBackward::new(2.))?; assert_eq!(to_vec1_round(&elu_t, 4)?, &[-1.7293, 0.0, 2.0]); let grads = elu_t.backward()?; let grad_x = grads.get(&t).unwrap(); assert_eq!(to_vec1_round(grad_x, 4)?, [0.2707, 1.0, 1.0]); Ok(()) } impl candle_core::InplaceOp1 for Elu { fn name(&self) -> &'static str { "elu" } fn cpu_fwd(&self, s: &mut CpuStorage, _l: &Layout) -> Result<()> { let alpha = self.alpha; match s { CpuStorage::BF16(s) => s.iter_mut().for_each(|v| *v = fwd(*v, alpha)), CpuStorage::F16(s) => s.iter_mut().for_each(|v| *v = fwd(*v, alpha)), CpuStorage::F32(s) => s.iter_mut().for_each(|v| *v = fwd(*v, alpha)), CpuStorage::F64(s) => s.iter_mut().for_each(|v| *v = fwd(*v, alpha)), _ => candle_core::bail!("unsupported dtype for inplace elu"), } Ok(()) } } #[test] fn inplace_op1() -> Result<()> { let cpu = &Device::Cpu; let t = Tensor::arange(0u32, 12u32, cpu)?.to_dtype(DType::F32)?; let t = (t - 5.)?; t.inplace_op1(&Elu { alpha: 1. 
})?; assert_eq!( to_vec1_round(&t, 4)?, &[-0.9933, -0.9817, -0.9502, -0.8647, -0.6321, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0] ); Ok(()) } #[cfg(any(feature = "cuda", feature = "metal"))] #[allow(clippy::approx_constant)] #[test] fn ug_op() -> Result<()> { let kernel = { use ug::lang::op; let layout = ug::Layout::from_shape(&[12]); let ptr = op::Arg::ptr(ug::DType::F32); let src = op::load(ptr.id(), layout.clone(), ug::DType::F32)?; let src = op::unary(op::UnaryOp::Exp, src)?; let st = op::store(ptr.id(), layout, src)?; let kernel = op::Kernel::new("exp".to_string(), vec![ptr], vec![st]); let opts: ug::lower_op::Opts = Default::default(); kernel.lower(&opts)? }; let device = if candle_core::utils::cuda_is_available() { Device::new_cuda(0)? } else if candle_core::utils::metal_is_available() { Device::new_metal(0)? } else { candle_core::bail!("metal/cuda is mandatory for this test") }; let op = candle_core::UgIOp1::new("test", kernel, &device)?; let t = Tensor::arange(0u32, 12u32, &device)?.to_dtype(DType::F32)?; t.inplace_op1(&op)?; assert_eq!( to_vec1_round(&t, 2)?, &[ 1.0, 2.72, 7.39, 20.09, 54.6, 148.41, 403.43, 1096.63, 2980.96, 8103.08, 22026.47, 59874.13 ] ); Ok(()) }
candle/candle-core/tests/custom_op_tests.rs/0
{ "file_path": "candle/candle-core/tests/custom_op_tests.rs", "repo_id": "candle", "token_count": 2721 }
use anyhow::{Error as E, Result}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::models::colpali::Model; use candle_transformers::models::{colpali, paligemma}; use clap::Parser; use hf_hub::{api::sync::Api, Repo, RepoType}; use image::DynamicImage; use pdf2image::{RenderOptionsBuilder, PDF}; use tokenizers::Tokenizer; struct PageRetriever { model: Model, config: paligemma::Config, pdf: PDF, device: Device, tokenizer: Tokenizer, range: pdf2image::Pages, batch_size: usize, top_k: usize, } impl PageRetriever { fn new( model: Model, config: paligemma::Config, pdf: PDF, tokenizer: Tokenizer, device: &Device, range: Option<pdf2image::Pages>, batch_size: usize, top_k: usize, ) -> Self { let page_count = pdf.page_count(); Self { model, config, pdf, device: device.clone(), tokenizer, range: range.unwrap_or_else(|| pdf2image::Pages::Range(1..=page_count)), batch_size, top_k, } } fn get_images_from_pdf(&self) -> Result<Vec<DynamicImage>> { let pages = self .pdf .render(self.range.clone(), RenderOptionsBuilder::default().build()?)?; Ok(pages) } fn tokenize_batch(&self, prompts: Vec<&str>) -> Result<Tensor> { let tokens = self.tokenizer.encode_batch(prompts, true).map_err(E::msg)?; let token_ids = tokens .iter() .map(|tokens| { let tokens = tokens.get_ids().to_vec(); Tensor::new(tokens.as_slice(), &self.device) }) .collect::<candle::Result<Vec<_>>>()?; let input = Tensor::stack(&token_ids, 0)?; Ok(input) } fn images_to_tensor( &self, pages: &[DynamicImage], image_size: usize, ) -> anyhow::Result<Tensor> { let mut images = vec![]; for page in pages.iter() { let img = page.resize_to_fill( image_size as u32, image_size as u32, image::imageops::FilterType::Triangle, ); let img = img.to_rgb8(); let img = img.into_raw(); let img = Tensor::from_vec(img, (image_size, image_size, 3), &Device::Cpu)? .permute((2, 0, 1))? .to_dtype(DType::F32)? .affine(2. / 255., -1.)?; images.push(img); } let images = Tensor::stack(&images, 0)?; Ok(images) } fn retrieve(&mut self, prompt: &str) -> Result<Vec<usize>> { let dtype = if self.device.is_cuda() { DType::BF16 } else { DType::F32 }; let dummy_prompt: &str = "Describe the image"; let input = self.tokenize_batch(vec![prompt])?; let dummy_input = self.tokenize_batch(vec![dummy_prompt])?; let pages = self.get_images_from_pdf()?; let mut all_scores = Vec::new(); for batch in pages.chunks(self.batch_size) { let page_images = self .images_to_tensor(batch, self.config.vision_config.image_size)? .to_device(&self.device)? .to_dtype(dtype)?; let dummy_input = dummy_input.repeat((page_images.dims()[0], 0))?; let image_embeddings = self.model.forward_images(&page_images, &dummy_input)?; let text_embeddings = self.model.forward_text(&input)?; let scores = text_embeddings .unsqueeze(1)? .broadcast_matmul(&image_embeddings.unsqueeze(0)?.transpose(3, 2)?)? .max(3)? .sum(2)?; let batch_scores: Vec<f32> = scores .to_dtype(DType::F32)? .to_vec2()? .into_iter() .flatten() .collect(); all_scores.extend(batch_scores); } let mut indices: Vec<usize> = (0..all_scores.len()).collect(); indices.sort_by(|a, b| all_scores[*b].partial_cmp(&all_scores[*a]).unwrap()); let top_k_indices = indices[0..self.top_k].to_vec(); Ok(top_k_indices) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] prompt: String, /// number of top pages to show. 
#[arg(long, default_value_t = 3)] top_k: usize, #[arg(long)] model_id: Option<String>, #[arg(long, default_value = "main")] revision: String, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] weight_files: Option<String>, #[arg(long)] pdf: String, #[arg(long)] start: Option<u32>, #[arg(long)] end: Option<u32>, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); let api = Api::new()?; let model_id = match &args.model_id { Some(model_id) => model_id.to_string(), None => "vidore/colpali-v1.2-merged".to_string(), }; let repo = api.repo(Repo::with_revision( model_id, RepoType::Model, args.revision, )); let tokenizer_filename = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => api .repo(Repo::with_revision( "vidore/colpali".to_string(), RepoType::Model, "main".to_string(), )) .get("tokenizer.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?, }; let start = std::time::Instant::now(); let config: paligemma::Config = paligemma::Config::paligemma_3b_448(); println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let device = candle_examples::device(false)?; let dtype = if device.is_cuda() { DType::BF16 } else { DType::F32 }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; let model = colpali::Model::new(&config, vb)?; let pdf = PDF::from_file(args.pdf)?; // check if start and end given in arg let range = if let (Some(start), Some(end)) = (args.start, args.end) { pdf2image::Pages::Range(start..=end) } else { pdf2image::Pages::Range(1..=pdf.page_count()) // can use pdf2image::Pages::All but there is a bug in the library which causes the first page to rendered twice. }; let mut retriever = PageRetriever::new(model, config, pdf, tokenizer, &device, Some(range), 4, 3); let top_k_indices = retriever.retrieve(&args.prompt)?; println!("Prompt: {}", args.prompt); println!( "top {} page numbers that contain similarity to the prompt", retriever.top_k ); println!("-----------------------------------"); for index in top_k_indices { println!("Page: {:?}", index + 1); } println!("-----------------------------------"); Ok(()) }
candle/candle-examples/examples/colpali/main.rs/0
{ "file_path": "candle/candle-examples/examples/colpali/main.rs", "repo_id": "candle", "token_count": 3864 }
# candle-flux: image generation with latent rectified flow transformers

![rusty robot holding a candle](./assets/flux-robot.jpg)

Flux is a 12B rectified flow transformer capable of generating images from text
descriptions,
[huggingface](https://huggingface.co/black-forest-labs/FLUX.1-schnell),
[github](https://github.com/black-forest-labs/flux),
[blog post](https://blackforestlabs.ai/announcing-black-forest-labs/).

## Running the model

```bash
cargo run --features cuda --example flux -r -- \
    --height 1024 --width 1024 \
    --prompt "a rusty robot walking on a beach holding a small torch, the robot has the word \"rust\" written on it, high quality, 4k"
```
candle/candle-examples/examples/flux/README.md/0
{ "file_path": "candle/candle-examples/examples/flux/README.md", "repo_id": "candle", "token_count": 216 }
# candle-jina-bert Jina-Bert is a general large language model with a context size of 8192, [model card](https://huggingface.co/jinaai/jina-embeddings-v2-base-en). In this example it can be used for two different tasks: - Compute sentence embeddings for a prompt. - Compute similarities between a set of sentences. ## Sentence embeddings Jina-Bert is used to compute the sentence embeddings for a prompt. The model weights are downloaded from the hub on the first run. ```bash cargo run --example jina-bert --release -- --prompt "Here is a test sentence" > [[[ 0.1595, -0.9885, 0.6494, ..., 0.3003, -0.6901, -1.2355], > [ 0.0374, -0.1798, 1.3359, ..., 0.6731, 0.2133, -1.6807], > [ 0.1700, -0.8534, 0.8924, ..., -0.1785, -0.0727, -1.5087], > ... > [-0.3113, -1.3665, 0.2027, ..., -0.2519, 0.1711, -1.5811], > [ 0.0907, -1.0492, 0.5382, ..., 0.0242, -0.7077, -1.0830], > [ 0.0369, -0.6343, 0.6105, ..., 0.0671, 0.3778, -1.1505]]] > Tensor[[1, 7, 768], f32] ``` ## Similarities In this example, Jina-Bert is used to compute the sentence embeddings for a set of sentences (hardcoded in the examples). Then cosine similarities are computed for each sentence pair and they are reported by decreasing values, hence the first reported pair contains the two sentences that have the highest similarity score. The sentence embeddings are computed using average pooling through all the sentence tokens, including some potential padding. ```bash cargo run --example jina-bert --release > score: 0.94 'The new movie is awesome' 'The new movie is so great' > score: 0.81 'The cat sits outside' 'The cat plays in the garden' > score: 0.78 'I love pasta' 'Do you like pizza?' > score: 0.68 'I love pasta' 'The new movie is awesome' > score: 0.67 'A man is playing guitar' 'A woman watches TV' ```
candle/candle-examples/examples/jina-bert/README.md/0
{ "file_path": "candle/candle-examples/examples/jina-bert/README.md", "repo_id": "candle", "token_count": 663 }
# candle-mobilenetv4 [MobileNetV4 - Universal Models for the Mobile Ecosystem](https://arxiv.org/abs/2404.10518) This candle implementation uses pre-trained MobileNetV4 models from timm for inference. The classification head has been trained on the ImageNet dataset and returns the probabilities for the top-5 classes. ## Running an example ``` $ cargo run --example mobilenetv4 --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which medium loaded image Tensor[dims 3, 256, 256; f32] model built unicycle, monocycle : 20.18% mountain bike, all-terrain bike, off-roader: 19.77% bicycle-built-for-two, tandem bicycle, tandem: 15.91% crash helmet : 1.15% tricycle, trike, velocipede: 0.67% ```
candle/candle-examples/examples/mobilenetv4/README.md/0
{ "file_path": "candle/candle-examples/examples/mobilenetv4/README.md", "repo_id": "candle", "token_count": 248 }
use anyhow::Result; use candle::{Device, Tensor}; use clap::{Parser, Subcommand}; #[derive(Subcommand, Debug, Clone)] enum Command { Print { #[arg(long)] file: String, }, SimpleEval { #[arg(long)] file: String, }, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] pub struct Args { #[command(subcommand)] command: Command, } pub fn main() -> Result<()> { let args = Args::parse(); match args.command { Command::Print { file } => { let model = candle_onnx::read_file(file)?; println!("{model:?}"); let graph = model.graph.unwrap(); for node in graph.node.iter() { println!("{node:?}"); } } Command::SimpleEval { file } => { let model = candle_onnx::read_file(file)?; let graph = model.graph.as_ref().unwrap(); let constants: std::collections::HashSet<_> = graph.initializer.iter().map(|i| i.name.as_str()).collect(); let mut inputs = std::collections::HashMap::new(); for input in graph.input.iter() { use candle_onnx::onnx::tensor_proto::DataType; if constants.contains(input.name.as_str()) { continue; } let type_ = input.r#type.as_ref().expect("no type for input"); let type_ = type_.value.as_ref().expect("no type.value for input"); let value = match type_ { candle_onnx::onnx::type_proto::Value::TensorType(tt) => { let dt = match DataType::try_from(tt.elem_type) { Ok(dt) => match candle_onnx::dtype(dt) { Some(dt) => dt, None => { anyhow::bail!( "unsupported 'value' data-type {dt:?} for {}", input.name ) } }, type_ => anyhow::bail!("unsupported input type {type_:?}"), }; let shape = tt.shape.as_ref().expect("no tensortype.shape for input"); let dims = shape .dim .iter() .map(|dim| match dim.value.as_ref().expect("no dim value") { candle_onnx::onnx::tensor_shape_proto::dimension::Value::DimValue(v) => Ok(*v as usize), candle_onnx::onnx::tensor_shape_proto::dimension::Value::DimParam(_) => Ok(42), }) .collect::<Result<Vec<usize>>>()?; Tensor::zeros(dims, dt, &Device::Cpu)? } type_ => anyhow::bail!("unsupported input type {type_:?}"), }; println!("input {}: {value:?}", input.name); inputs.insert(input.name.clone(), value); } let outputs = candle_onnx::simple_eval(&model, inputs)?; for (name, value) in outputs.iter() { println!("output {name}: {value:?}") } } } Ok(()) }
candle/candle-examples/examples/onnx_basics.rs/0
{ "file_path": "candle/candle-examples/examples/onnx_basics.rs", "repo_id": "candle", "token_count": 2016 }
# candle-repvgg [RepVGG: Making VGG-style ConvNets Great Again](https://arxiv.org/abs/2101.03697). This candle implementation uses a pre-trained RepVGG network for inference. The classification head has been trained on the ImageNet dataset and returns the probabilities for the top-5 classes. ## Running an example ``` $ cargo run --example repvgg --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg loaded image Tensor[dims 3, 224, 224; f32] model built mountain bike, all-terrain bike, off-roader: 61.70% bicycle-built-for-two, tandem bicycle, tandem: 33.14% unicycle, monocycle : 4.88% crash helmet : 0.15% moped : 0.04% ```
candle/candle-examples/examples/repvgg/README.md/0
{ "file_path": "candle/candle-examples/examples/repvgg/README.md", "repo_id": "candle", "token_count": 254 }
# candle-whisper: speech recognition

An implementation of [OpenAI Whisper](https://github.com/openai/whisper) using candle. Whisper is a general purpose speech recognition model; it can be used to convert audio files (in the `.wav` format) to text. Supported features include language detection as well as multilingual speech recognition.

## Running an example

If no audio file is passed as input, a [sample file](https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_jfk.wav) is automatically downloaded from the hub.

```bash
cargo run --example whisper --release --features="symphonia"

> No audio file submitted: Downloading https://huggingface.co/datasets/Narsil/candle_demo/blob/main/samples_jfk.wav
> loaded wav data: Header { audio_format: 1, channel_count: 1, sampling_rate: 16000, bytes_per_second: 32000, bytes_per_sample: 2, bits_per_sample: 16 }
> pcm data loaded 176000
> loaded mel: [1, 80, 3000]
> 0.0s -- 30.0s: And so my fellow Americans ask not what your country can do for you ask what you can do for your country
```

In order to use the multilingual mode, specify a multilingual model via the `--model` flag; see the details below.

## Command line flags

- `--input`: the audio file to be converted to text, in wav format.
- `--language`: force the language to some specific value rather than being detected, e.g. `en`.
- `--task`: the task to be performed, either `transcribe` (return the text data in the original language) or `translate` (translate the text to English).
- `--timestamps`: enable the timestamp mode where timestamps are reported for each recognized audio extract.
- `--model`: the model to be used. Models that do not end with `-en` are multilingual models; the other ones are English-only models. The supported OpenAI Whisper models are `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large`, `large-v2` and `large-v3`. The supported Distil-Whisper models are `distil-medium.en`, `distil-large-v2` and `distil-large-v3`.
candle/candle-examples/examples/whisper/README.md/0
{ "file_path": "candle/candle-examples/examples/whisper/README.md", "repo_id": "candle", "token_count": 627 }
[net] # Testing batch=1 subdivisions=1 # Training # batch=64 # subdivisions=16 width= 416 height = 416 channels=3 momentum=0.9 decay=0.0005 angle=0 saturation = 1.5 exposure = 1.5 hue=.1 learning_rate=0.001 burn_in=1000 max_batches = 500200 policy=steps steps=400000,450000 scales=.1,.1 [convolutional] batch_normalize=1 filters=32 size=3 stride=1 pad=1 activation=leaky # Downsample [convolutional] batch_normalize=1 filters=64 size=3 stride=2 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=32 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=64 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear # Downsample [convolutional] batch_normalize=1 filters=128 size=3 stride=2 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=64 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=128 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=64 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=128 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear # Downsample [convolutional] batch_normalize=1 filters=256 size=3 stride=2 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=256 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=256 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=256 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=256 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=256 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=256 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=256 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=256 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear # Downsample [convolutional] batch_normalize=1 filters=512 size=3 stride=2 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=512 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=512 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [convolutional] 
batch_normalize=1 filters=512 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=512 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=512 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=512 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=512 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=512 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear # Downsample [convolutional] batch_normalize=1 filters=1024 size=3 stride=2 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=512 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=1024 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=512 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=1024 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=512 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=1024 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear [convolutional] batch_normalize=1 filters=512 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 filters=1024 size=3 stride=1 pad=1 activation=leaky [shortcut] from=-3 activation=linear ###################### [convolutional] batch_normalize=1 filters=512 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 size=3 stride=1 pad=1 filters=1024 activation=leaky [convolutional] batch_normalize=1 filters=512 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 size=3 stride=1 pad=1 filters=1024 activation=leaky [convolutional] batch_normalize=1 filters=512 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 size=3 stride=1 pad=1 filters=1024 activation=leaky [convolutional] size=1 stride=1 pad=1 filters=255 activation=linear [yolo] mask = 6,7,8 anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 classes=80 num=9 jitter=.3 ignore_thresh = .5 truth_thresh = 1 random=1 [route] layers = -4 [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [upsample] stride=2 [route] layers = -1, 61 [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 size=3 stride=1 pad=1 filters=512 activation=leaky [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 size=3 stride=1 pad=1 filters=512 activation=leaky [convolutional] batch_normalize=1 filters=256 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 size=3 stride=1 pad=1 filters=512 activation=leaky [convolutional] size=1 stride=1 pad=1 filters=255 
activation=linear [yolo] mask = 3,4,5 anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 classes=80 num=9 jitter=.3 ignore_thresh = .5 truth_thresh = 1 random=1 [route] layers = -4 [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [upsample] stride=2 [route] layers = -1, 36 [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 size=3 stride=1 pad=1 filters=256 activation=leaky [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 size=3 stride=1 pad=1 filters=256 activation=leaky [convolutional] batch_normalize=1 filters=128 size=1 stride=1 pad=1 activation=leaky [convolutional] batch_normalize=1 size=3 stride=1 pad=1 filters=256 activation=leaky [convolutional] size=1 stride=1 pad=1 filters=255 activation=linear [yolo] mask = 0,1,2 anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 classes=80 num=9 jitter=.3 ignore_thresh = .5 truth_thresh = 1 random=1
candle/candle-examples/examples/yolo-v3/yolo-v3.cfg/0
{ "file_path": "candle/candle-examples/examples/yolo-v3/yolo-v3.cfg", "repo_id": "candle", "token_count": 3586 }
[package] name = "candle-flash-attn" version = "0.8.2" edition = "2021" description = "Flash attention layer for the candle ML framework." repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" readme = "README.md" [dependencies] candle = { path = "../candle-core", features = ["cuda"], package = "candle-core", version = "0.8.2" } half = { version = "2.3.1", features = ["num-traits"] } [build-dependencies] bindgen_cuda = "0.1.1" anyhow = { version = "1", features = ["backtrace"] } [dev-dependencies] anyhow = { version = "1", features = ["backtrace"] } candle-nn = { path = "../candle-nn", features = ["cuda"] }
candle/candle-flash-attn/Cargo.toml/0
{ "file_path": "candle/candle-flash-attn/Cargo.toml", "repo_id": "candle", "token_count": 266 }
/****************************************************************************** * Copyright (c) 2024, Tri Dao. ******************************************************************************/ #pragma once #include <cute/tensor.hpp> namespace flash { using namespace cute; template <typename Engine, typename Layout> __forceinline__ __device__ void apply_mask(Tensor<Engine, Layout> &tensor, const int max_seqlen_k, const int col_idx_offset_ = 0) { // tensor has shape (nrow=(2, MMA_M), ncol=(2, MMA_N)) static_assert(Layout::rank == 2, "Only support 2D Tensor"); const int lane_id = threadIdx.x % 32; const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2; #pragma unroll for (int nj = 0; nj < size<1, 1>(tensor); ++nj) { const int col_idx_base = col_idx_offset + nj * 8; #pragma unroll for (int j = 0; j < size<1, 0>(tensor); ++j) { const int col_idx = col_idx_base + j; if (col_idx >= max_seqlen_k) { // Without the "make_coord" we get wrong results #pragma unroll for (int mi = 0; mi < size<0>(tensor); ++mi) { tensor(mi, make_coord(j, nj)) = -INFINITY; } } } } } template <bool HasWSLeft=true, typename Engine, typename Layout> __forceinline__ __device__ void apply_mask_local(Tensor<Engine, Layout> &tensor, const int col_idx_offset_, const int max_seqlen_k, const int row_idx_offset, const int max_seqlen_q, const int warp_row_stride, const int window_size_left, const int window_size_right) { // tensor has shape (nrow=(2, MMA_M), ncol=(2, MMA_N)) static_assert(Layout::rank == 2, "Only support 2D Tensor"); const int lane_id = threadIdx.x % 32; const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2; #pragma unroll for (int mi = 0; mi < size<0, 1>(tensor); ++mi) { const int row_idx_base = row_idx_offset + mi * warp_row_stride; #pragma unroll for (int i = 0; i < size<0, 0>(tensor); ++i) { const int row_idx = row_idx_base + i * 8; const int col_idx_limit_left = std::max(0, row_idx + max_seqlen_k - max_seqlen_q - window_size_left); const int col_idx_limit_right = std::min(max_seqlen_k, row_idx + 1 + max_seqlen_k - max_seqlen_q + window_size_right); #pragma unroll for (int nj = 0; nj < size<1, 1>(tensor); ++nj) { const int col_idx_base = col_idx_offset + nj * 8; #pragma unroll for (int j = 0; j < size<1, 0>(tensor); ++j) { const int col_idx = col_idx_base + j; if (col_idx >= col_idx_limit_right || (HasWSLeft && col_idx < col_idx_limit_left)) { tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY; } } } // if (cute::thread0()) { // printf("mi = %d, i = %d, row_idx = %d, max_seqlen_k = %d\n", mi, i, row_idx, max_seqlen_k); // print(tensor(make_coord(i, mi), _)); // // print(tensor(_, j + nj * size<1, 0>(tensor))); // } } } } template <typename Engine, typename Layout> __forceinline__ __device__ void apply_mask_causal(Tensor<Engine, Layout> &tensor, const int col_idx_offset_, const int max_seqlen_k, const int row_idx_offset, const int max_seqlen_q, const int warp_row_stride) { // Causal masking is equivalent to local masking with window_size_left = infinity and window_size_right = 0 apply_mask_local</*HasWSLeft=*/false>(tensor, col_idx_offset_, max_seqlen_k, row_idx_offset, max_seqlen_q, warp_row_stride, -1, 0); } template <typename Engine0, typename Layout0, typename Engine1, typename Layout1> __forceinline__ __device__ void apply_mask_causal_w_idx( Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> const &idx_rowcol, const int col_idx_offset_, const int max_seqlen_k, const int row_idx_offset) { // tensor has shape (nrow=(2, MMA_M), ncol=(2, MMA_N)) static_assert(Layout0::rank == 2, 
"Only support 2D Tensor"); static_assert(Layout1::rank == 2, "Only support 2D Tensor"); CUTE_STATIC_ASSERT_V(size<0>(tensor) == size<0>(idx_rowcol)); CUTE_STATIC_ASSERT_V(size<1>(tensor) == size<1>(idx_rowcol)); #pragma unroll for (int mi = 0; mi < size<0>(tensor); ++mi) { const int col_idx_limit = std::min(max_seqlen_k, 1 + row_idx_offset + get<0>(idx_rowcol(mi, 0))); #pragma unroll for (int ni = 0; ni < size<1, 1>(tensor); ++ni) { if (col_idx_offset_ + get<1>(idx_rowcol(0, ni)) >= col_idx_limit) { tensor(mi, ni) = -INFINITY; } } // if (cute::thread0()) { // printf("ni = %d, j = %d, col_idx = %d, max_seqlen_k = %d\n", ni, j, col_idx, max_seqlen_k); // print(tensor(_, make_coord(j, ni))); // // print(tensor(_, j + ni * size<1, 0>(tensor))); // } } } template <bool Is_causal, bool Is_local, bool Has_alibi> struct Mask { const int max_seqlen_k, max_seqlen_q; const int window_size_left, window_size_right; const float alibi_slope; __forceinline__ __device__ Mask(const int max_seqlen_k, const int max_seqlen_q, const int window_size_left, const int window_size_right, const float alibi_slope=0.f) : max_seqlen_k(max_seqlen_k) , max_seqlen_q(max_seqlen_q) , window_size_left(window_size_left) , window_size_right(window_size_right) , alibi_slope(!Has_alibi ? 0.0 : alibi_slope) { }; // Causal_mask: whether this particular iteration needs causal masking template <bool Causal_mask=false, bool Is_even_MN=true, typename Engine, typename Layout> __forceinline__ __device__ void apply_mask(Tensor<Engine, Layout> &tensor_, const int col_idx_offset_, const int row_idx_offset, const int warp_row_stride) { static_assert(!(Causal_mask && Is_local), "Cannot be both causal and local"); static_assert(Layout::rank == 3, "Only support 3D Tensor"); static_assert(decltype(size<0>(tensor_))::value == 4, "First dimension must be 4"); static constexpr bool Need_masking = Has_alibi || Causal_mask || Is_local || !Is_even_MN; // if (cute::thread0()) { printf("Has_alibi = %d, Causal_mask=%d, Is_local=%d, Is_even_MN = %d, Need_masking = %d\n", Has_alibi, Causal_mask, Is_local, Is_even_MN, Need_masking); } if constexpr (Need_masking) { // Reshape tensor_ from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N)) Tensor tensor = make_tensor(tensor_.data(), flash::convert_layout_acc_rowcol(tensor_.layout())); // Do we need both row and column indices, or just column incides? 
static constexpr bool Col_idx_only = !(Has_alibi && !Is_causal) && !Is_local && !Causal_mask; const int lane_id = threadIdx.x % 32; const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2; if constexpr (Col_idx_only) { #pragma unroll for (int nj = 0; nj < size<1, 1>(tensor); ++nj) { const int col_idx_base = col_idx_offset + nj * 8; #pragma unroll for (int j = 0; j < size<1, 0>(tensor); ++j) { const int col_idx = col_idx_base + j; #pragma unroll for (int mi = 0; mi < size<0>(tensor); ++mi) { // No causal, no local if constexpr (Has_alibi) { tensor(mi, make_coord(j, nj)) += alibi_slope * col_idx; } if constexpr (!Is_even_MN) { if (col_idx >= max_seqlen_k) { tensor(mi, make_coord(j, nj)) = -INFINITY; } } } } } } else { #pragma unroll for (int mi = 0; mi < size<0, 1>(tensor); ++mi) { const int row_idx_base = row_idx_offset + mi * warp_row_stride; #pragma unroll for (int i = 0; i < size<0, 0>(tensor); ++i) { const int row_idx = row_idx_base + i * 8; const int col_idx_limit_left = std::max(0, row_idx + max_seqlen_k - max_seqlen_q - window_size_left); const int col_idx_limit_right = std::min(max_seqlen_k, row_idx + 1 + max_seqlen_k - max_seqlen_q + window_size_right); #pragma unroll for (int nj = 0; nj < size<1, 1>(tensor); ++nj) { const int col_idx_base = col_idx_offset + nj * 8; #pragma unroll for (int j = 0; j < size<1, 0>(tensor); ++j) { const int col_idx = col_idx_base + j; if constexpr (Has_alibi) { if constexpr (Is_causal) { tensor(make_coord(i, mi), make_coord(j, nj)) += alibi_slope * col_idx; } else { tensor(make_coord(i, mi), make_coord(j, nj)) -= alibi_slope * abs(row_idx + max_seqlen_k - max_seqlen_q - col_idx); } } if constexpr (Causal_mask) { if (col_idx >= col_idx_limit_right) { tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY; } } if constexpr (Is_local) { if (col_idx >= col_idx_limit_right || col_idx < col_idx_limit_left) { tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY; } } if constexpr (!Causal_mask && !Is_local && !Is_even_MN) { // Causal and Local already handles MN masking if (col_idx >= max_seqlen_k) { tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY; } } } } } } } } }; }; } // namespace flash
candle/candle-flash-attn/kernels/mask.h/0
{ "file_path": "candle/candle-flash-attn/kernels/mask.h", "repo_id": "candle", "token_count": 6255 }
#include "cuda_fp16.h" #include "cuda_bf16.h" // Table showing which features are supported on which compute capability // https://docs.nvidia.com/cuda/cuda-c-programming-guide/#features-and-technical-specifications // FIXME: the minimum compute capabilities are just guesses since the table is not specific enough #if (__CUDACC_VER_MAJOR__ < 12 || __CUDACC_VER_MINOR__ < 2) && __CUDA_ARCH__ < 800 __device__ __forceinline__ __half __hmax_nan(__half a, __half b) { return __hisnan(a) ? a : (__hisnan(b) ? b : __hmax(a, b)); } __device__ __forceinline__ __half __hmin_nan(__half a, __half b) { return __hisnan(a) ? a : (__hisnan(b) ? b : __hmin(a, b)); } #endif #if __CUDA_ARCH__ < 600 // Copied from https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif #if __CUDA_ARCH__ < 700 // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomicadd // The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher. // Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119 __device__ __half atomicAdd(__half *address, __half val) { // unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2)); // unsigned int old = *address_as_ui; // unsigned int assumed; // bool unaligned = (size_t) address & 2; // do { // assumed = old; // unsigned int hsum; // hsum = unaligned ? (old >> 16) : (old & 0xffff); // hsum = __half_as_ushort(__ushort_as_half(hsum) + val); // old = atomicCAS(address_as_ui, assumed, // unaligned ? (old & 0xffff) | (hsum << 16) : (old & 0xffff0000) | hsum // ); // } while (assumed != old); // return __ushort_as_half(unaligned ? (old >> 16) : (old & 0xffff)); } #endif __device__ __forceinline__ __half atomicMaxf(__half* address, __half val) { #if __CUDA_ARCH__ < 700 // On older GPUs we do not have access to atomicCAS for shorts, so we have to do some trickery. // Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119 unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2)); unsigned int old = *address_as_ui; unsigned int assumed; bool unaligned = (size_t) address & 2; do { assumed = old; unsigned int hmax; hmax = unaligned ? (old >> 16) : (old & 0xffff); hmax = __half_as_ushort(__hmax_nan(val, __ushort_as_half(hmax))); old = atomicCAS(address_as_ui, assumed, unaligned ? (old & 0xffff) | (hmax << 16) : (old & 0xffff0000) | hmax ); } while (assumed != old); return __ushort_as_half(unaligned ? 
(old >> 16) : (old & 0xffff)); #else // Based on https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions unsigned short int* casted_address = (unsigned short int*)address; unsigned short int old = *casted_address; unsigned short int assumed; do { assumed = old; old = atomicCAS(casted_address, assumed, __half_as_ushort(__hmax_nan(val, __ushort_as_half(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __ushort_as_half(old); #endif } // atomicMax is not implemented for floats, // solution copied https://stackoverflow.com/questions/17399119/how-do-i-use-atomicmax-on-floating-point-values-in-cuda __device__ __forceinline__ float atomicMaxf(float * addr, float value) { if (signbit(value)) { return __uint_as_float(atomicMin((unsigned int *)addr, __float_as_uint(value))); } else { return __int_as_float(atomicMax((int *)addr, __float_as_int(value))); } } __device__ __forceinline__ double atomicMaxf(double * addr, double value) { if (signbit(value)) { return __longlong_as_double(atomicMin((unsigned long long int *)addr, __double_as_longlong(value))); } else { return __longlong_as_double(atomicMax((long long int *)addr, __double_as_longlong(value))); } } __device__ __forceinline__ __half atomicMinf(__half* address, __half val) { #if __CUDA_ARCH__ < 700 // On older GPUs we do not have access to atomicCAS for shorts, so we have to do some trickery. // Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119 unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2)); unsigned int old = *address_as_ui; unsigned int assumed; bool unaligned = (size_t) address & 2; do { assumed = old; unsigned int hmin; hmin = unaligned ? (old >> 16) : (old & 0xffff); hmin = __half_as_ushort(__hmin_nan(val, __ushort_as_half(hmin))); old = atomicCAS(address_as_ui, assumed, unaligned ? (old & 0xffff) | (hmin << 16) : (old & 0xffff0000) | hmin ); } while (assumed != old); return __ushort_as_half(unaligned ? (old >> 16) : (old & 0xffff)); #else // Based on https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions unsigned short int* casted_address = (unsigned short int*)address; unsigned short int old = *casted_address; unsigned short int assumed; do { assumed = old; old = atomicCAS(casted_address, assumed, __half_as_ushort(__hmin_nan(val, __ushort_as_half(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __ushort_as_half(old); #endif } // atomicMin is not implemented for floats, // solution copied https://stackoverflow.com/questions/17399119/how-do-i-use-atomicmax-on-floating-point-values-in-cuda __device__ __forceinline__ float atomicMinf(float * addr, float value) { if (signbit(value)) { return __uint_as_float(atomicMax((unsigned int *)addr, __float_as_uint(value))); } else { return __int_as_float(atomicMin((int *)addr, __float_as_int(value))); } } __device__ __forceinline__ double atomicMinf(double * addr, double value) { if (signbit(value)) { return __longlong_as_double(atomicMax((unsigned long long int *)addr, __double_as_longlong(value))); } else { return __longlong_as_double(atomicMin((long long int *)addr, __double_as_longlong(value))); } }
candle/candle-kernels/src/compatibility.cuh/0
{ "file_path": "candle/candle-kernels/src/compatibility.cuh", "repo_id": "candle", "token_count": 2734 }
#include <metal_stdlib> METAL_FUNC uint get_strided_index( uint idx, constant size_t &num_dims, constant size_t *dims, constant size_t *strides ) { uint strided_i = 0; for (uint d = 0; d < num_dims; d++) { uint dim_idx = num_dims - 1 - d; strided_i += (idx % dims[dim_idx]) * strides[dim_idx]; idx /= dims[dim_idx]; } return strided_i; } using namespace metal; #define CAST(FN_NAME, FN_NAME_STRIDED, LEFT_TYPENAME, RIGHT_TYPENAME) \ kernel void FN_NAME( \ constant size_t &dim, \ device const LEFT_TYPENAME *input, \ device RIGHT_TYPENAME *output, \ uint tid [[ thread_position_in_grid ]] \ ) { \ if (tid >= dim) { \ return; \ } \ output[tid] = static_cast<RIGHT_TYPENAME>(input[tid]); \ } \ kernel void FN_NAME_STRIDED( \ constant size_t &dim, \ constant size_t &num_dims, \ constant size_t *dims, \ constant size_t *strides, \ device const LEFT_TYPENAME *input, \ device RIGHT_TYPENAME *output, \ uint tid [[ thread_position_in_grid ]] \ ) { \ if (tid >= dim) { \ return; \ } \ output[tid] = static_cast<RIGHT_TYPENAME>(input[get_strided_index(tid, num_dims, dims, strides)]); \ } \ #define CAST_THROUGH(FN_NAME, FN_NAME_STRIDED, LEFT_TYPENAME, RIGHT_TYPENAME, IR_TYPENAME) \ kernel void FN_NAME( \ constant size_t &dim, \ device const LEFT_TYPENAME *input, \ device RIGHT_TYPENAME *output, \ uint tid [[ thread_position_in_grid ]] \ ) { \ if (tid >= dim) { \ return; \ } \ output[tid] = static_cast<RIGHT_TYPENAME>(static_cast<IR_TYPENAME>(input[tid])); \ } \ kernel void FN_NAME_STRIDED( \ constant size_t &dim, \ constant size_t &num_dims, \ constant size_t *dims, \ constant size_t *strides, \ device const LEFT_TYPENAME *input, \ device RIGHT_TYPENAME *output, \ uint tid [[ thread_position_in_grid ]] \ ) { \ if (tid >= dim) { \ return; \ } \ output[tid] = static_cast<RIGHT_TYPENAME>(static_cast<IR_TYPENAME>(input[get_strided_index(tid, num_dims, dims, strides)])); \ } \ // u32 CAST(cast_u32_f32, cast_u32_f32_strided, uint32_t, float) CAST(cast_u32_u8, cast_u32_u8_strided, uint32_t, uint8_t) CAST(cast_u32_f16, cast_u32_f16_strided, uint32_t, half) #if __METAL_VERSION__ >= 220 CAST(cast_u32_i64, cast_u32_i64_strided, uint32_t, int64_t) #endif #if defined(__HAVE_BFLOAT__) CAST(cast_u32_bf16, cast_u32_bf16_strided, uint32_t, bfloat) #endif // u8 CAST(cast_u8_u32, cast_u8_u32_strided, uint8_t, uint32_t) CAST(cast_u8_f32, cast_u8_f32_strided, uint8_t, float) CAST(cast_u8_f16, cast_u8_f16_strided, uint8_t, half) #if __METAL_VERSION__ >= 220 CAST(cast_u8_i64, cast_u8_i64_strided, uint8_t, int64_t) #endif #if defined(__HAVE_BFLOAT__) CAST(cast_u8_bf16, cast_u8_bf16_strided, uint8_t, bfloat) #endif // f16 CAST(cast_f16_f32, cast_f16_f32_strided, half, float) CAST(cast_f16_u8, cast_f16_u8_strided, half, uint8_t) CAST(cast_f16_u32, cast_f16_u32_strided, half, uint32_t) CAST(cast_f16_i64, cast_f16_i64_strided, half, int64_t) #if defined(__HAVE_BFLOAT__) CAST_THROUGH(cast_f16_bf16, cast_f16_bf16_strided, half, bfloat, float) #endif // i64 CAST(cast_i64_f32, cast_i64_f32_strided, int64_t, float) CAST(cast_i64_u8, cast_i64_u8_strided, int64_t, uint8_t) CAST(cast_i64_u32, cast_i64_u32_strided, int64_t, uint32_t) CAST(cast_i64_f16, cast_i64_f16_strided, int64_t, half) #if defined(__HAVE_BFLOAT__) CAST_THROUGH(cast_i64_bf16, cast_i64_bf16_strided, int64_t, bfloat, float) #endif // f32 CAST(cast_f32_f16, cast_f32_f16_strided, float, half) CAST(cast_f32_u32, cast_f32_u32_strided, float, uint32_t) CAST(cast_f32_u8, cast_f32_u8_strided, float, uint8_t) CAST(cast_f32_i64, cast_f32_i64_strided, float, int64_t) #if 
defined(__HAVE_BFLOAT__) CAST(cast_f32_bf16, cast_f32_bf16_strided, float, bfloat) #endif // bf16 #if defined(__HAVE_BFLOAT__) CAST(cast_bf16_u32, cast_bf16_u32_strided, bfloat, uint32_t) CAST(cast_bf16_i64, cast_bf16_i64_strided, bfloat, int64_t) CAST(cast_bf16_f32, cast_bf16_f32_strided, bfloat, float) CAST_THROUGH(cast_bf16_u8, cast_bf16_u8_strided, bfloat, uint8_t, float) CAST_THROUGH(cast_bf16_f16, cast_bf16_f16_strided, bfloat, half, float) #endif
candle/candle-metal-kernels/src/cast.metal/0
{ "file_path": "candle/candle-metal-kernels/src/cast.metal", "repo_id": "candle", "token_count": 2045 }
#include <metal_stdlib> #include <metal_math> # using namespace metal; METAL_FUNC uint get_strided_index( uint idx, constant size_t &num_dims, constant size_t *dims, constant size_t *strides ) { uint strided_i = 0; for (uint d = 0; d < num_dims; d++) { uint dim_idx = num_dims - 1 - d; strided_i += (idx % dims[dim_idx]) * strides[dim_idx]; idx /= dims[dim_idx]; } return strided_i; } template <typename T> METAL_FUNC T sqr(T in){ return in * in; } template <typename T> METAL_FUNC T recip(T in){ return T(1.0 / in); } template <typename T> METAL_FUNC T neg(T in){ return -in; } template <typename T> METAL_FUNC T erf(T in){ float x = (float) in; // constants float a1 = 0.254829592; float a2 = -0.284496736; float a3 = 1.421413741; float a4 = -1.453152027; float a5 = 1.061405429; float p = 0.3275911; // Save the sign of x int sign = 1; if (x < 0) sign = -1; x = fabs(x); // A&S formula 7.1.26 float t = 1.0/(1.0 + p*x); float y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*exp(-x*x); return T(sign*y); } template <typename T> METAL_FUNC T id(T in) { return in; } template <typename T> METAL_FUNC T gelu_erf(T x) { return T(x * (1 + erf(x * M_SQRT1_2_F)) / 2); } template <typename T> METAL_FUNC T gelu(T x) { if (x > 5) { return x; } T x_sq = x * x; T x_cube = x_sq * x; T alpha = x + static_cast<T>(0.044715) * x_cube; T beta = (static_cast<T>(M_2_SQRTPI_F * M_SQRT1_2_F) * alpha); return static_cast<T>(0.5) * x * (static_cast<T>(1.0) + T(precise::tanh(beta))); } template <typename T> METAL_FUNC T relu(T in){ if (in < 0) { return 0; } return in; } template <typename T> METAL_FUNC T silu(T in){ return in / (static_cast<T>(1) + exp(-in)); } template <typename T> METAL_FUNC T sigmoid(T in) { return recip(static_cast<T>(1) + exp(-in)); } #define TILE_SIZE 2 #define UNARY(FN, TYPENAME, FN_NAME, FN_NAME_STRIDED) \ kernel void FN_NAME( \ constant size_t &dim, \ device const TYPENAME *input, \ device TYPENAME *output, \ uint tid [[ thread_position_in_grid ]] \ ) { \ if (tid >= dim) { \ return; \ } \ output[tid] = TYPENAME(FN(float(input[tid]))); \ } \ kernel void FN_NAME##_##strided( \ constant size_t &dim, \ constant size_t &num_dims, \ constant size_t *dims, \ constant size_t *strides, \ device const TYPENAME *input, \ device TYPENAME *output, \ uint tid [[ thread_position_in_grid ]] \ ) { \ if (tid >= dim) { \ return; \ } \ output[tid] = TYPENAME(FN(float(input[get_strided_index(tid, num_dims, dims, strides)]))); \ } \ kernel void FN_NAME##_##tiled( \ constant size_t &dim, \ device const TYPENAME *input, \ device TYPENAME *output, \ uint tid [[ thread_position_in_grid ]] \ ) { \ for (uint i = 0; i < TILE_SIZE; i++) { \ const uint idx = tid * TILE_SIZE + i; \ output[idx] = TYPENAME(FN(float(input[idx]))); \ } \ } #define UNARY_OP(NAME) \ UNARY(NAME, float, NAME##_f32, NAME##_f32_strided); \ UNARY(NAME, half, NAME##_f16, NAME##_f16_strided); #define BFLOAT_UNARY_OP(NAME) \ UNARY(NAME, bfloat, NAME##_bf16, NAME##_bf16_strided); #define COPY2D(FN_NAME, TYPENAME) \ kernel void FN_NAME( \ constant int64_t &d1, \ constant int64_t &d2, \ constant int64_t &src_s, \ constant int64_t &dst_s, \ device const TYPENAME *input, \ device TYPENAME *output, \ uint2 idx [[thread_position_in_grid]] \ ) { \ if (idx.x >= d1 || idx.y >= d2) return; \ int64_t src_idx = idx.x * src_s + idx.y; \ int64_t dst_idx = idx.x * dst_s + idx.y; \ output[dst_idx] = input[src_idx]; \ } COPY2D(copy2d_f32, float) COPY2D(copy2d_f16, half) COPY2D(copy2d_u8, uint8_t) COPY2D(copy2d_u32, uint32_t) UNARY_OP(cos) UNARY_OP(sin) UNARY_OP(sqr) 
UNARY_OP(sqrt) UNARY_OP(neg) UNARY_OP(exp) UNARY_OP(log) UNARY_OP(gelu) UNARY_OP(silu) UNARY_OP(abs) UNARY_OP(ceil) UNARY_OP(floor) UNARY_OP(round) UNARY_OP(gelu_erf) UNARY_OP(erf) UNARY_OP(recip) UNARY_OP(relu) UNARY_OP(sign) UNARY_OP(sigmoid) UNARY(id, float, copy_f32, copy_f32_strided) UNARY(id, half, copy_f16, copy_f16_strided) UNARY(id, uint8_t, copy_u8, copy_u8_strided) UNARY(id, uint32_t, copy_u32, copy_u32_strided) // tanh may create NaN on large values, e.g. 45 rather than outputing 1. // This has been an issue for the encodec example. UNARY(precise::tanh, float, tanh_f32, tanh_f32_strided); UNARY(precise::tanh, half, tanh_f16, tanh_f16_strided); #if __METAL_VERSION__ >= 220 UNARY(id, int64_t, copy_i64, copy_i64_strided) COPY2D(copy2d_i64, int64_t) #endif #if defined(__HAVE_BFLOAT__) BFLOAT_UNARY_OP(cos) BFLOAT_UNARY_OP(sin) BFLOAT_UNARY_OP(sqr) BFLOAT_UNARY_OP(sqrt) BFLOAT_UNARY_OP(neg) BFLOAT_UNARY_OP(exp) BFLOAT_UNARY_OP(log) BFLOAT_UNARY_OP(gelu) BFLOAT_UNARY_OP(silu) BFLOAT_UNARY_OP(abs) BFLOAT_UNARY_OP(ceil) BFLOAT_UNARY_OP(floor) BFLOAT_UNARY_OP(round) BFLOAT_UNARY_OP(gelu_erf) BFLOAT_UNARY_OP(erf) BFLOAT_UNARY_OP(recip) BFLOAT_UNARY_OP(relu) BFLOAT_UNARY_OP(sign) BFLOAT_UNARY_OP(sigmoid) UNARY(id, bfloat, copy_bf16, copy_bf16_strided) UNARY(precise::tanh, bfloat, tanh_bf16, tanh_bf16_strided); COPY2D(copy2d_bf16, bfloat) #endif
candle/candle-metal-kernels/src/unary.metal/0
{ "file_path": "candle/candle-metal-kernels/src/unary.metal", "repo_id": "candle", "token_count": 2665 }
//! Activation Functions //! use candle::{Result, Tensor}; #[derive(Debug, Clone, Copy, PartialEq, serde::Deserialize, serde::Serialize, Default)] #[serde(rename_all = "lowercase")] pub enum Activation { #[default] #[serde(alias = "gelu")] Gelu, #[serde(alias = "gelu_new")] NewGelu, Relu, Relu2, Relu6, Silu, Sigmoid, HardSigmoid, Swiglu, Swish, HardSwish, Elu(f64), LeakyRelu(f64), #[serde(alias = "gelu_pytorch_tanh")] GeluPytorchTanh, } impl super::Module for Activation { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { Self::Gelu => xs.gelu_erf(), // https://github.com/huggingface/transformers/blob/12f043eaeaabfef6f6efea411d98e6f6d3c094b7/src/transformers/activations.py#L49-L78 Self::NewGelu => xs.gelu(), Self::Relu => xs.relu(), Self::Relu2 => xs.relu()?.sqr(), Self::Relu6 => xs.clamp(0f32, 6f32), Self::Silu => xs.silu(), Self::Sigmoid => crate::ops::sigmoid(xs), Self::HardSigmoid => crate::ops::hard_sigmoid(xs), Self::Swiglu => crate::ops::swiglu(xs), Self::Swish => xs * crate::ops::sigmoid(xs)?, Self::HardSwish => xs * crate::ops::hard_sigmoid(xs)?, &Self::Elu(alpha) => xs.elu(alpha), &Self::LeakyRelu(negative_slope) => crate::ops::leaky_relu(xs, negative_slope), Self::GeluPytorchTanh => xs.gelu(), } } } #[derive(Clone, Debug)] pub struct PReLU { weight: Tensor, is_scalar: bool, } impl PReLU { pub fn new(weight: Tensor, is_scalar: bool) -> Self { Self { weight, is_scalar } } pub fn weight(&self) -> &Tensor { &self.weight } pub fn is_scalar(&self) -> bool { self.is_scalar } } impl candle::Module for PReLU { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let weight = if self.is_scalar { self.weight.reshape(())? } else if xs.rank() >= 2 { let num_channels = xs.dim(1)?; let num_weights = self.weight.elem_count(); if num_weights != num_channels { candle::bail!("error in prelu: unexpected number of channels for the input, got {num_channels}, weight dim is {num_weights}") } let mut s = vec![1; xs.rank()]; s[1] = self.weight.elem_count(); self.weight.reshape(s)? } else { self.weight.clone() }; let zeros = xs.zeros_like()?; xs.maximum(&zeros)? + xs.minimum(&zeros)?.broadcast_mul(&weight)? } } /// Create or initialize a new PReLU layer. /// /// This uses some default name for weights, namely `"weight"`. /// # Arguments /// /// * `num_channels` - The number of channels. Use `None` to have as single trainable value and /// `Some` for a 1D vector with the appropriate number of channels. When applying the `forward` /// function, the input tensor shape `s` should either be one dimension with this number of /// channels or if `s.len() >= 2` it should have `s[1]` equal to this number. pub fn prelu(num_channels: Option<usize>, vs: crate::VarBuilder) -> Result<PReLU> { let init_ws = crate::init::Init::Const(0.25); // When using a scalar weight, the PyTorch encoding is to use a 1d vector of length 1. let ws = vs.get_with_hints((num_channels.unwrap_or(1),), "weight", init_ws)?; Ok(PReLU::new(ws, num_channels.is_none())) }
candle/candle-nn/src/activation.rs/0
{ "file_path": "candle/candle-nn/src/activation.rs", "repo_id": "candle", "token_count": 1665 }
//! Rotary Embeddings //! use candle::{CpuStorage, Layout, Result, Shape, Tensor, D}; use rayon::prelude::*; /// Interleaved variant of rotary embeddings. /// The x0 and x1 value are interleaved on the n_embd (= head_dim) dimension. /// The resulting y0 and y1 are also interleaved with: /// y0 = x0*cos - x1*sin /// y1 = x0*sin + x1*cos #[derive(Debug, Clone)] struct RotaryEmbI; impl candle::CustomOp3 for RotaryEmbI { fn name(&self) -> &'static str { "rotary-emb-int" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)> { fn inner<T: candle::WithDType + num_traits::Float>( src: &[T], l_src: &Layout, cos: &[T], l_cos: &Layout, sin: &[T], l_sin: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match l_src.contiguous_offsets() { None => candle::bail!("input src has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("input cos has to be contiguous"), Some((o1, o2)) => &cos[o1..o2], }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("input sin has to be contiguous"), Some((o1, o2)) => &sin[o1..o2], }; let (b, h, t, d) = l_src.shape().dims4()?; let el_count = b * h * t * d; let mut dst = vec![T::zero(); el_count]; src.par_chunks(t * d) .zip(dst.par_chunks_mut(t * d)) .for_each(|(src, dst)| { for i_over_2 in 0..t * d / 2 { let i = 2 * i_over_2; dst[i] = src[i] * cos[i_over_2] - src[i + 1] * sin[i_over_2]; dst[i + 1] = src[i] * sin[i_over_2] + src[i + 1] * cos[i_over_2]; } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (b, h, t, d).into())) } use candle::backend::BackendStorage; use CpuStorage::{BF16, F16, F32, F64}; match (s1, s2, s3) { (BF16(s1), BF16(s2), BF16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F16(s1), F16(s2), F16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F32(s1), F32(s2), F32(s3)) => inner(s1, l1, s2, l2, s3, l3), (F64(s1), F64(s2), F64(s3)) => inner(s1, l1, s2, l2, s3, l3), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, s3: &candle::CudaStorage, l3: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, WrapErr}; use candle::{CudaDevice, WithDType}; fn inner<T: DeviceRepr + WithDType>( src: &CudaSlice<T>, l_src: &Layout, cos: &CudaSlice<T>, l_cos: &Layout, sin: &CudaSlice<T>, l_sin: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match l_src.contiguous_offsets() { None => candle::bail!("src input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("cos input has to be contiguous"), Some((o1, o2)) => cos.slice(o1..o2), }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("sin input has to be contiguous"), Some((o1, o2)) => sin.slice(o1..o2), }; let (b, h, t, d) = l_src.shape().dims4()?; let el = b * h * t * d; let cfg = LaunchConfig::for_num_elems((el / 2) as u32); let func = dev.get_or_load_func(&kernel_name::<T>("rope_i"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = (&src, &cos, &sin, &dst, (b * h) as u32, (t * d) as u32); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } use candle::backend::BackendStorage; use candle::cuda_backend::CudaStorageSlice::{BF16, F16, F32, F64}; let dev = s1.device(); let slice = match (&s1.slice, &s2.slice, &s3.slice) { (BF16(s1), BF16(s2), BF16(s3)) => BF16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F16(s1), F16(s2), F16(s3)) => F16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F32(s1), F32(s2), F32(s3)) => F32(inner(s1, l1, s2, l2, s3, l3, dev)?), (F64(s1), F64(s2), F64(s3)) => F64(inner(s1, l1, s2, l2, s3, l3, dev)?), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), }; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, src: &candle::MetalStorage, l_src: &Layout, cos: &candle::MetalStorage, l_cos: &Layout, sin: &candle::MetalStorage, l_sin: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = src.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); if cos.dtype() != src.dtype() || sin.dtype() != src.dtype() { candle::bail!( "dtype mismatch in rope-i {:?} {:?} {:?}", src.dtype(), cos.dtype(), sin.dtype() ) } let name = match src.dtype() { candle::DType::F32 => "rope_i_f32", candle::DType::F16 => "rope_i_f16", candle::DType::BF16 => "rope_i_bf16", dtype => candle::bail!("rope-i is not implemented for {dtype:?}"), }; let (b, h, t, d) = l_src.shape().dims4()?; let el = b * h * t * d; let output = device.new_buffer(el, src.dtype(), "rope-i")?; candle_metal_kernels::call_rope_i( device.metal_device(), &command_buffer, kernels, name, b * h, t * d, src.buffer(), l_src.start_offset() * src.dtype().size_in_bytes(), cos.buffer(), l_cos.start_offset() * cos.dtype().size_in_bytes(), sin.buffer(), l_sin.start_offset() * sin.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let out = candle::MetalStorage::new(output, device.clone(), el, src.dtype()); Ok((out, l_src.shape().clone())) } } pub fn rope_i(xs: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (_b_sz, _n_head, seq_len, n_embd) = xs.dims4()?; let (cos_seq_len, cos_n_embd) = cos.dims2()?; let (sin_seq_len, sin_n_embd) = cos.dims2()?; if cos_n_embd * 2 != n_embd || sin_n_embd * 2 != n_embd || seq_len > cos_seq_len || seq_len > sin_seq_len { candle::bail!( "inconsistent last dim size in rope {:?} {:?} {:?}", xs.shape(), cos.shape(), sin.shape() ) } if !xs.is_contiguous() { candle::bail!("xs has to be contiguous in rope") } if !cos.is_contiguous() { candle::bail!("cos has to be contiguous in rope") } if !sin.is_contiguous() { candle::bail!("sin has to be contiguous in rope") } xs.apply_op3_no_bwd(cos, sin, &RotaryEmbI) } pub fn rope_i_slow(x: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (b_sz, n_head, seq_len, n_embd) = x.dims4()?; let cos = cos .narrow(0, 0, seq_len)? .reshape((seq_len, n_embd / 2, 1))?; let sin = sin .narrow(0, 0, seq_len)? .reshape((seq_len, n_embd / 2, 1))?; let cos = cos.broadcast_as((b_sz, 1, seq_len, n_embd / 2, 1))?; let sin = sin.broadcast_as((b_sz, 1, seq_len, n_embd / 2, 1))?; let x = x.reshape((b_sz, n_head, seq_len, n_embd / 2, 2))?; let x0 = x.narrow(D::Minus1, 0, 1)?; let x1 = x.narrow(D::Minus1, 1, 1)?; let y0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?; let y1 = (x0.broadcast_mul(&sin)? 
+ x1.broadcast_mul(&cos)?)?; let rope = Tensor::cat(&[y0, y1], D::Minus1)?; let rope = rope.flatten_from(D::Minus2)?; Ok(rope) } /// Contiguous variant of rope embeddings. #[derive(Debug, Clone)] struct RotaryEmb; impl candle::CustomOp3 for RotaryEmb { fn name(&self) -> &'static str { "rotary-emb" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)> { fn inner<T: candle::WithDType + num_traits::Float>( src: &[T], l_src: &Layout, cos: &[T], l_cos: &Layout, sin: &[T], l_sin: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match l_src.contiguous_offsets() { None => candle::bail!("input src has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("input cos has to be contiguous"), Some((o1, o2)) => &cos[o1..o2], }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("input sin has to be contiguous"), Some((o1, o2)) => &sin[o1..o2], }; let (b, h, t, d) = l_src.shape().dims4()?; let el_count = b * h * t * d; let mut dst = vec![T::zero(); el_count]; src.par_chunks(t * d) .zip(dst.par_chunks_mut(t * d)) .for_each(|(src, dst)| { for i_t in 0..t { for i_d in 0..d / 2 { let i1 = i_t * d + i_d; let i2 = i1 + d / 2; let i_cs = i_t * (d / 2) + i_d; dst[i1] = src[i1] * cos[i_cs] - src[i2] * sin[i_cs]; dst[i2] = src[i1] * sin[i_cs] + src[i2] * cos[i_cs]; } } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (b, h, t, d).into())) } use candle::backend::BackendStorage; use CpuStorage::{BF16, F16, F32, F64}; match (s1, s2, s3) { (BF16(s1), BF16(s2), BF16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F16(s1), F16(s2), F16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F32(s1), F32(s2), F32(s3)) => inner(s1, l1, s2, l2, s3, l3), (F64(s1), F64(s2), F64(s3)) => inner(s1, l1, s2, l2, s3, l3), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, s3: &candle::CudaStorage, l3: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, WrapErr}; use candle::{CudaDevice, WithDType}; fn inner<T: DeviceRepr + WithDType>( src: &CudaSlice<T>, l_src: &Layout, cos: &CudaSlice<T>, l_cos: &Layout, sin: &CudaSlice<T>, l_sin: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match l_src.contiguous_offsets() { None => candle::bail!("src input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("cos input has to be contiguous"), Some((o1, o2)) => cos.slice(o1..o2), }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("sin input has to be contiguous"), Some((o1, o2)) => sin.slice(o1..o2), }; let (b, h, t, d) = l_src.shape().dims4()?; let el = b * h * t * d; let cfg = LaunchConfig::for_num_elems((el / 2) as u32); let func = dev.get_or_load_func(&kernel_name::<T>("rope"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = ( &src, &cos, &sin, &dst, (b * h) as u32, (t * d) as u32, d as u32, ); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } use candle::backend::BackendStorage; use candle::cuda_backend::CudaStorageSlice::{BF16, F16, F32, F64}; let dev = s1.device(); let slice = match (&s1.slice, &s2.slice, &s3.slice) { (BF16(s1), BF16(s2), BF16(s3)) => BF16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F16(s1), F16(s2), F16(s3)) => F16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F32(s1), F32(s2), F32(s3)) => F32(inner(s1, l1, s2, l2, s3, l3, dev)?), (F64(s1), F64(s2), F64(s3)) => F64(inner(s1, l1, s2, l2, s3, l3, dev)?), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), }; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, src: &candle::MetalStorage, l_src: &Layout, cos: &candle::MetalStorage, l_cos: &Layout, sin: &candle::MetalStorage, l_sin: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = src.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); if cos.dtype() != src.dtype() || sin.dtype() != src.dtype() { candle::bail!( "dtype mismatch in rope {:?} {:?} {:?}", src.dtype(), cos.dtype(), sin.dtype() ) } let name = match src.dtype() { candle::DType::F32 => "rope_f32", candle::DType::F16 => "rope_f16", candle::DType::BF16 => "rope_bf16", dtype => candle::bail!("rope is not implemented for {dtype:?}"), }; let (b, h, t, d) = l_src.shape().dims4()?; let el = b * h * t * d; let output = device.new_buffer(el, src.dtype(), "rope-i")?; candle_metal_kernels::call_rope( device.metal_device(), &command_buffer, kernels, name, b * h, t * d, d, src.buffer(), l_src.start_offset() * src.dtype().size_in_bytes(), cos.buffer(), l_cos.start_offset() * cos.dtype().size_in_bytes(), sin.buffer(), l_sin.start_offset() * sin.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let out = candle::MetalStorage::new(output, device.clone(), el, src.dtype()); Ok((out, l_src.shape().clone())) } } pub fn rope(xs: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (_b_sz, _n_head, seq_len, n_embd) = xs.dims4()?; let (cos_seq_len, cos_n_embd) = cos.dims2()?; let (sin_seq_len, sin_n_embd) = sin.dims2()?; if cos_n_embd * 2 != n_embd || sin_n_embd * 2 != n_embd || seq_len > cos_seq_len || seq_len > sin_seq_len { candle::bail!( "inconsistent last dim size in rope {:?} {:?} {:?}", xs.shape(), cos.shape(), sin.shape() ) } if !xs.is_contiguous() { candle::bail!("xs has to be contiguous in rope") } if !cos.is_contiguous() { candle::bail!("cos has to be contiguous in rope") } if !sin.is_contiguous() { candle::bail!("sin has to be contiguous in rope") } xs.apply_op3_no_bwd(cos, sin, &RotaryEmb) } fn rotate_half(xs: &Tensor) -> Result<Tensor> { let last_dim = xs.dim(D::Minus1)?; let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?; let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?; Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1) } pub fn rope_slow(x: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (_b_sz, _h, seq_len, _n_embd) = x.dims4()?; let cos = Tensor::cat(&[cos, cos], D::Minus1)?; let sin = Tensor::cat(&[sin, sin], D::Minus1)?; let cos = cos.narrow(0, 0, seq_len)?; let sin = sin.narrow(0, 0, seq_len)?; let cos = cos.unsqueeze(0)?.unsqueeze(0)?; let sin = sin.unsqueeze(0)?.unsqueeze(0)?; x.broadcast_mul(&cos)? + rotate_half(x)?.broadcast_mul(&sin)? } /// T (seqlen)/H (num-heads)/D (head-dim) contiguous variant of rope embeddings. 
#[derive(Debug, Clone)] struct RotaryEmbThd; impl candle::CustomOp3 for RotaryEmbThd { fn name(&self) -> &'static str { "rotary-emb" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)> { fn inner<T: candle::WithDType + num_traits::Float>( src: &[T], l_src: &Layout, cos: &[T], l_cos: &Layout, sin: &[T], l_sin: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match l_src.contiguous_offsets() { None => candle::bail!("input src has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("input cos has to be contiguous"), Some((o1, o2)) => &cos[o1..o2], }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("input sin has to be contiguous"), Some((o1, o2)) => &sin[o1..o2], }; let (b, t, h, d) = l_src.shape().dims4()?; let el_count = b * h * t * d; let mut dst = vec![T::zero(); el_count]; src.par_chunks(t * h * d) .zip(dst.par_chunks_mut(t * h * d)) .for_each(|(src, dst)| { for i_t in 0..t { for i_d in 0..d / 2 { let i_cs = i_t * (d / 2) + i_d; for i_h in 0..h { let i1 = i_t * h * d + i_h * d + i_d; let i2 = i1 + d / 2; dst[i1] = src[i1] * cos[i_cs] - src[i2] * sin[i_cs]; dst[i2] = src[i1] * sin[i_cs] + src[i2] * cos[i_cs]; } } } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (b, t, h, d).into())) } use candle::backend::BackendStorage; use CpuStorage::{BF16, F16, F32, F64}; match (s1, s2, s3) { (BF16(s1), BF16(s2), BF16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F16(s1), F16(s2), F16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F32(s1), F32(s2), F32(s3)) => inner(s1, l1, s2, l2, s3, l3), (F64(s1), F64(s2), F64(s3)) => inner(s1, l1, s2, l2, s3, l3), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, s3: &candle::CudaStorage, l3: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, WrapErr}; use candle::{CudaDevice, WithDType}; fn inner<T: DeviceRepr + WithDType>( src: &CudaSlice<T>, l_src: &Layout, cos: &CudaSlice<T>, l_cos: &Layout, sin: &CudaSlice<T>, l_sin: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match l_src.contiguous_offsets() { None => candle::bail!("src input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("cos input has to be contiguous"), Some((o1, o2)) => cos.slice(o1..o2), }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("sin input has to be contiguous"), Some((o1, o2)) => sin.slice(o1..o2), }; let (b, t, h, d) = l_src.shape().dims4()?; let el = b * h * t * d; let cfg = LaunchConfig::for_num_elems((el / 2) as u32); let func = dev.get_or_load_func(&kernel_name::<T>("rope_thd"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = ( &src, &cos, &sin, &dst, b as u32, t as u32, h as u32, d as u32, ); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } use candle::backend::BackendStorage; use candle::cuda_backend::CudaStorageSlice::{BF16, F16, F32, F64}; let dev = s1.device(); let slice = match (&s1.slice, &s2.slice, &s3.slice) { (BF16(s1), BF16(s2), BF16(s3)) => BF16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F16(s1), F16(s2), F16(s3)) => F16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F32(s1), F32(s2), F32(s3)) => F32(inner(s1, l1, s2, l2, s3, l3, dev)?), (F64(s1), F64(s2), F64(s3)) => F64(inner(s1, l1, s2, l2, s3, l3, dev)?), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), }; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, src: &candle::MetalStorage, l_src: &Layout, cos: &candle::MetalStorage, l_cos: &Layout, sin: &candle::MetalStorage, l_sin: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = src.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); if cos.dtype() != src.dtype() || sin.dtype() != src.dtype() { candle::bail!( "dtype mismatch in rope {:?} {:?} {:?}", src.dtype(), cos.dtype(), sin.dtype() ) } let name = match src.dtype() { candle::DType::F32 => "rope_thd_f32", candle::DType::F16 => "rope_thd_f16", candle::DType::BF16 => "rope_thd_bf16", dtype => candle::bail!("rope_thd is not implemented for {dtype:?}"), }; let (b, t, h, d) = l_src.shape().dims4()?; let el = b * h * t * d; let output = device.new_buffer(el, src.dtype(), "rope-thd")?; candle_metal_kernels::call_rope_thd( device.metal_device(), &command_buffer, kernels, name, b, t, h, d, src.buffer(), l_src.start_offset() * src.dtype().size_in_bytes(), cos.buffer(), l_cos.start_offset() * cos.dtype().size_in_bytes(), sin.buffer(), l_sin.start_offset() * sin.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let out = candle::MetalStorage::new(output, device.clone(), el, src.dtype()); Ok((out, l_src.shape().clone())) } } pub fn rope_thd(xs: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (_b_sz, seq_len, _n_head, n_embd) = xs.dims4()?; let (cos_seq_len, cos_n_embd) = cos.dims2()?; let (sin_seq_len, sin_n_embd) = sin.dims2()?; if cos_n_embd * 2 != n_embd || sin_n_embd * 2 != n_embd || seq_len > cos_seq_len || seq_len > sin_seq_len { candle::bail!( "inconsistent last dim size in rope {:?} {:?} {:?}", xs.shape(), cos.shape(), sin.shape() ) } if !xs.is_contiguous() { candle::bail!("xs has to be contiguous in rope") } if !cos.is_contiguous() { candle::bail!("cos has to be contiguous in rope") } if !sin.is_contiguous() { candle::bail!("sin has to be contiguous in rope") } xs.apply_op3_no_bwd(cos, sin, &RotaryEmbThd) }
candle/candle-nn/src/rotary_emb.rs/0
{ "file_path": "candle/candle-nn/src/rotary_emb.rs", "repo_id": "candle", "token_count": 15521 }
from dataclasses import dataclass from typing import Optional from candle.nn import Module, Embedding, LayerNorm, Linear, ModuleList from candle import Tensor import candle import candle.functional as F from typing import Tuple, Optional @dataclass class Config: vocab_size: int = 30522 hidden_size: int = 768 num_hidden_layers: int = 12 num_attention_heads: int = 12 intermediate_size: int = 3072 hidden_act: str = "gelu" hidden_dropout_prob: float = 0.1 max_position_embeddings: int = 512 type_vocab_size: int = 2 initializer_range: float = 0.02 layer_norm_eps: float = 1e-12 pad_token_id: int = 0 position_embedding_type: str = "absolute" use_cache: bool = True classifier_dropout: Optional[float] = None model_type: Optional[str] = "bert" class BertSelfAttention(Module): def __init__(self, config: Config) -> None: super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / self.num_attention_heads) all_head_size = int(config.num_attention_heads * self.attention_head_size) hidden_size = config.hidden_size self.query = Linear(hidden_size, all_head_size) self.key = Linear(hidden_size, all_head_size) self.value = Linear(hidden_size, all_head_size) def transpose_for_scores(self, x: Tensor) -> Tensor: new_x_shape = x.shape[:-1] + ( self.num_attention_heads, self.attention_head_size, ) x = x.reshape(new_x_shape).transpose(1, 2) return x.contiguous() def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: query = self.query.forward(hidden_states) key = self.key.forward(hidden_states) value = self.value.forward(hidden_states) query = self.transpose_for_scores(query) key = self.transpose_for_scores(key) value = self.transpose_for_scores(value) attention_scores = query.matmul(key.t()) attention_scores = attention_scores / float(self.attention_head_size) ** 0.5 if attention_mask is not None: b_size, _, _, last_dim = attention_scores.shape attention_scores = attention_scores.broadcast_add(attention_mask.reshape((b_size, 1, 1, last_dim))) attention_probs = F.softmax(attention_scores, dim=-1) context_layer = attention_probs.matmul(value) context_layer = context_layer.transpose(1, 2).contiguous() context_layer = context_layer.flatten_from(-2) return context_layer class BertSelfOutput(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.LayerNorm.forward(hidden_states + input_tensor) class BertAttention(Module): def __init__(self, config: Config) -> None: super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, hidden_states: Tensor, attention_mask: None) -> Tensor: self_outputs = self.self.forward(hidden_states, attention_mask=attention_mask) attention_output = self.output.forward(self_outputs, hidden_states) return attention_output class BertIntermediate(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.intermediate_size) self.act = F.gelu if config.hidden_act == "gelu" else F.relu def forward(self, hidden_states: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.act(hidden_states) class BertOutput(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = 
Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.LayerNorm.forward(hidden_states + input_tensor) class BertLayer(Module): def __init__(self, config: Config) -> None: super().__init__() self.attention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: attention_output = self.attention.forward(hidden_states, attention_mask=attention_mask) # TODO: Support cross-attention? # https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523 # TODO: Support something similar to `apply_chunking_to_forward`? intermediate_output = self.intermediate.forward(attention_output) layer_output = self.output.forward(intermediate_output, attention_output) return layer_output class BertEncoder(Module): def __init__(self, config: Config) -> None: super().__init__() self.layer = ModuleList() for _ in range(config.num_hidden_layers): self.layer.append(BertLayer(config)) def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: for l in self.layer: hidden_states = l.forward(hidden_states, attention_mask=attention_mask) return hidden_states class BertEmbeddings(Module): def __init__(self, config: Config) -> None: super().__init__() self.word_embeddings = Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.position_ids = candle.Tensor(list(range(config.max_position_embeddings))).reshape( (1, config.max_position_embeddings) ) def forward(self, input_ids: Tensor, token_type_ids: Tensor) -> Tensor: (_batch_size, seq_len) = input_ids.shape input_embeddings = self.word_embeddings.forward(input_ids) token_type_embeddings = self.token_type_embeddings.forward(token_type_ids) embeddings: Tensor = input_embeddings + token_type_embeddings position_ids = list(range(seq_len)) position_ids = Tensor(position_ids).to_dtype(input_ids.dtype).to_device(input_ids.device) embeddings = embeddings.broadcast_add(self.position_embeddings.forward(position_ids)) embeddings = self.LayerNorm(embeddings) return embeddings class BertPooler(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.hidden_size) self.activation = F.tanh def forward(self, hidden_states: Tensor) -> Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense.forward(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output def masked_fill(on_false: float, mask: Tensor, on_true: float): shape = mask.shape on_true = candle.tensor(on_true).broadcast_as(shape) on_false = candle.tensor(on_false).broadcast_as(shape) return mask.where_cond(on_true, on_false) # https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L874 class BertModel(Module): def __init__(self, config: Config, add_pooling_layer=True) -> None: super().__init__() self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) 
self.pooler = BertPooler(config) if add_pooling_layer else None def forward( self, input_ids: Tensor, token_type_ids: Tensor, attention_mask=None ) -> Tuple[Tensor, Optional[Tensor]]: if attention_mask is not None: # Replace 0s with -inf, and 1s with 0s. attention_mask = masked_fill(float("-inf"), attention_mask, 1.0) embeddings = self.embeddings.forward(input_ids, token_type_ids) encoder_out = self.encoder.forward(embeddings, attention_mask=attention_mask) pooled_output = self.pooler(encoder_out) if self.pooler is not None else None return encoder_out, pooled_output
candle/candle-pyo3/py_src/candle/models/bert.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/models/bert.py", "repo_id": "candle", "token_count": 3528 }
# This example shows how the candle Python api can be used to replicate llama.cpp. import sys from typing import Dict, Tuple, Any import candle from candle.models.llama import QuantizedLlama from candle import utils MAX_SEQ_LEN = 4096 def gguf_rename(tensor_name: str): if tensor_name == "token_embd.weight": return "tok_embeddings.weight" if tensor_name == "output_norm.weight": return "norm.weight" tensor_name = tensor_name.replace("blk.", "layers.") tensor_name = tensor_name.replace(".attn_q.", ".attention.wq.") tensor_name = tensor_name.replace(".attn_k.", ".attention.wk.") tensor_name = tensor_name.replace(".attn_v.", ".attention.wv.") tensor_name = tensor_name.replace(".attn_output.", ".attention.wo.") tensor_name = tensor_name.replace(".ffn_gate.", ".feed_forward.w1.") tensor_name = tensor_name.replace(".ffn_down.", ".feed_forward.w2.") tensor_name = tensor_name.replace(".ffn_up.", ".feed_forward.w3.") tensor_name = tensor_name.replace(".attn_norm.", ".attention_norm.") return tensor_name def main(): if len(sys.argv) < 2: raise ValueError("missing weight file argument") filename = sys.argv[1] print(f"reading model file {filename}") if filename.endswith("gguf"): all_tensors, metadata = utils.load_gguf(filename) vocab = metadata["tokenizer.ggml.tokens"] for i, v in enumerate(vocab): vocab[i] = "\n" if v == "<0x0A>" else v.replace("▁", " ") hparams = {k: v for (k, v) in metadata.items() if not k.startswith("tokenizer")} print(hparams) hparams = { "n_vocab": len(vocab), "n_embd": metadata["llama.embedding_length"], "n_mult": 256, "n_head": metadata["llama.attention.head_count"], "n_head_kv": metadata["llama.attention.head_count_kv"], "n_layer": metadata["llama.block_count"], "n_rot": metadata["llama.rope.dimension_count"], "rope_freq": metadata.get("llama.rope.freq_base", 10000.0), "ftype": metadata["general.file_type"], "context_length": metadata["llama.context_length"], } all_tensors = {gguf_rename(k): v for k, v in all_tensors.items()} else: all_tensors, hparams, vocab = utils.load_ggml(filename) hparams["context_length"] = 2048 print(hparams) model = QuantizedLlama(hparams, all_tensors) print("model built, starting inference") tokens = [1] for token_idx in range(500): last_token = tokens[-1] lt = candle.tensor([last_token]).unsqueeze(0) logits = model.forward(lt, len(tokens)) # Greedy sampling for now # pr = candle.nn.softmax(logits, -1) m = logits.get(0).argmax_keepdim(-1) next_token = m.values()[0] print(vocab[next_token], end="", flush=True) tokens.append(next_token) if __name__ == "__main__": main()
candle/candle-pyo3/quant-llama.py/0
{ "file_path": "candle/candle-pyo3/quant-llama.py", "repo_id": "candle", "token_count": 1318 }
use candle::{Result, Tensor, D}; use candle_nn::{conv2d, group_norm, Conv2d, GroupNorm, VarBuilder}; // https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/modules/autoencoder.py#L9 #[derive(Debug, Clone)] pub struct Config { pub resolution: usize, pub in_channels: usize, pub ch: usize, pub out_ch: usize, pub ch_mult: Vec<usize>, pub num_res_blocks: usize, pub z_channels: usize, pub scale_factor: f64, pub shift_factor: f64, } impl Config { // https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/util.py#L47 pub fn dev() -> Self { Self { resolution: 256, in_channels: 3, ch: 128, out_ch: 3, ch_mult: vec![1, 2, 4, 4], num_res_blocks: 2, z_channels: 16, scale_factor: 0.3611, shift_factor: 0.1159, } } // https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/util.py#L79 pub fn schnell() -> Self { Self { resolution: 256, in_channels: 3, ch: 128, out_ch: 3, ch_mult: vec![1, 2, 4, 4], num_res_blocks: 2, z_channels: 16, scale_factor: 0.3611, shift_factor: 0.1159, } } } fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> { let dim = q.dim(D::Minus1)?; let scale_factor = 1.0 / (dim as f64).sqrt(); let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?; candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(v) } #[derive(Debug, Clone)] struct AttnBlock { q: Conv2d, k: Conv2d, v: Conv2d, proj_out: Conv2d, norm: GroupNorm, } impl AttnBlock { fn new(in_c: usize, vb: VarBuilder) -> Result<Self> { let q = conv2d(in_c, in_c, 1, Default::default(), vb.pp("q"))?; let k = conv2d(in_c, in_c, 1, Default::default(), vb.pp("k"))?; let v = conv2d(in_c, in_c, 1, Default::default(), vb.pp("v"))?; let proj_out = conv2d(in_c, in_c, 1, Default::default(), vb.pp("proj_out"))?; let norm = group_norm(32, in_c, 1e-6, vb.pp("norm"))?; Ok(Self { q, k, v, proj_out, norm, }) } } impl candle::Module for AttnBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let init_xs = xs; let xs = xs.apply(&self.norm)?; let q = xs.apply(&self.q)?; let k = xs.apply(&self.k)?; let v = xs.apply(&self.v)?; let (b, c, h, w) = q.dims4()?; let q = q.flatten_from(2)?.t()?.unsqueeze(1)?; let k = k.flatten_from(2)?.t()?.unsqueeze(1)?; let v = v.flatten_from(2)?.t()?.unsqueeze(1)?; let xs = scaled_dot_product_attention(&q, &k, &v)?; let xs = xs.squeeze(1)?.t()?.reshape((b, c, h, w))?; xs.apply(&self.proj_out)? + init_xs } } #[derive(Debug, Clone)] struct ResnetBlock { norm1: GroupNorm, conv1: Conv2d, norm2: GroupNorm, conv2: Conv2d, nin_shortcut: Option<Conv2d>, } impl ResnetBlock { fn new(in_c: usize, out_c: usize, vb: VarBuilder) -> Result<Self> { let conv_cfg = candle_nn::Conv2dConfig { padding: 1, ..Default::default() }; let norm1 = group_norm(32, in_c, 1e-6, vb.pp("norm1"))?; let conv1 = conv2d(in_c, out_c, 3, conv_cfg, vb.pp("conv1"))?; let norm2 = group_norm(32, out_c, 1e-6, vb.pp("norm2"))?; let conv2 = conv2d(out_c, out_c, 3, conv_cfg, vb.pp("conv2"))?; let nin_shortcut = if in_c == out_c { None } else { Some(conv2d( in_c, out_c, 1, Default::default(), vb.pp("nin_shortcut"), )?) }; Ok(Self { norm1, conv1, norm2, conv2, nin_shortcut, }) } } impl candle::Module for ResnetBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let h = xs .apply(&self.norm1)? .apply(&candle_nn::Activation::Swish)? .apply(&self.conv1)? .apply(&self.norm2)? .apply(&candle_nn::Activation::Swish)? .apply(&self.conv2)?; match self.nin_shortcut.as_ref() { None => xs + h, Some(c) => xs.apply(c)? 
+ h, } } } #[derive(Debug, Clone)] struct Downsample { conv: Conv2d, } impl Downsample { fn new(in_c: usize, vb: VarBuilder) -> Result<Self> { let conv_cfg = candle_nn::Conv2dConfig { stride: 2, ..Default::default() }; let conv = conv2d(in_c, in_c, 3, conv_cfg, vb.pp("conv"))?; Ok(Self { conv }) } } impl candle::Module for Downsample { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.pad_with_zeros(D::Minus1, 0, 1)?; let xs = xs.pad_with_zeros(D::Minus2, 0, 1)?; xs.apply(&self.conv) } } #[derive(Debug, Clone)] struct Upsample { conv: Conv2d, } impl Upsample { fn new(in_c: usize, vb: VarBuilder) -> Result<Self> { let conv_cfg = candle_nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv = conv2d(in_c, in_c, 3, conv_cfg, vb.pp("conv"))?; Ok(Self { conv }) } } impl candle::Module for Upsample { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_, _, h, w) = xs.dims4()?; xs.upsample_nearest2d(h * 2, w * 2)?.apply(&self.conv) } } #[derive(Debug, Clone)] struct DownBlock { block: Vec<ResnetBlock>, downsample: Option<Downsample>, } #[derive(Debug, Clone)] pub struct Encoder { conv_in: Conv2d, mid_block_1: ResnetBlock, mid_attn_1: AttnBlock, mid_block_2: ResnetBlock, norm_out: GroupNorm, conv_out: Conv2d, down: Vec<DownBlock>, } impl Encoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let conv_cfg = candle_nn::Conv2dConfig { padding: 1, ..Default::default() }; let mut block_in = cfg.ch; let conv_in = conv2d(cfg.in_channels, block_in, 3, conv_cfg, vb.pp("conv_in"))?; let mut down = Vec::with_capacity(cfg.ch_mult.len()); let vb_d = vb.pp("down"); for (i_level, ch_mult) in cfg.ch_mult.iter().enumerate() { let mut block = Vec::with_capacity(cfg.num_res_blocks); let vb_d = vb_d.pp(i_level); let vb_b = vb_d.pp("block"); let in_ch_mult = if i_level == 0 { 1 } else { cfg.ch_mult[i_level - 1] }; block_in = cfg.ch * in_ch_mult; let block_out = cfg.ch * ch_mult; for i_block in 0..cfg.num_res_blocks { let b = ResnetBlock::new(block_in, block_out, vb_b.pp(i_block))?; block.push(b); block_in = block_out; } let downsample = if i_level != cfg.ch_mult.len() - 1 { Some(Downsample::new(block_in, vb_d.pp("downsample"))?) } else { None }; let block = DownBlock { block, downsample }; down.push(block) } let mid_block_1 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_1"))?; let mid_attn_1 = AttnBlock::new(block_in, vb.pp("mid.attn_1"))?; let mid_block_2 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_2"))?; let conv_out = conv2d(block_in, 2 * cfg.z_channels, 3, conv_cfg, vb.pp("conv_out"))?; let norm_out = group_norm(32, block_in, 1e-6, vb.pp("norm_out"))?; Ok(Self { conv_in, mid_block_1, mid_attn_1, mid_block_2, norm_out, conv_out, down, }) } } impl candle_nn::Module for Encoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut h = xs.apply(&self.conv_in)?; for block in self.down.iter() { for b in block.block.iter() { h = h.apply(b)? } if let Some(ds) = block.downsample.as_ref() { h = h.apply(ds)? } } h.apply(&self.mid_block_1)? .apply(&self.mid_attn_1)? .apply(&self.mid_block_2)? .apply(&self.norm_out)? .apply(&candle_nn::Activation::Swish)? 
.apply(&self.conv_out) } } #[derive(Debug, Clone)] struct UpBlock { block: Vec<ResnetBlock>, upsample: Option<Upsample>, } #[derive(Debug, Clone)] pub struct Decoder { conv_in: Conv2d, mid_block_1: ResnetBlock, mid_attn_1: AttnBlock, mid_block_2: ResnetBlock, norm_out: GroupNorm, conv_out: Conv2d, up: Vec<UpBlock>, } impl Decoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let conv_cfg = candle_nn::Conv2dConfig { padding: 1, ..Default::default() }; let mut block_in = cfg.ch * cfg.ch_mult.last().unwrap_or(&1); let conv_in = conv2d(cfg.z_channels, block_in, 3, conv_cfg, vb.pp("conv_in"))?; let mid_block_1 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_1"))?; let mid_attn_1 = AttnBlock::new(block_in, vb.pp("mid.attn_1"))?; let mid_block_2 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_2"))?; let mut up = Vec::with_capacity(cfg.ch_mult.len()); let vb_u = vb.pp("up"); for (i_level, ch_mult) in cfg.ch_mult.iter().enumerate().rev() { let block_out = cfg.ch * ch_mult; let vb_u = vb_u.pp(i_level); let vb_b = vb_u.pp("block"); let mut block = Vec::with_capacity(cfg.num_res_blocks + 1); for i_block in 0..=cfg.num_res_blocks { let b = ResnetBlock::new(block_in, block_out, vb_b.pp(i_block))?; block.push(b); block_in = block_out; } let upsample = if i_level != 0 { Some(Upsample::new(block_in, vb_u.pp("upsample"))?) } else { None }; let block = UpBlock { block, upsample }; up.push(block) } up.reverse(); let norm_out = group_norm(32, block_in, 1e-6, vb.pp("norm_out"))?; let conv_out = conv2d(block_in, cfg.out_ch, 3, conv_cfg, vb.pp("conv_out"))?; Ok(Self { conv_in, mid_block_1, mid_attn_1, mid_block_2, norm_out, conv_out, up, }) } } impl candle_nn::Module for Decoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let h = xs.apply(&self.conv_in)?; let mut h = h .apply(&self.mid_block_1)? .apply(&self.mid_attn_1)? .apply(&self.mid_block_2)?; for block in self.up.iter().rev() { for b in block.block.iter() { h = h.apply(b)? } if let Some(us) = block.upsample.as_ref() { h = h.apply(us)? } } h.apply(&self.norm_out)? .apply(&candle_nn::Activation::Swish)? .apply(&self.conv_out) } } #[derive(Debug, Clone)] pub struct DiagonalGaussian { sample: bool, chunk_dim: usize, } impl DiagonalGaussian { pub fn new(sample: bool, chunk_dim: usize) -> Result<Self> { Ok(Self { sample, chunk_dim }) } } impl candle_nn::Module for DiagonalGaussian { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let chunks = xs.chunk(2, self.chunk_dim)?; if self.sample { let std = (&chunks[1] * 0.5)?.exp()?; &chunks[0] + (std * chunks[0].randn_like(0., 1.))? } else { Ok(chunks[0].clone()) } } } #[derive(Debug, Clone)] pub struct AutoEncoder { encoder: Encoder, decoder: Decoder, reg: DiagonalGaussian, shift_factor: f64, scale_factor: f64, } impl AutoEncoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let encoder = Encoder::new(cfg, vb.pp("encoder"))?; let decoder = Decoder::new(cfg, vb.pp("decoder"))?; let reg = DiagonalGaussian::new(true, 1)?; Ok(Self { encoder, decoder, reg, scale_factor: cfg.scale_factor, shift_factor: cfg.shift_factor, }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let z = xs.apply(&self.encoder)?.apply(&self.reg)?; (z - self.shift_factor)? * self.scale_factor } pub fn decode(&self, xs: &Tensor) -> Result<Tensor> { let xs = ((xs / self.scale_factor)? + self.shift_factor)?; xs.apply(&self.decoder) } } impl candle::Module for AutoEncoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.decode(&self.encode(xs)?) } }
candle/candle-transformers/src/models/flux/autoencoder.rs/0
{ "file_path": "candle/candle-transformers/src/models/flux/autoencoder.rs", "repo_id": "candle", "token_count": 7145 }
//! The LLaVA (Large Language and Vision Assistant) model. //! //! This provides the main model implementation combining a vision tower (CLIP) with //! language model (Llama) for multimodal capabilities. The architecture implements the training-free projection technique. //! //! - 💻[GH Link](https://github.com/haotian-liu/LLaVA/tree/main) //! - 📝 [Paper](https://arxiv.org/abs/2304.08485)/ Visual Instruction Tuning //! pub mod config; pub mod utils; use crate::models::clip::vision_model::{ClipVisionConfig, ClipVisionTransformer}; use crate::models::llama::{Cache, Llama}; use crate::models::with_tracing::linear; use candle::{bail, Context, Device, IndexOp, Result, Tensor}; use candle_nn::{seq, Activation, Module, Sequential, VarBuilder}; use fancy_regex::Regex; use utils::get_anyres_image_grid_shape; use config::LLaVAConfig; fn mlp_gelu_match(mm_projector_type: &str) -> Option<usize> { let mlp_gelu_regex = Regex::new(r"^mlp(\d+)x_gelu$").unwrap(); if let Ok(Some(captures)) = mlp_gelu_regex.captures(mm_projector_type) { if let Some(match_str) = captures.get(1) { let match_str = match_str.as_str(); match_str.parse::<usize>().ok() } else { None } } else { None } } fn unpad_image(tensor: &Tensor, original_size: &(u32, u32)) -> Result<Tensor> { assert_eq!(tensor.dims().len(), 3); let (original_width, original_height) = *original_size; let tensor_dims = tensor.dims(); let current_height = tensor_dims[1]; let current_width = tensor_dims[2]; let original_aspect_ratio = (original_width as f32) / (original_height as f32); let current_aspect_ratio = (current_width as f32) / (current_height as f32); if original_aspect_ratio > current_aspect_ratio { let scale_factor = (current_width as f32) / (original_width as f32); let new_height = (original_height as f32 * scale_factor).floor() as usize; let padding = (current_height - new_height) / 2; tensor.i((.., padding..current_width - padding, ..)) } else { let scale_factor = (current_height as f32) / (original_height as f32); let new_width = (original_width as f32 * scale_factor).floor() as usize; let padding = (current_width - new_width) / 2; tensor.i((.., .., padding..current_width - padding)) } } pub struct IdentityMap {} impl Module for IdentityMap { fn forward(&self, x: &Tensor) -> Result<Tensor> { Ok(x.clone()) } } pub struct MMProjector { pub modules: Sequential, } impl MMProjector { pub fn load(vb: &VarBuilder, config: &LLaVAConfig) -> Result<Self> { if config.mm_projector_type == "linear" { let vb_prefix = if config.hf { "multi_modal_projector.linear_1" } else { "model.mm_projector.0" }; let linear = linear(config.mm_hidden_size, config.hidden_size, vb.pp(vb_prefix))?; let modules = seq().add(linear); Ok(Self { modules }) } else if let Some(mlp_depth) = mlp_gelu_match(&config.mm_projector_type) { let modules = if config.hf { let mut modules = seq().add(linear( config.mm_hidden_size, config.hidden_size, vb.pp("multi_modal_projector.linear_1"), )?); for i in 1..mlp_depth { modules = modules.add(Activation::Gelu).add(linear( config.hidden_size, config.hidden_size, vb.pp(format!("multi_modal_projector.linear_{}", i + 1)), )?); } modules } else { let mut modules = seq().add(linear( config.mm_hidden_size, config.hidden_size, vb.pp("model.mm_projector.0"), )?); for i in 1..mlp_depth { modules = modules.add(Activation::Gelu).add(linear( config.hidden_size, config.hidden_size, vb.pp(format!("model.mm_projector.{}", i * 2)), )?); } modules }; Ok(Self { modules }) } else if config.mm_projector_type == "identity" { Ok(Self { modules: seq().add(IdentityMap {}), }) 
} else { bail!( "Unsupported MM projector type: {}", config.mm_projector_type ) } } pub fn forward(&self, x: &Tensor) -> Result<Tensor> { self.modules.forward(x) } } pub struct ClipVisionTower { model: ClipVisionTransformer, select_layer: isize, select_feature_method: String, pub config: ClipVisionConfig, } impl ClipVisionTower { pub fn new( vb: VarBuilder, select_layer: isize, select_feature_method: &str, config: &Option<ClipVisionConfig>, ) -> Result<Self> { let config = if config.is_none() { ClipVisionConfig::clip_vit_large_patch14_336() } else { config.clone().context("no config")? }; let select_layer = match select_layer { -1 | -2 => select_layer, _ => bail!("Unsupported select layer: {}", select_layer), }; let model = ClipVisionTransformer::new(vb, &config)?; Ok(Self { model, select_layer, select_feature_method: select_feature_method.to_string(), config, }) } pub fn forward(&self, x: &Tensor) -> Result<Tensor> { let result = self.model.output_hidden_states(x)?; let index = result.len() as isize + self.select_layer; let result = result[index as usize].clone(); if self.select_feature_method == "cls_patch" { Ok(result) } else { result.i((.., 1..)) } } pub fn num_patches_per_side(&self) -> usize { self.config.image_size / self.config.patch_size } } pub struct LLaVA { pub clip_vision_tower: ClipVisionTower, pub image_newline: Tensor, pub mm_projector: MMProjector, pub llama: Llama, config: LLaVAConfig, device: Device, } impl LLaVA { pub fn load( vb: VarBuilder, config: &LLaVAConfig, clip_vision_config: Option<ClipVisionConfig>, ) -> Result<Self> { let device = vb.device().clone(); let llama_config = config.to_llama_config(); let mm_projector = MMProjector::load(&vb, config)?; let (clip_vision_tower, image_newline, llama) = if config.hf { ( ClipVisionTower::new( vb.pp("vision_tower.vision_model"), config.mm_vision_select_layer, &config.mm_vision_select_feature, &clip_vision_config, )?, vb.get(&[config.hidden_size], "image_newline")? .to_device(&device)?, Llama::load(vb.pp("language_model"), &llama_config)?, ) } else { ( ClipVisionTower::new( vb.pp("model.vision_tower.vision_tower.vision_model"), config.mm_vision_select_layer, &config.mm_vision_select_feature, &clip_vision_config, )?, vb.get(&[config.hidden_size], "model.image_newline")? 
.to_device(&device)?, Llama::load(vb, &llama_config)?, ) }; Ok(Self { clip_vision_tower, image_newline, mm_projector, llama, config: (*config).clone(), device, }) } pub fn encode_images(&self, x: &Tensor) -> Result<Tensor> { let image_features = self.clip_vision_tower.forward(x)?; let image_features = self.mm_projector.forward(&image_features)?; Ok(image_features) } // currently only for single image, 4 dim tensor pub fn prepare_inputs_labels_for_multimodal( &self, input_ids: &Tensor, images: &[Tensor], image_sizes: &[(u32, u32)], ) -> Result<Tensor> { //TODO: process of multiple images/ new line // 576: 336(input size)/14(patch size)=24 24*24+1(class)=577 577-1=576 let concat_images = Tensor::cat(images, 0)?; let image_features_together = self.encode_images(&concat_images)?; let split_sizes = images .iter() .map(|x| x.shape().dims()[0]) .collect::<Vec<usize>>(); // can be replaced by split let mut index_pos = 0; let mut image_features = Vec::new(); for split_size in split_sizes.iter() { image_features.push(image_features_together.i(index_pos..index_pos + (*split_size))?); index_pos += *split_size; } let mm_patch_merge_type = &self.config.mm_patch_merge_type; let image_aspect_ratio = &self.config.image_aspect_ratio; let image_features = if mm_patch_merge_type == "flat" { image_features .iter() .map(|x| x.flatten(0, 1)) .collect::<Result<Vec<Tensor>>>()? } else if mm_patch_merge_type.starts_with("spatial") { let mut new_image_features = Vec::new(); for (image_idx, image_feature) in image_features.iter().enumerate() { let new_image_feature = if image_feature.dims()[0] > 1 { let base_image_feature = image_feature.get(0)?; let patch_image_feature = image_feature.i(1..)?; let height = self.clip_vision_tower.num_patches_per_side(); let width = height; assert_eq!(height * width, base_image_feature.dims()[0]); let image_size = image_sizes[image_idx]; let new_image_feature = if image_aspect_ratio == "anyres" { let (num_patch_width, num_patch_height) = get_anyres_image_grid_shape( image_size, &self.config.image_grid_pinpoints, self.clip_vision_tower.config.image_size as u32, ); patch_image_feature.reshape(( num_patch_height as usize, num_patch_width as usize, height, width, (), ))? } else { bail!("not implemented in original python LLaVA yet") }; let new_image_feature = if mm_patch_merge_type.contains("unpad") { let new_image_feature = new_image_feature .permute((4, 0, 2, 1, 3))? .flatten(1, 2)? .flatten(2, 3)?; let new_image_feature = unpad_image(&new_image_feature, &image_size)?; let new_image_feature_dims = new_image_feature.dims(); let image_new_line = self .image_newline .reshape((self.config.hidden_size, 1, 1))? .broadcast_as(( new_image_feature_dims[0], new_image_feature_dims[1], 1, ))?; let new_image_feature = Tensor::cat(&[new_image_feature, image_new_line], 2)?; new_image_feature.flatten(1, 2)?.transpose(0, 1)? } else { new_image_feature.permute((0, 2, 1, 3, 4))?.flatten(0, 3)? }; Tensor::cat(&[base_image_feature, new_image_feature], 0)? } else { let new_image_feature = image_feature.get(0)?; if mm_patch_merge_type.contains("unpad") { Tensor::cat( &[new_image_feature, self.image_newline.clone().unsqueeze(0)?], 0, )? 
} else { new_image_feature } }; new_image_features.push(new_image_feature); } new_image_features } else { bail!("Unexpected mm_patch_merge_type: {mm_patch_merge_type}") }; // can easily be replaced by nonzero if it is implemented in candle let input_ids_vec = input_ids.squeeze(0)?.to_vec1::<i64>()?; let mut image_indices = { let mut image_indices = vec![0_i64]; image_indices.extend( input_ids_vec .iter() .enumerate() .filter_map(|(i, x)| { if *x == self.config.image_token_index as i64 { Some(i as i64) } else { None } }) .collect::<Vec<i64>>(), ); image_indices }; if image_indices.len() == 1 { //no image, only [0], return self.llama.embed(input_ids); } let input_ids_noim = input_ids_vec .iter() .filter_map(|x| { if *x != self.config.image_token_index as i64 { Some(*x) } else { None } }) .collect::<Vec<i64>>(); let input_ids_noim_len = input_ids_noim.len(); image_indices.push((input_ids_noim_len) as i64); let input_ids_noim = Tensor::from_vec(input_ids_noim, input_ids_noim_len, &self.device)?; let cur_input_embeds = self.llama.embed(&input_ids_noim)?; // can be replace by split if it is implemented in candle let input_embed_no_ims = { let mut input_embeds = Vec::new(); for i in 0..image_indices.len() - 1 { let start = (image_indices[i]) as usize; let end = image_indices[i + 1] as usize; input_embeds.push(cur_input_embeds.i((start..end, ..))?) } input_embeds }; let mut cur_new_input_embeds = Vec::new(); for (i, image_feature) in image_features.iter().enumerate() { cur_new_input_embeds.push(input_embed_no_ims[i].clone()); cur_new_input_embeds.push(image_feature.clone()); } cur_new_input_embeds.push(input_embed_no_ims[image_features.len()].clone()); let new_input_embeds = Tensor::cat(&cur_new_input_embeds, 0)?; //trancate let new_input_embeds = if let Some(tokenizer_model_max_length) = self.config.tokenizer_model_max_length { let (new_input_embeds_length, _) = new_input_embeds.shape().dims2()?; if new_input_embeds_length > tokenizer_model_max_length { new_input_embeds.i((..tokenizer_model_max_length, ..))? } else { new_input_embeds } } else { new_input_embeds }; new_input_embeds.unsqueeze(0) } pub fn forward( &self, input_embeds: &Tensor, position_id: usize, cache: &mut Cache, ) -> Result<Tensor> { self.llama .forward_input_embed(input_embeds, position_id, cache) } }
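
// ---------------------------------------------------------------------------
// Example (sketch, not part of the upstream file): how the pieces above fit
// together for a single prefill step. The `vb`, `config`, `cache`,
// `input_ids`, `image` and `image_size` values are assumed to be prepared by
// the caller; only APIs defined in this module are used.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn llava_prefill_sketch(
    vb: VarBuilder,
    config: &LLaVAConfig,
    cache: &mut Cache,
    input_ids: &Tensor,     // (1, seq_len) prompt ids containing the image token
    image: &Tensor,         // (1, 3, H, W) preprocessed image patches
    image_size: (u32, u32), // original (width, height) before preprocessing
) -> Result<Tensor> {
    // Build the full multimodal model (CLIP vision tower + projector + Llama).
    let llava = LLaVA::load(vb, config, None)?;
    // Splice the projected image features into the text embeddings at the
    // position of the image token.
    let input_embeds = llava.prepare_inputs_labels_for_multimodal(
        input_ids,
        &[image.clone()],
        &[image_size],
    )?;
    // Run the language model on the combined embeddings (position 0 = prefill).
    llava.forward(&input_embeds, 0, cache)
}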
candle/candle-transformers/src/models/llava/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/llava/mod.rs", "repo_id": "candle", "token_count": 8610 }
//! Multimodal Diffusion Transformer (MMDiT)
//!
//! The Multimodal Diffusion Transformer (MMDiT) is the architecture
//! introduced for Stable Diffusion 3, with the MMDiT-X variant used in Stable Diffusion 3.5.
//!
//! - 📝 [Research Paper](https://arxiv.org/abs/2403.03206)
//! - 💻 ComfyUI [reference implementation](https://github.com/comfyanonymous/ComfyUI/blob/78e133d0415784924cd2674e2ee48f3eeca8a2aa/comfy/ldm/modules/diffusionmodules/mmdit.py)
//! - 💻 Stability-AI [MMDiT-X implementation](https://github.com/Stability-AI/sd3.5/blob/4e484e05308d83fb77ae6f680028e6c313f9da54/mmditx.py)
//!
pub mod blocks;
pub mod embedding;
pub mod model;
pub mod projections;
candle/candle-transformers/src/models/mmdit/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/mmdit/mod.rs", "repo_id": "candle", "token_count": 395 }
//! Multimodal multi-purpose model combining Gemma-based language model with SigLIP image understanding //! //! See PaLiGemma details at: //! - [Paper](https://arxiv.org/abs/2402.05257) //! - [Google Blog Post](https://blog.research.google/2024/02/paligemma-scaling-language-image.html) //! //! The model is a multimodal combination of: //! - SigLIP vision encoder //! - Gemma language model //! - Cross-projection layers //! //! References: //! - [HuggingFace Implementation](https://huggingface.co/google/paligemma-3b) //! - [Paper: PaLI-3 and Beyond: Scaling Language-Image Learning](https://arxiv.org/abs/2402.05257) //! use crate::models::{gemma, siglip}; use candle::{Module, Result, Tensor}; use candle_nn::{linear, Linear, VarBuilder}; #[derive(serde::Deserialize, Clone, Debug)] pub struct Config { pub vision_config: siglip::VisionConfig, pub text_config: gemma::Config, pub projection_dim: usize, } impl Config { pub fn paligemma_3b_224() -> Self { // https://huggingface.co/google/paligemma-3b-pt-224/blob/main/config.json Self { vision_config: siglip::VisionConfig::paligemma_3b_224(), text_config: gemma::Config { hidden_size: 2048, intermediate_size: 16384, num_attention_heads: 8, num_hidden_layers: 18, num_key_value_heads: 1, vocab_size: 257216, // Default values. rope_theta: 10000., head_dim: 256, hidden_act: Some(candle_nn::Activation::GeluPytorchTanh), hidden_activation: None, attention_bias: false, max_position_embeddings: 8192, rms_norm_eps: 1e-6, }, projection_dim: 2048, } } pub fn paligemma_3b_448() -> Self { Self { vision_config: siglip::VisionConfig::paligemma_3b_448(), text_config: gemma::Config { hidden_size: 2048, intermediate_size: 16384, num_attention_heads: 8, num_hidden_layers: 18, num_key_value_heads: 1, // Default values. rope_theta: 10000., head_dim: 256, hidden_act: Some(candle_nn::Activation::GeluPytorchTanh), hidden_activation: None, attention_bias: false, max_position_embeddings: 8192, rms_norm_eps: 1e-6, vocab_size: 257216, }, projection_dim: 2048, } } } #[derive(Clone, Debug)] pub struct MultiModalProjector { linear: Linear, } impl MultiModalProjector { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let linear = linear( cfg.vision_config.hidden_size, cfg.projection_dim, vb.pp("linear"), )?; Ok(Self { linear }) } } impl Module for MultiModalProjector { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.linear) } } #[derive(Clone, Debug)] pub struct Model { pos: usize, vision_tower: siglip::VisionModel, multi_modal_projector: MultiModalProjector, language_model: gemma::Model, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vision_tower = siglip::VisionModel::new( &cfg.vision_config, false, vb.pp("vision_tower.vision_model"), )?; let multi_modal_projector = MultiModalProjector::new(cfg, vb.pp("multi_modal_projector"))?; let language_model = gemma::Model::new(false, &cfg.text_config, vb.pp("language_model"))?; Ok(Self { pos: 0, language_model, vision_tower, multi_modal_projector, }) } pub fn setup(&mut self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<Tensor> { self.clear_kv_cache(); let image_features = self .vision_tower .forward(pixel_values)? 
.apply(&self.multi_modal_projector)?; let image_features = crate::models::clip::div_l2_norm(&image_features)?; let text_features = self.language_model.embed_tokens().forward(input_ids)?; let input_embeds = Tensor::cat(&[image_features, text_features], 1)?; self.pos = input_embeds.dim(1)?; self.language_model.forward_embeds(&input_embeds, None, 0) } pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let pos = self.pos; let seq_len = input_ids.dim(1)?; self.pos = pos + seq_len; self.language_model.forward(input_ids, pos) } pub fn forward_without_projection(&mut self, input_ids: &Tensor) -> Result<Tensor> { self.clear_kv_cache(); let input_embeds = self.language_model.embed_tokens().forward(input_ids)?; self.language_model .forward_embeds_without_projection(&input_embeds, None, 0) } pub fn setup_without_projection( &mut self, pixel_values: &Tensor, input_ids: &Tensor, ) -> Result<Tensor> { self.clear_kv_cache(); let image_features = self .vision_tower .forward(pixel_values)? .apply(&self.multi_modal_projector)?; let image_features = crate::models::clip::div_l2_norm(&image_features)?; let text_features = self.language_model.embed_tokens().forward(input_ids)?; let input_embeds = Tensor::cat(&[image_features, text_features], 1)?; self.language_model .forward_embeds_without_projection(&input_embeds, None, 0) } pub fn clear_kv_cache(&mut self) { self.pos = 0; self.language_model.clear_kv_cache() } }
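
// ---------------------------------------------------------------------------
// Example (sketch, not part of the upstream file): the intended two-phase
// inference flow. `vb`, `pixel_values` and `prompt_ids` are assumed to be
// prepared by the caller; only APIs defined in this module are used.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn paligemma_prefill_sketch(
    vb: VarBuilder,
    pixel_values: &Tensor, // (1, 3, 224, 224) SigLIP-preprocessed image
    prompt_ids: &Tensor,   // (1, seq_len) tokenized text prompt
) -> Result<Tensor> {
    let cfg = Config::paligemma_3b_224();
    let mut model = Model::new(&cfg, vb)?;
    // Phase 1: `setup` embeds the image, projects it into the text embedding
    // space, prepends it to the prompt embeddings and runs the prefill pass.
    let logits = model.setup(pixel_values, prompt_ids)?;
    // Phase 2 (decoding) would then repeatedly call `model.forward(&next_id)`
    // with the sampled token; `self.pos` tracks the running position.
    Ok(logits)
}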
candle/candle-transformers/src/models/paligemma.rs/0
{ "file_path": "candle/candle-transformers/src/models/paligemma.rs", "repo_id": "candle", "token_count": 2807 }
//! Quantized MPT model implementation. //! //! MPT (MPT-7B) is a causal transformer model series optimized for code generation. //! This implementation provides quantization for reduced memory and compute. //! //! Key characteristics: //! - Multi-Query Grouped Attention (MQA) //! - Support for KV-caching //! - Pre-computed ALiBi attention biases //! - Support for 8-bit quantization //! //! References: //! - [Replit Code Models](https://huggingface.co/replit/replit-code-v1_5-3b) //! - [MPT-7B Implementation](https://github.com/mosaicml/llm-foundry) //! /// MPT model used by replit-code-v1_5-3b /// https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/modeling_mpt.py /// use crate::quantized_nn::{layer_norm_no_bias, linear_no_bias, Embedding, Linear}; pub use crate::quantized_var_builder::VarBuilder; /// MPT model used by replit-code-v1_5-3b /// https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/modeling_mpt.py use candle::{IndexOp, Module, Result, Tensor, D}; use candle_nn::LayerNorm; pub use super::mpt::Config; #[derive(Debug, Clone)] struct GroupedQueryAttention { wqkv: Linear, out_proj: Linear, kv_cache: Option<(Tensor, Tensor)>, softmax_scale: f64, head_dim: usize, d_model: usize, n_heads: usize, kv_n_heads: usize, attn_bias: Tensor, span: tracing::Span, } impl GroupedQueryAttention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let head_dim = cfg.d_model / cfg.n_heads; let wqkv_size = cfg.d_model + 2 * cfg.kv_n_heads * head_dim; let wqkv = linear_no_bias(cfg.d_model, wqkv_size, vb.pp("Wqkv"))?; let softmax_scale = 1f64 / (head_dim as f64).sqrt(); let out_proj = linear_no_bias(cfg.d_model, cfg.d_model, vb.pp("out_proj"))?; let attn_bias = super::mpt::build_alibi_bias(cfg)?.to_device(vb.device())?; Ok(Self { wqkv, out_proj, kv_cache: None, softmax_scale, head_dim, d_model: cfg.d_model, n_heads: cfg.n_heads, kv_n_heads: cfg.kv_n_heads, attn_bias, span: tracing::span!(tracing::Level::TRACE, "gqa"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len, _n_embd) = xs.dims3()?; let qkv = self.wqkv.forward(xs)?; let query = qkv.narrow(2, 0, self.d_model)?; let kv_size = self.kv_n_heads * self.head_dim; let key = qkv.narrow(2, self.d_model, kv_size)?; let value = qkv.narrow(2, self.d_model + kv_size, kv_size)?; // scaled_multihead_dot_product_attention let query = query .reshape((b_size, seq_len, self.n_heads, ()))? .transpose(1, 2)?; // b,h,s,d let key = key .reshape((b_size, seq_len, self.kv_n_heads, ()))? .permute((0, 2, 3, 1))?; // b,h,d,s let value = value .reshape((b_size, seq_len, self.kv_n_heads, ()))? .transpose(1, 2)?; // b,h,s,d let (key, value) = match &self.kv_cache { None => (key, value), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &key], 3)?; let v = Tensor::cat(&[prev_v, &value], 2)?; (k, v) } }; self.kv_cache = Some((key.clone(), value.clone())); let query = query.contiguous()?; let key = crate::utils::repeat_kv(key, self.n_heads / self.kv_n_heads)?.contiguous()?; let value = crate::utils::repeat_kv(value, self.n_heads / self.kv_n_heads)?.contiguous()?; let attn_weights = (query.matmul(&key)? * self.softmax_scale)?; let attn_bias = { let s_q = query.dim(D::Minus2)?; let s_k = key.dim(D::Minus1)?; let (_, _, a_q, a_k) = self.attn_bias.dims4()?; let start_q = a_q.saturating_sub(s_q); let start_k = a_k.saturating_sub(s_k); self.attn_bias.i((.., .., start_q.., start_k..))? 
}; let attn_weights = attn_weights.broadcast_add(&attn_bias)?; let attn_weights = match mask { None => attn_weights, Some(mask) => super::mpt::masked_fill( &attn_weights, &mask.broadcast_as(attn_weights.shape())?, f32::NEG_INFINITY, )?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_weights .matmul(&value)? .transpose(1, 2)? .flatten_from(D::Minus2)?; let out = attn_output.apply(&self.out_proj)?; Ok(out) } } #[derive(Debug, Clone)] struct Ffn { up_proj: Linear, down_proj: Linear, } impl Ffn { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden = cfg.d_model * cfg.expansion_ratio; let up_proj = linear_no_bias(cfg.d_model, hidden, vb.pp("up_proj"))?; let down_proj = linear_no_bias(hidden, cfg.d_model, vb.pp("down_proj"))?; Ok(Self { up_proj, down_proj }) } } impl Module for Ffn { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.up_proj)?.gelu_erf()?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct MPTBlock { norm1: LayerNorm, // Do we need the low-precision variant? attn: GroupedQueryAttention, norm2: LayerNorm, ffn: Ffn, } impl MPTBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let norm1 = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_1"))?; let norm2 = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_2"))?; let attn = GroupedQueryAttention::new(cfg, vb.pp("attn"))?; let ffn = Ffn::new(cfg, vb.pp("ffn"))?; Ok(Self { norm1, attn, norm2, ffn, }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.norm1)?; let xs = self.attn.forward(&xs, mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.norm2)?.apply(&self.ffn)?; xs + residual } } #[derive(Debug, Clone)] pub struct Model { wte: Embedding, blocks: Vec<MPTBlock>, norm_f: LayerNorm, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let wte = Embedding::new(cfg.vocab_size, cfg.d_model, vb.pp("wte"))?; let vb_b = vb.pp("blocks"); let mut blocks = Vec::with_capacity(cfg.n_layers); for i in 0..cfg.n_layers { let block = MPTBlock::new(cfg, vb_b.pp(i))?; blocks.push(block) } let norm_f = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_f"))?; Ok(Self { wte, blocks, norm_f, }) } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let (_b_size, seq_len) = xs.dims2()?; let mut xs = xs.apply(&self.wte)?; let mask = if seq_len <= 1 { None } else { Some(super::mpt::get_mask(seq_len, xs.device())?) }; for block in self.blocks.iter_mut() { xs = block.forward(&xs, mask.as_ref())?; } let xs = xs.apply(&self.norm_f)?; let logits = xs .narrow(1, seq_len - 1, 1)? .squeeze(1)? .matmul(&self.wte.embeddings().t()?)? .squeeze(1)?; Ok(logits) } }
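
// ---------------------------------------------------------------------------
// Example (sketch, not part of the upstream file): loading the quantized
// weights from a GGUF file and scoring a prompt. The `gguf_path`, `device`,
// `cfg` and `token_ids` values are assumed to be provided by the caller (the
// `Config` normally comes from the presets in `super::mpt`).
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn quantized_mpt_sketch(
    gguf_path: &std::path::Path,
    device: &candle::Device,
    cfg: &Config,
    token_ids: &Tensor, // (batch, seq_len) prompt token ids
) -> Result<Tensor> {
    // `VarBuilder` here is the quantized GGUF variant re-exported above.
    let vb = VarBuilder::from_gguf(gguf_path, device)?;
    let mut model = Model::new(cfg, vb)?;
    // `forward` returns the logits for the last position only.
    model.forward(token_ids)
}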
candle/candle-transformers/src/models/quantized_mpt.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_mpt.rs", "repo_id": "candle", "token_count": 3969 }
//! Segformer model implementation for semantic segmentation and image classification. //! //! Segformer is a transformer-based model designed for vision tasks. It uses a hierarchical //! structure that progressively generates features at different scales. //! //! Key characteristics: //! - Efficient self-attention with sequence reduction //! - Hierarchical feature generation //! - Mix-FFN for local and global feature interaction //! - Lightweight all-MLP decode head //! //! References: //! - [SegFormer Paper](https://arxiv.org/abs/2105.15203) //! - [Model Card](https://huggingface.co/nvidia/mit-b0) //! use crate::models::with_tracing::{conv2d, linear, Conv2d, Linear}; use candle::{Context, Module, ModuleT, Result, Tensor, D}; use candle_nn::{conv2d_no_bias, layer_norm, Activation, Conv2dConfig, VarBuilder}; use serde::Deserialize; use std::collections::HashMap; // https://github.com/huggingface/transformers/blob/main/src/transformers/models/segformer/configuration_segformer.py #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { #[serde(default)] pub id2label: HashMap<String, String>, pub num_channels: usize, pub num_encoder_blocks: usize, pub depths: Vec<usize>, pub sr_ratios: Vec<usize>, pub hidden_sizes: Vec<usize>, pub patch_sizes: Vec<usize>, pub strides: Vec<usize>, pub num_attention_heads: Vec<usize>, pub mlp_ratios: Vec<usize>, pub hidden_act: candle_nn::Activation, pub layer_norm_eps: f64, pub decoder_hidden_size: usize, } #[derive(Debug, Clone)] struct SegformerOverlapPatchEmbeddings { projection: Conv2d, layer_norm: candle_nn::LayerNorm, } impl SegformerOverlapPatchEmbeddings { fn new( config: &Config, patch_size: usize, stride: usize, num_channels: usize, hidden_size: usize, vb: VarBuilder, ) -> Result<Self> { let projection = conv2d( num_channels, hidden_size, patch_size, Conv2dConfig { stride, padding: patch_size / 2, ..Default::default() }, vb.pp("proj"), )?; let layer_norm = candle_nn::layer_norm(hidden_size, config.layer_norm_eps, vb.pp("layer_norm"))?; Ok(Self { projection, layer_norm, }) } } impl Module for SegformerOverlapPatchEmbeddings { fn forward(&self, x: &Tensor) -> Result<Tensor> { let embeddings = self.projection.forward(x)?; let shape = embeddings.shape(); // [B, C, H, W] -> [B, H * W, C] let embeddings = embeddings.flatten_from(2)?.transpose(1, 2)?; let embeddings = self.layer_norm.forward(&embeddings)?; // [B, H * W, C] -> [B, C, H, W] let embeddings = embeddings.transpose(1, 2)?.reshape(shape)?; Ok(embeddings) } } #[derive(Debug, Clone)] struct SegformerEfficientSelfAttention { num_attention_heads: usize, attention_head_size: usize, query: Linear, key: Linear, value: Linear, sr: Option<Conv2d>, layer_norm: Option<layer_norm::LayerNorm>, } impl SegformerEfficientSelfAttention { fn new( config: &Config, hidden_size: usize, num_attention_heads: usize, sequence_reduction_ratio: usize, vb: VarBuilder, ) -> Result<Self> { if hidden_size % num_attention_heads != 0 { candle::bail!( "The hidden size {} is not a multiple of the number of attention heads {}", hidden_size, num_attention_heads ) } let attention_head_size = hidden_size / num_attention_heads; let all_head_size = num_attention_heads * attention_head_size; let query = linear(hidden_size, all_head_size, vb.pp("query"))?; let key = linear(hidden_size, all_head_size, vb.pp("key"))?; let value = linear(hidden_size, all_head_size, vb.pp("value"))?; let (sr, layer_norm) = if sequence_reduction_ratio > 1 { ( Some(conv2d( hidden_size, hidden_size, sequence_reduction_ratio, Conv2dConfig { 
stride: sequence_reduction_ratio, ..Default::default() }, vb.pp("sr"), )?), Some(candle_nn::layer_norm( hidden_size, config.layer_norm_eps, vb.pp("layer_norm"), )?), ) } else { (None, None) }; Ok(Self { num_attention_heads, attention_head_size, query, key, value, sr, layer_norm, }) } fn transpose_for_scores(&self, hidden_states: Tensor) -> Result<Tensor> { let (batch, seq_length, _) = hidden_states.shape().dims3()?; let new_shape = &[ batch, seq_length, self.num_attention_heads, self.attention_head_size, ]; let hidden_states = hidden_states.reshape(new_shape)?; let hidden_states = hidden_states.permute((0, 2, 1, 3))?; Ok(hidden_states) } } impl Module for SegformerEfficientSelfAttention { fn forward(&self, x: &Tensor) -> Result<Tensor> { // [B, C, H, W] -> [B, H * W, C] let hidden_states = x.flatten_from(2)?.permute((0, 2, 1))?; let query = self .transpose_for_scores(self.query.forward(&hidden_states)?)? .contiguous()?; let hidden_states = if let (Some(sr), Some(layer_norm)) = (&self.sr, &self.layer_norm) { let hidden_states = sr.forward(x)?; // [B, C, H, W] -> [B, H * W, C] let hidden_states = hidden_states.flatten_from(2)?.permute((0, 2, 1))?; layer_norm.forward(&hidden_states)? } else { // already [B, H * W, C] hidden_states }; // standard self-attention let key = self .transpose_for_scores(self.key.forward(&hidden_states)?)? .contiguous()?; let value = self .transpose_for_scores(self.value.forward(&hidden_states)?)? .contiguous()?; let attention_scores = (query.matmul(&key.t()?)? / f64::sqrt(self.attention_head_size as f64))?; let attention_scores = candle_nn::ops::softmax_last_dim(&attention_scores)?; let result = attention_scores.matmul(&value)?; let result = result.permute((0, 2, 1, 3))?.contiguous()?; result.flatten_from(D::Minus2) } } #[derive(Debug, Clone)] struct SegformerSelfOutput { dense: Linear, } impl SegformerSelfOutput { fn new(hidden_size: usize, vb: VarBuilder) -> Result<Self> { let dense = linear(hidden_size, hidden_size, vb.pp("dense"))?; Ok(Self { dense }) } } impl Module for SegformerSelfOutput { fn forward(&self, x: &Tensor) -> Result<Tensor> { self.dense.forward(x) } } #[derive(Debug, Clone)] struct SegformerAttention { attention: SegformerEfficientSelfAttention, output: SegformerSelfOutput, } impl SegformerAttention { fn new( config: &Config, hidden_size: usize, num_attention_heads: usize, sequence_reduction_ratio: usize, vb: VarBuilder, ) -> Result<Self> { let attention = SegformerEfficientSelfAttention::new( config, hidden_size, num_attention_heads, sequence_reduction_ratio, vb.pp("self"), )?; let output = SegformerSelfOutput::new(hidden_size, vb.pp("output"))?; Ok(Self { attention, output }) } } impl Module for SegformerAttention { fn forward(&self, x: &Tensor) -> Result<Tensor> { let attention_output = self.attention.forward(x)?; self.output.forward(&attention_output) } } #[derive(Debug, Clone)] struct SegformerDWConv { dw_conv: Conv2d, } impl SegformerDWConv { fn new(dim: usize, vb: VarBuilder) -> Result<Self> { let dw_conv = conv2d( dim, dim, 3, Conv2dConfig { stride: 1, padding: 1, groups: dim, ..Default::default() }, vb.pp("dwconv"), )?; Ok(Self { dw_conv }) } } impl Module for SegformerDWConv { fn forward(&self, x: &Tensor) -> Result<Tensor> { self.dw_conv.forward(x) } } #[derive(Debug, Clone)] struct SegformerMixFFN { dense1: Linear, dw_conv: SegformerDWConv, act: Activation, dense2: Linear, } impl SegformerMixFFN { fn new( config: &Config, in_features: usize, hidden_features: usize, out_features: usize, vb: VarBuilder, ) -> Result<Self> { let dense1 = 
linear(in_features, hidden_features, vb.pp("dense1"))?; let dw_conv = SegformerDWConv::new(hidden_features, vb.pp("dwconv"))?; let act = config.hidden_act; let dense2 = linear(hidden_features, out_features, vb.pp("dense2"))?; Ok(Self { dense1, dw_conv, act, dense2, }) } } impl Module for SegformerMixFFN { fn forward(&self, x: &Tensor) -> Result<Tensor> { let (batch, _, height, width) = x.shape().dims4()?; let hidden_states = self .dense1 .forward(&x.flatten_from(2)?.permute((0, 2, 1))?)?; let channels = hidden_states.dim(2)?; let hidden_states = self.dw_conv.forward( &hidden_states .permute((0, 2, 1))? .reshape((batch, channels, height, width))?, )?; let hidden_states = self.act.forward(&hidden_states)?; let hidden_states = self .dense2 .forward(&hidden_states.flatten_from(2)?.permute((0, 2, 1))?)?; let channels = hidden_states.dim(2)?; hidden_states .permute((0, 2, 1))? .reshape((batch, channels, height, width)) } } #[derive(Debug, Clone)] struct SegformerLayer { layer_norm_1: candle_nn::LayerNorm, attention: SegformerAttention, layer_norm_2: candle_nn::LayerNorm, mlp: SegformerMixFFN, } impl SegformerLayer { fn new( config: &Config, hidden_size: usize, num_attention_heads: usize, sequence_reduction_ratio: usize, mlp_ratio: usize, vb: VarBuilder, ) -> Result<Self> { let layer_norm_1 = layer_norm(hidden_size, config.layer_norm_eps, vb.pp("layer_norm_1"))?; let attention = SegformerAttention::new( config, hidden_size, num_attention_heads, sequence_reduction_ratio, vb.pp("attention"), )?; let layer_norm_2 = layer_norm(hidden_size, config.layer_norm_eps, vb.pp("layer_norm_2"))?; let mlp = SegformerMixFFN::new( config, hidden_size, hidden_size * mlp_ratio, hidden_size, vb.pp("mlp"), )?; Ok(Self { layer_norm_1, attention, layer_norm_2, mlp, }) } } impl Module for SegformerLayer { fn forward(&self, x: &Tensor) -> Result<Tensor> { let shape = x.shape().dims4()?; // [B, C, H, W] -> [B, H * W, C] let hidden_states = x.flatten_from(2)?.permute((0, 2, 1))?; let layer_norm_output = self.layer_norm_1.forward(&hidden_states)?; let layer_norm_output = layer_norm_output.permute((0, 2, 1))?.reshape(shape)?; // attention takes in [B, C, H, W] in order to properly do conv2d (and output [B, H * W, C]) let attention_output = self.attention.forward(&layer_norm_output)?; let hidden_states = (attention_output + hidden_states)?; let layer_norm_output = self.layer_norm_2.forward(&hidden_states)?; let mlp_output = self .mlp .forward(&layer_norm_output.permute((0, 2, 1))?.reshape(shape)?)?; hidden_states.permute((0, 2, 1))?.reshape(shape)? 
+ mlp_output } } #[derive(Debug, Clone)] struct SegformerEncoder { /// config file config: Config, /// a list of embeddings patch_embeddings: Vec<SegformerOverlapPatchEmbeddings>, /// a list of attention blocks, each consisting of layers blocks: Vec<Vec<SegformerLayer>>, /// a final list of layer norms layer_norms: Vec<candle_nn::LayerNorm>, } impl SegformerEncoder { fn new(config: Config, vb: VarBuilder) -> Result<Self> { let mut patch_embeddings = Vec::with_capacity(config.num_encoder_blocks); let mut blocks = Vec::with_capacity(config.num_encoder_blocks); let mut layer_norms = Vec::with_capacity(config.num_encoder_blocks); for i in 0..config.num_encoder_blocks { let patch_size = config.patch_sizes[i]; let stride = config.strides[i]; let hidden_size = config.hidden_sizes[i]; let num_channels = if i == 0 { config.num_channels } else { config.hidden_sizes[i - 1] }; patch_embeddings.push(SegformerOverlapPatchEmbeddings::new( &config, patch_size, stride, num_channels, hidden_size, vb.pp(format!("patch_embeddings.{}", i)), )?); let mut layers = Vec::with_capacity(config.depths[i]); for j in 0..config.depths[i] { let sequence_reduction_ratio = config.sr_ratios[i]; let num_attention_heads = config.num_attention_heads[i]; let mlp_ratio = config.mlp_ratios[i]; layers.push(SegformerLayer::new( &config, hidden_size, num_attention_heads, sequence_reduction_ratio, mlp_ratio, vb.pp(format!("block.{}.{}", i, j)), )?); } blocks.push(layers); layer_norms.push(layer_norm( hidden_size, config.layer_norm_eps, vb.pp(format!("layer_norm.{}", i)), )?); } Ok(Self { config, patch_embeddings, blocks, layer_norms, }) } } impl ModuleWithHiddenStates for SegformerEncoder { fn forward(&self, x: &Tensor) -> Result<Vec<Tensor>> { let mut all_hidden_states = Vec::with_capacity(self.config.num_encoder_blocks); let mut hidden_states = x.clone(); for i in 0..self.config.num_encoder_blocks { hidden_states = self.patch_embeddings[i].forward(&hidden_states)?; for layer in &self.blocks[i] { hidden_states = layer.forward(&hidden_states)?; } let shape = hidden_states.shape().dims4()?; hidden_states = self.layer_norms[i].forward(&hidden_states.flatten_from(2)?.permute((0, 2, 1))?)?; hidden_states = hidden_states.permute((0, 2, 1))?.reshape(shape)?; all_hidden_states.push(hidden_states.clone()); } Ok(all_hidden_states) } } #[derive(Debug, Clone)] struct SegformerModel { encoder: SegformerEncoder, } impl SegformerModel { fn new(config: &Config, vb: VarBuilder) -> Result<Self> { let encoder = SegformerEncoder::new(config.clone(), vb.pp("encoder"))?; Ok(Self { encoder }) } } impl ModuleWithHiddenStates for SegformerModel { fn forward(&self, x: &Tensor) -> Result<Vec<Tensor>> { self.encoder.forward(x) } } #[derive(Debug, Clone)] struct SegformerMLP { proj: Linear, } impl SegformerMLP { fn new(config: &Config, input_dim: usize, vb: VarBuilder) -> Result<Self> { let proj = linear(input_dim, config.decoder_hidden_size, vb.pp("proj"))?; Ok(Self { proj }) } } impl Module for SegformerMLP { fn forward(&self, x: &Tensor) -> Result<Tensor> { self.proj.forward(x) } } #[derive(Debug, Clone)] struct SegformerDecodeHead { linear_c: Vec<SegformerMLP>, linear_fuse: candle_nn::Conv2d, batch_norm: candle_nn::BatchNorm, classifier: candle_nn::Conv2d, } impl SegformerDecodeHead { fn new(config: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> { let mut linear_c = Vec::with_capacity(config.num_encoder_blocks); for i in 0..config.num_encoder_blocks { let hidden_size = config.hidden_sizes[i]; linear_c.push(SegformerMLP::new( config, 
hidden_size, vb.pp(format!("linear_c.{}", i)), )?); } let linear_fuse = conv2d_no_bias( config.decoder_hidden_size * config.num_encoder_blocks, config.decoder_hidden_size, 1, Conv2dConfig::default(), vb.pp("linear_fuse"), )?; let batch_norm = candle_nn::batch_norm( config.decoder_hidden_size, config.layer_norm_eps, vb.pp("batch_norm"), )?; let classifier = conv2d_no_bias( config.decoder_hidden_size, num_labels, 1, Conv2dConfig::default(), vb.pp("classifier"), )?; Ok(Self { linear_c, linear_fuse, batch_norm, classifier, }) } fn forward(&self, encoder_hidden_states: &[Tensor]) -> Result<Tensor> { if encoder_hidden_states.len() != self.linear_c.len() { candle::bail!( "The number of encoder hidden states {} is not equal to the number of linear layers {}", encoder_hidden_states.len(), self.linear_c.len() ) } // most fine layer let (_, _, upsample_height, upsample_width) = encoder_hidden_states[0].shape().dims4()?; let mut hidden_states = Vec::with_capacity(self.linear_c.len()); for (hidden_state, mlp) in encoder_hidden_states.iter().zip(&self.linear_c) { let (batch, _, height, width) = hidden_state.shape().dims4()?; let hidden_state = mlp.forward(&hidden_state.flatten_from(2)?.permute((0, 2, 1))?)?; let hidden_state = hidden_state.permute((0, 2, 1))?.reshape(( batch, hidden_state.dim(2)?, height, width, ))?; let hidden_state = hidden_state.upsample_nearest2d(upsample_height, upsample_width)?; hidden_states.push(hidden_state); } hidden_states.reverse(); let hidden_states = Tensor::cat(&hidden_states, 1)?; let hidden_states = self.linear_fuse.forward(&hidden_states)?; let hidden_states = self.batch_norm.forward_t(&hidden_states, false)?; let hidden_states = hidden_states.relu()?; self.classifier.forward(&hidden_states) } } trait ModuleWithHiddenStates { fn forward(&self, xs: &Tensor) -> Result<Vec<Tensor>>; } #[derive(Debug, Clone)] pub struct SemanticSegmentationModel { segformer: SegformerModel, decode_head: SegformerDecodeHead, } impl SemanticSegmentationModel { pub fn new(config: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> { let segformer = SegformerModel::new(config, vb.pp("segformer"))?; let decode_head = SegformerDecodeHead::new(config, num_labels, vb.pp("decode_head"))?; Ok(Self { segformer, decode_head, }) } } impl Module for SemanticSegmentationModel { fn forward(&self, x: &Tensor) -> Result<Tensor> { let hidden_states = self.segformer.forward(x)?; self.decode_head.forward(&hidden_states) } } #[derive(Debug, Clone)] pub struct ImageClassificationModel { segformer: SegformerModel, classifier: Linear, } impl ImageClassificationModel { pub fn new(config: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> { let segformer = SegformerModel::new(config, vb.pp("segformer"))?; let classifier = linear(config.decoder_hidden_size, num_labels, vb.pp("classifier"))?; Ok(Self { segformer, classifier, }) } } impl Module for ImageClassificationModel { fn forward(&self, x: &Tensor) -> Result<Tensor> { let all_hidden_states = self.segformer.forward(x)?; let hidden_states = all_hidden_states.last().context("no last")?; let hidden_states = hidden_states.flatten_from(2)?.permute((0, 2, 1))?; let mean = hidden_states.mean(1)?; self.classifier.forward(&mean) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_config_json_load() { let raw_json = r#"{ "architectures": [ "SegformerForImageClassification" ], "attention_probs_dropout_prob": 0.0, "classifier_dropout_prob": 0.1, "decoder_hidden_size": 256, "depths": [ 2, 2, 2, 2 ], "downsampling_rates": [ 1, 4, 8, 16 ], 
"drop_path_rate": 0.1, "hidden_act": "gelu", "hidden_dropout_prob": 0.0, "hidden_sizes": [ 32, 64, 160, 256 ], "image_size": 224, "initializer_range": 0.02, "layer_norm_eps": 1e-06, "mlp_ratios": [ 4, 4, 4, 4 ], "model_type": "segformer", "num_attention_heads": [ 1, 2, 5, 8 ], "num_channels": 3, "num_encoder_blocks": 4, "patch_sizes": [ 7, 3, 3, 3 ], "sr_ratios": [ 8, 4, 2, 1 ], "strides": [ 4, 2, 2, 2 ], "torch_dtype": "float32", "transformers_version": "4.12.0.dev0" }"#; let config: Config = serde_json::from_str(raw_json).unwrap(); assert_eq!(vec![4, 2, 2, 2], config.strides); assert_eq!(1e-6, config.layer_norm_eps); } }
candle/candle-transformers/src/models/segformer.rs/0
{ "file_path": "candle/candle-transformers/src/models/segformer.rs", "repo_id": "candle", "token_count": 11540 }
//! ResNet Building Blocks //! //! Some Residual Network blocks used in UNet models. //! //! Denoising Diffusion Implicit Models, K. He and al, 2015. //! - [Paper](https://arxiv.org/abs/1512.03385) //! use crate::models::with_tracing::{conv2d, Conv2d}; use candle::{Result, Tensor, D}; use candle_nn as nn; use candle_nn::Module; /// Configuration for a ResNet block. #[derive(Debug, Clone, Copy)] pub struct ResnetBlock2DConfig { /// The number of output channels, defaults to the number of input channels. pub out_channels: Option<usize>, pub temb_channels: Option<usize>, /// The number of groups to use in group normalization. pub groups: usize, pub groups_out: Option<usize>, /// The epsilon to be used in the group normalization operations. pub eps: f64, /// Whether to use a 2D convolution in the skip connection. When using None, /// such a convolution is used if the number of input channels is different from /// the number of output channels. pub use_in_shortcut: Option<bool>, // non_linearity: silu /// The final output is scaled by dividing by this value. pub output_scale_factor: f64, } impl Default for ResnetBlock2DConfig { fn default() -> Self { Self { out_channels: None, temb_channels: Some(512), groups: 32, groups_out: None, eps: 1e-6, use_in_shortcut: None, output_scale_factor: 1., } } } #[derive(Debug)] pub struct ResnetBlock2D { norm1: nn::GroupNorm, conv1: Conv2d, norm2: nn::GroupNorm, conv2: Conv2d, time_emb_proj: Option<nn::Linear>, conv_shortcut: Option<Conv2d>, span: tracing::Span, config: ResnetBlock2DConfig, } impl ResnetBlock2D { pub fn new( vs: nn::VarBuilder, in_channels: usize, config: ResnetBlock2DConfig, ) -> Result<Self> { let out_channels = config.out_channels.unwrap_or(in_channels); let conv_cfg = nn::Conv2dConfig { stride: 1, padding: 1, groups: 1, dilation: 1, }; let norm1 = nn::group_norm(config.groups, in_channels, config.eps, vs.pp("norm1"))?; let conv1 = conv2d(in_channels, out_channels, 3, conv_cfg, vs.pp("conv1"))?; let groups_out = config.groups_out.unwrap_or(config.groups); let norm2 = nn::group_norm(groups_out, out_channels, config.eps, vs.pp("norm2"))?; let conv2 = conv2d(out_channels, out_channels, 3, conv_cfg, vs.pp("conv2"))?; let use_in_shortcut = config .use_in_shortcut .unwrap_or(in_channels != out_channels); let conv_shortcut = if use_in_shortcut { let conv_cfg = nn::Conv2dConfig { stride: 1, padding: 0, groups: 1, dilation: 1, }; Some(conv2d( in_channels, out_channels, 1, conv_cfg, vs.pp("conv_shortcut"), )?) } else { None }; let time_emb_proj = match config.temb_channels { None => None, Some(temb_channels) => Some(nn::linear( temb_channels, out_channels, vs.pp("time_emb_proj"), )?), }; let span = tracing::span!(tracing::Level::TRACE, "resnet2d"); Ok(Self { norm1, conv1, norm2, conv2, time_emb_proj, span, config, conv_shortcut, }) } pub fn forward(&self, xs: &Tensor, temb: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let shortcut_xs = match &self.conv_shortcut { Some(conv_shortcut) => conv_shortcut.forward(xs)?, None => xs.clone(), }; let xs = self.norm1.forward(xs)?; let xs = self.conv1.forward(&nn::ops::silu(&xs)?)?; let xs = match (temb, &self.time_emb_proj) { (Some(temb), Some(time_emb_proj)) => time_emb_proj .forward(&nn::ops::silu(temb)?)? .unsqueeze(D::Minus1)? .unsqueeze(D::Minus1)? .broadcast_add(&xs)?, _ => xs, }; let xs = self .conv2 .forward(&nn::ops::silu(&self.norm2.forward(&xs)?)?)?; (shortcut_xs + xs)? / self.config.output_scale_factor } }
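
// ---------------------------------------------------------------------------
// Example (sketch, not part of the upstream file): building a single block
// and running it with a timestep embedding. `vs`, `xs` and `temb` are assumed
// to be provided by the caller; the channel sizes are arbitrary illustrative
// values, in the UNet they come from the surrounding block configuration.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn resnet_block_sketch(
    vs: nn::VarBuilder,
    xs: &Tensor,   // (batch, 320, height, width)
    temb: &Tensor, // (batch, 1280) timestep embedding
) -> Result<Tensor> {
    let cfg = ResnetBlock2DConfig {
        out_channels: Some(320),
        temb_channels: Some(1280),
        ..Default::default()
    };
    let block = ResnetBlock2D::new(vs, 320, cfg)?;
    block.forward(xs, Some(temb))
}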
candle/candle-transformers/src/models/stable_diffusion/resnet.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/resnet.rs", "repo_id": "candle", "token_count": 2292 }
use super::Config; use crate::models::with_tracing::{linear, linear_no_bias, Linear}; use candle::{Device, IndexOp, Result, Tensor, D}; use candle_nn::{embedding, Conv1d, Conv1dConfig, Embedding, LayerNorm, Module, VarBuilder}; fn conv1d( in_channels: usize, out_channels: usize, kernel_size: usize, config: Conv1dConfig, vb: VarBuilder, ) -> Result<Conv1d> { let weight = vb.get((out_channels, in_channels, kernel_size), "weight")?; let bias = vb.get(out_channels, "bias")?; Ok(Conv1d::new(weight, Some(bias), config)) } fn layer_norm(size: usize, vb: VarBuilder) -> Result<LayerNorm> { let weight = vb.get(size, "weight")?; let bias = vb.get(size, "bias")?; Ok(LayerNorm::new(weight, bias, 1e-5)) } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L62 #[derive(Debug, Clone)] struct MultiHeadAttention { query: Linear, key: Linear, value: Linear, out: Linear, n_head: usize, span: tracing::Span, softmax_span: tracing::Span, matmul_span: tracing::Span, kv_cache: Option<(Tensor, Tensor)>, } impl MultiHeadAttention { fn load(n_state: usize, n_head: usize, vb: VarBuilder) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "multi-head-attn"); let softmax_span = tracing::span!(tracing::Level::TRACE, "multi-head-attn-softmax"); let matmul_span = tracing::span!(tracing::Level::TRACE, "multi-head-attn-matmul"); let query = linear(n_state, n_state, vb.pp("q_proj"))?; let value = linear(n_state, n_state, vb.pp("v_proj"))?; let key = linear_no_bias(n_state, n_state, vb.pp("k_proj"))?; let out = linear(n_state, n_state, vb.pp("out_proj"))?; Ok(Self { query, key, value, out, n_head, span, softmax_span, matmul_span, kv_cache: None, }) } fn forward( &mut self, x: &Tensor, xa: Option<&Tensor>, mask: Option<&Tensor>, flush_cache: bool, ) -> Result<Tensor> { let _enter = self.span.enter(); let q = self.query.forward(x)?; let (k, v) = match xa { None => { let k = self.key.forward(x)?; let v = self.value.forward(x)?; (k, v) } Some(x) => { if flush_cache { self.kv_cache = None; } if let Some((k, v)) = &self.kv_cache { (k.clone(), v.clone()) } else { let k = self.key.forward(x)?; let v = self.value.forward(x)?; self.kv_cache = Some((k.clone(), v.clone())); (k, v) } } }; let wv = self.qkv_attention(&q, &k, &v, mask)?; let out = self.out.forward(&wv)?; Ok(out) } fn reshape_head(&self, x: &Tensor) -> Result<Tensor> { let (n_batch, n_ctx, n_state) = x.dims3()?; let target_dims = &[n_batch, n_ctx, self.n_head, n_state / self.n_head]; x.reshape(target_dims)?.transpose(1, 2) } fn qkv_attention( &self, q: &Tensor, k: &Tensor, v: &Tensor, mask: Option<&Tensor>, ) -> Result<Tensor> { let (_, n_ctx, n_state) = q.dims3()?; let scale = ((n_state / self.n_head) as f64).powf(-0.25); let q = (self.reshape_head(q)? * scale)?; let k = (self.reshape_head(k)?.transpose(2, 3)? * scale)?; let v = self.reshape_head(v)?.contiguous()?; let mut qk = { let _enter = self.matmul_span.enter(); q.matmul(&k)? }; if let Some(mask) = mask { let mask = mask.i((0..n_ctx, 0..n_ctx))?; qk = qk.broadcast_add(&mask)? } let w = { let _enter = self.softmax_span.enter(); candle_nn::ops::softmax_last_dim(&qk)? }; let wv = { let _enter = self.matmul_span.enter(); w.matmul(&v)? } .transpose(1, 2)? 
.flatten_from(2)?; Ok(wv) } fn reset_kv_cache(&mut self) { self.kv_cache = None; } } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L111 #[derive(Debug, Clone)] struct ResidualAttentionBlock { attn: MultiHeadAttention, attn_ln: LayerNorm, cross_attn: Option<(MultiHeadAttention, LayerNorm)>, mlp_linear1: Linear, mlp_linear2: Linear, mlp_ln: LayerNorm, span: tracing::Span, } impl ResidualAttentionBlock { fn load(n_state: usize, n_head: usize, ca: bool, vb: VarBuilder) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "residual-attn"); let attn = MultiHeadAttention::load(n_state, n_head, vb.pp("self_attn"))?; let attn_ln = layer_norm(n_state, vb.pp("self_attn_layer_norm"))?; let cross_attn = if ca { let cross_attn = MultiHeadAttention::load(n_state, n_head, vb.pp("encoder_attn"))?; let cross_attn_ln = layer_norm(n_state, vb.pp("encoder_attn_layer_norm"))?; Some((cross_attn, cross_attn_ln)) } else { None }; let n_mlp = n_state * 4; let mlp_linear1 = linear(n_state, n_mlp, vb.pp("fc1"))?; let mlp_linear2 = linear(n_mlp, n_state, vb.pp("fc2"))?; let mlp_ln = layer_norm(n_state, vb.pp("final_layer_norm"))?; Ok(Self { attn, attn_ln, cross_attn, mlp_linear1, mlp_linear2, mlp_ln, span, }) } fn forward( &mut self, x: &Tensor, xa: Option<&Tensor>, mask: Option<&Tensor>, flush_kv_cache: bool, ) -> Result<Tensor> { let _enter = self.span.enter(); let attn = self .attn .forward(&self.attn_ln.forward(x)?, None, mask, flush_kv_cache)?; let mut x = (x + attn)?; if let Some((attn, ln)) = &mut self.cross_attn { x = (&x + attn.forward(&ln.forward(&x)?, xa, None, flush_kv_cache)?)?; } let mlp = self.mlp_linear2.forward( &self .mlp_linear1 .forward(&self.mlp_ln.forward(&x)?)? .gelu()?, )?; x + mlp } fn reset_kv_cache(&mut self) { self.attn.reset_kv_cache(); if let Some((attn, _)) = &mut self.cross_attn { attn.reset_kv_cache(); } } } fn sinusoids(length: usize, channels: usize, device: &Device) -> Result<Tensor> { let max_timescale = 10000f32; let log_timescale_increment = max_timescale.ln() / (channels / 2 - 1) as f32; let inv_timescales: Vec<_> = (0..channels / 2) .map(|i| (i as f32 * (-log_timescale_increment)).exp()) .collect(); let inv_timescales = Tensor::new(inv_timescales.as_slice(), device)?.unsqueeze(0)?; let arange = Tensor::arange(0, length as u32, device)? .to_dtype(candle::DType::F32)? .unsqueeze(1)?; let sh = (length, channels / 2); let scaled_time = (arange.broadcast_as(sh)? 
* inv_timescales.broadcast_as(sh)?)?; let sincos = Tensor::cat(&[scaled_time.sin()?, scaled_time.cos()?], 1)?; Ok(sincos) } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L143 #[derive(Debug, Clone)] pub struct AudioEncoder { conv1: Conv1d, conv2: Conv1d, positional_embedding: Tensor, blocks: Vec<ResidualAttentionBlock>, ln_post: LayerNorm, span: tracing::Span, conv1_span: tracing::Span, conv2_span: tracing::Span, } impl AudioEncoder { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "audio-encoder"); let conv1_span = tracing::span!(tracing::Level::TRACE, "conv1"); let conv2_span = tracing::span!(tracing::Level::TRACE, "conv2"); let n_state = cfg.d_model; let n_head = cfg.encoder_attention_heads; let n_ctx = cfg.max_source_positions; let cfg1 = Conv1dConfig { padding: 1, stride: 1, groups: 1, dilation: 1, }; let cfg2 = Conv1dConfig { padding: 1, stride: 2, groups: 1, dilation: 1, }; let conv1 = conv1d(cfg.num_mel_bins, n_state, 3, cfg1, vb.pp("conv1"))?; let conv2 = conv1d(n_state, n_state, 3, cfg2, vb.pp("conv2"))?; let positional_embedding = sinusoids(n_ctx, n_state, vb.device())?; let blocks = (0..cfg.encoder_layers) .map(|i| { ResidualAttentionBlock::load(n_state, n_head, false, vb.pp(format!("layers.{i}"))) }) .collect::<Result<Vec<_>>>()?; let ln_post = layer_norm(n_state, vb.pp("layer_norm"))?; Ok(Self { conv1, conv2, positional_embedding, blocks, ln_post, conv1_span, conv2_span, span, }) } pub fn forward(&mut self, x: &Tensor, flush_kv_cache: bool) -> Result<Tensor> { let _enter = self.span.enter(); let x = { let _enter = self.conv1_span.enter(); self.conv1.forward(x)?.gelu()? }; let x = { let _enter = self.conv2_span.enter(); self.conv2.forward(&x)?.gelu()? }; let x = x.transpose(1, 2)?; let (_bsize, seq_len, _hidden) = x.dims3()?; let positional_embedding = self.positional_embedding.narrow(0, 0, seq_len)?; let mut x = x.broadcast_add(&positional_embedding)?; for block in self.blocks.iter_mut() { x = block.forward(&x, None, None, flush_kv_cache)? 
} let x = self.ln_post.forward(&x)?; Ok(x) } } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L176 #[derive(Debug, Clone)] pub struct TextDecoder { token_embedding: Embedding, positional_embedding: Tensor, blocks: Vec<ResidualAttentionBlock>, ln: LayerNorm, mask: Tensor, span: tracing::Span, span_final: tracing::Span, } impl TextDecoder { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "text-decoder"); let span_final = tracing::span!(tracing::Level::TRACE, "text-decoder-final"); let n_state = cfg.d_model; let n_head = cfg.decoder_attention_heads; let n_ctx = cfg.max_target_positions; let token_embedding = embedding(cfg.vocab_size, n_state, vb.pp("embed_tokens"))?; let positional_embedding = vb.get((n_ctx, n_state), "embed_positions.weight")?; let blocks = (0..cfg.decoder_layers) .map(|i| { ResidualAttentionBlock::load(n_state, n_head, true, vb.pp(format!("layers.{i}"))) }) .collect::<Result<Vec<_>>>()?; let ln = layer_norm(n_state, vb.pp("layer_norm"))?; let mask: Vec<_> = (0..n_ctx) .flat_map(|i| (0..n_ctx).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 })) .collect(); let mask = Tensor::from_vec(mask, (n_ctx, n_ctx), vb.device())?; Ok(Self { token_embedding, positional_embedding, blocks, ln, mask, span, span_final, }) } pub fn forward(&mut self, x: &Tensor, xa: &Tensor, flush_kv_cache: bool) -> Result<Tensor> { let _enter = self.span.enter(); let last = x.dim(D::Minus1)?; let token_embedding = self.token_embedding.forward(x)?; let positional_embedding = self.positional_embedding.narrow(0, 0, last)?; let mut x = token_embedding.broadcast_add(&positional_embedding)?; for block in self.blocks.iter_mut() { x = block.forward(&x, Some(xa), Some(&self.mask), flush_kv_cache)?; } self.ln.forward(&x) } pub fn final_linear(&self, x: &Tensor) -> Result<Tensor> { let b_size = x.dim(0)?; let w = self.token_embedding.embeddings().broadcast_left(b_size)?; let logits = { let _enter = self.span_final.enter(); x.matmul(&w.t()?)? }; Ok(logits) } pub fn reset_kv_cache(&mut self) { for block in self.blocks.iter_mut() { block.reset_kv_cache(); } } } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L221 #[derive(Debug, Clone)] pub struct Whisper { pub encoder: AudioEncoder, pub decoder: TextDecoder, pub config: Config, } impl Whisper { pub fn load(vb: &VarBuilder, config: Config) -> Result<Self> { let encoder = AudioEncoder::load(vb.pp("model.encoder"), &config)?; let decoder = TextDecoder::load(vb.pp("model.decoder"), &config)?; Ok(Self { encoder, decoder, config, }) } pub fn reset_kv_cache(&mut self) { self.encoder .blocks .iter_mut() .for_each(|b| b.reset_kv_cache()); self.decoder.reset_kv_cache(); } }
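
// ---------------------------------------------------------------------------
// Example (sketch, not part of the upstream file): one encoder pass followed
// by one decoder pass. `vb`, `config`, `mel` and `tokens` are assumed to be
// prepared by the caller (mel is the log-mel spectrogram, tokens the decoder
// prompt); only APIs defined in this module are used.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn whisper_step_sketch(
    vb: &VarBuilder,
    config: Config,
    mel: &Tensor,    // (batch, num_mel_bins, n_frames)
    tokens: &Tensor, // (batch, seq_len) decoder token ids
) -> Result<Tensor> {
    let mut model = Whisper::load(vb, config)?;
    // Encode the audio once, flushing any stale KV cache.
    let audio_features = model.encoder.forward(mel, true)?;
    // Decode the current token prefix against the audio features.
    let hidden = model.decoder.forward(tokens, &audio_features, true)?;
    // Project back onto the vocabulary via the tied token embedding.
    model.decoder.final_linear(&hidden)
}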
candle/candle-transformers/src/models/whisper/model.rs/0
{ "file_path": "candle/candle-transformers/src/models/whisper/model.rs", "repo_id": "candle", "token_count": 7050 }
//! Varbuilder for Loading gguf files //! //! VarBuilder is a utility to store quantized tensors from a [GGUF model file](https://huggingface.co/docs/hub/gguf). //! These tensors can be loaded from disk using `from_gguf` or from an in-memory //! buffer using `from_gguf_buffer`. use candle::quantized::QTensor; use candle::{Device, Result, Shape}; use std::sync::Arc; // VarBuilder specialized for QTensors #[derive(Clone)] pub struct VarBuilder { data: Arc<std::collections::HashMap<String, Arc<QTensor>>>, path: Vec<String>, device: Device, } impl VarBuilder { pub fn from_gguf<P: AsRef<std::path::Path>>(p: P, device: &Device) -> Result<Self> { let mut file = std::fs::File::open(p)?; let content = candle::quantized::gguf_file::Content::read(&mut file)?; let mut data = std::collections::HashMap::new(); for tensor_name in content.tensor_infos.keys() { let tensor = content.tensor(&mut file, tensor_name, device)?; data.insert(tensor_name.to_string(), Arc::new(tensor)); } Ok(Self { data: Arc::new(data), path: Vec::new(), device: device.clone(), }) } pub fn from_gguf_buffer(buffer: &[u8], device: &Device) -> Result<Self> { let mut cursor = std::io::Cursor::new(buffer); let content = candle::quantized::gguf_file::Content::read(&mut cursor)?; let mut data = std::collections::HashMap::new(); for tensor_name in content.tensor_infos.keys() { let tensor = content.tensor(&mut cursor, tensor_name, device)?; data.insert(tensor_name.to_string(), Arc::new(tensor)); } Ok(Self { data: Arc::new(data), path: Vec::new(), device: device.clone(), }) } pub fn pp<S: ToString>(&self, s: S) -> Self { let mut path = self.path.clone(); path.push(s.to_string()); Self { data: self.data.clone(), path, device: self.device.clone(), } } fn path(&self, tensor_name: &str) -> String { if self.path.is_empty() { tensor_name.to_string() } else { [&self.path.join("."), tensor_name].join(".") } } pub fn get<S: Into<Shape>>(&self, s: S, name: &str) -> Result<Arc<QTensor>> { let path = self.path(name); match self.data.get(&path) { None => { candle::bail!("cannot find tensor {path}") } Some(qtensor) => { let shape = s.into(); if qtensor.shape() != &shape { candle::bail!( "shape mismatch for {name}, got {:?}, expected {shape:?}", qtensor.shape() ) } Ok(qtensor.clone()) } } } pub fn get_no_shape(&self, name: &str) -> Result<Arc<QTensor>> { let path = self.path(name); match self.data.get(&path) { None => { candle::bail!("cannot find tensor {name}") } Some(qtensor) => Ok(qtensor.clone()), } } pub fn device(&self) -> &Device { &self.device } pub fn contains_key(&self, key: &str) -> bool { self.data.contains_key(key) } }
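
// ---------------------------------------------------------------------------
// Example (sketch, not part of the upstream file): reading one quantized
// tensor out of a GGUF checkpoint. The tensor name and shape below are
// hypothetical and depend on how the model file was exported.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn gguf_lookup_sketch(
    gguf_path: &std::path::Path,
    device: &Device,
) -> Result<Arc<QTensor>> {
    let vb = VarBuilder::from_gguf(gguf_path, device)?;
    // Prefixes accumulate, so this lookup resolves to the full name
    // "model.layers.0.attn.Wqkv.weight".
    let layer_vb = vb.pp("model").pp("layers").pp(0);
    // `get` also checks that the stored shape matches the expected one.
    layer_vb.get((3072, 1024), "attn.Wqkv.weight")
}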
candle/candle-transformers/src/quantized_var_builder.rs/0
{ "file_path": "candle/candle-transformers/src/quantized_var_builder.rs", "repo_id": "candle", "token_count": 1649 }
<!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <title>Candle Blip Image Captioning Demo</title> <script src="https://cdn.tailwindcss.com"></script> <script type="module" src="./code.js"></script> <script type="module"> const MODELS = { blip_image_quantized_q4k: { base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/", model: "blip-image-captioning-large-q4k.gguf", config: "config.json", tokenizer: "tokenizer.json", quantized: true, size: "271 MB", }, blip_image_quantized_q80: { base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/", model: "blip-image-captioning-large-q80.gguf", config: "config.json", tokenizer: "tokenizer.json", quantized: true, size: "505 MB", }, blip_image_large: { base_url: "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/refs%2Fpr%2F18/", model: "model.safetensors", config: "config.json", tokenizer: "tokenizer.json", quantized: false, size: "1.88 GB", }, }; const blipWorker = new Worker("./blipWorker.js", { type: "module", }); const outputStatusEl = document.querySelector("#output-status"); const outputCaptionEl = document.querySelector("#output-caption"); const modelSelectEl = document.querySelector("#model"); const clearBtn = document.querySelector("#clear-btn"); const fileUpload = document.querySelector("#file-upload"); const dropArea = document.querySelector("#drop-area"); const imagesExamples = document.querySelector("#image-select"); const canvas = document.querySelector("#canvas"); const ctxCanvas = canvas.getContext("2d"); let isCaptioning = false; let currentImageURL = null; clearBtn.addEventListener("click", () => { clearImageCanvas(); }); modelSelectEl.addEventListener("change", () => { if (currentImageURL) { runInference(currentImageURL); } }); //add event listener to file input fileUpload.addEventListener("input", async (e) => { const target = e.target; if (target.files.length > 0) { const href = URL.createObjectURL(target.files[0]); clearImageCanvas(); await drawImageCanvas(href); runInference(href); } }); // add event listener to drop-area dropArea.addEventListener("dragenter", (e) => { e.preventDefault(); dropArea.classList.add("border-blue-700"); }); dropArea.addEventListener("dragleave", (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); }); dropArea.addEventListener("dragover", (e) => { e.preventDefault(); }); dropArea.addEventListener("drop", async (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); const url = e.dataTransfer.getData("text/uri-list"); const files = e.dataTransfer.files; if (files.length > 0) { const href = URL.createObjectURL(files[0]); clearImageCanvas(); await drawImageCanvas(href); runInference(href); } else if (url) { clearImageCanvas(); await drawImageCanvas(url); runInference(url); } }); imagesExamples.addEventListener("click", async (e) => { if (isCaptioning) { return; } const target = e.target; if (target.nodeName === "IMG") { const href = target.src; clearImageCanvas(); await drawImageCanvas(href); runInference(href); } }); function clearImageCanvas() { ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); isCaptioning = false; clearBtn.disabled = true; canvas.parentElement.style.height = 
"auto"; outputStatusEl.hidden = false; outputCaptionEl.hidden = true; outputStatusEl.innerText = "Please select an image"; currentImageURL = null; } async function drawImageCanvas(imgURL) { if (!imgURL) { throw new Error("No image URL provided"); } return new Promise((resolve, reject) => { ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); const img = new Image(); img.crossOrigin = "anonymous"; img.onload = () => { canvas.width = img.width; canvas.height = img.height; ctxCanvas.drawImage(img, 0, 0); canvas.parentElement.style.height = canvas.offsetHeight + "px"; clearBtn.disabled = false; resolve(img); }; img.src = imgURL; currentImageURL = imgURL; }); } document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelSelectEl.appendChild(option); } }); async function getImageCaption( worker, weightsURL, tokenizerURL, configURL, modelID, imageURL, quantized, updateStatus = null ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, imageURL, quantized, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } function updateStatus(data) { if (data.status === "status") { outputStatusEl.innerText = data.message; } } async function runInference(imageURL) { if (isCaptioning || !imageURL) { alert("Please select an image first"); return; } outputStatusEl.hidden = false; outputCaptionEl.hidden = true; clearBtn.disabled = true; modelSelectEl.disabled = true; isCaptioning = true; const selectedModel = modelSelectEl.value; const model = MODELS[selectedModel]; const weightsURL = `${model.base_url}${model.model}`; const tokenizerURL = `${model.base_url}${model.tokenizer}`; const configURL = `${model.base_url}${model.config}`; const quantized = model.quantized; try { const time = performance.now(); const caption = await getImageCaption( blipWorker, weightsURL, tokenizerURL, configURL, selectedModel, imageURL, quantized, updateStatus ); outputStatusEl.hidden = true; outputCaptionEl.hidden = false; const totalTime = ((performance.now() - time)/1000).toFixed(2); outputCaptionEl.innerHTML = `${ caption.output }<br/><span class="text-xs">Inference time: ${totalTime} s</span>`; } catch (err) { console.error(err); outputStatusEl.hidden = false; outputCaptionEl.hidden = true; outputStatusEl.innerText = err.message; } clearBtn.disabled = false; modelSelectEl.disabled = false; isCaptioning = false; } </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-5 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle BLIP Image Captioning</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> <a href="https://huggingface.co/Salesforce/blip-image-captioning-large" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >BLIP Image Captioning </a> running in the browser using <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 
hover:no-underline" >Candle</a >, a minimalist ML framework for Rust. </p> <p class="text-xs max-w-lg py-2"> <b>Note:</b> The image captioning on the smallest model takes about ~50 seconds, it will vary depending on your machine and model size. </p> </div> <div> <label for="model" class="font-medium block">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light interactive disabled:cursor-not-allowed w-full max-w-max" ></select> </div> <!-- drag and drop area --> <div class="grid gap-4 sm:grid-cols-2 py-4"> <div class="relative max-w-lg"> <div class="absolute w-full bottom-full flex justify-between items-center" > <div class="flex gap-2 w-full"> <button id="clear-btn" disabled title="Clear Image" class="ml-auto text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center" > <svg class="" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 13 12" height="1em" > <path d="M1.6.7 12 11.1M12 .7 1.6 11.1" stroke="#2E3036" stroke-width="2" /> </svg> </button> </div> </div> <div id="drop-area" class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative aspect-video w-full overflow-hidden" > <div class="flex flex-col items-center justify-center space-y-1 text-center" > <svg width="25" height="25" viewBox="0 0 25 25" fill="none" xmlns="http://www.w3.org/2000/svg" > <path d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z" fill="#000" /> </svg> <div class="flex text-sm text-gray-600"> <label for="file-upload" class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700" > <span>Drag and drop y our image here</span> <span class="block text-xs">or</span> <span class="block text-xs">Click to upload</span> </label> </div> <input id="file-upload" name="file-upload" type="file" class="sr-only" /> </div> <canvas id="canvas" class="absolute pointer-events-none w-full" ></canvas> </div> </div> <div class=""> <div class="h-full bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2" > <p id="output-caption" class="m-auto text-xl text-center p-2" hidden ></p> <span id="output-status" class="m-auto font-light"> Please select an image </span> </div> </div> </div> <div> <div class="flex gap-3 items-center overflow-x-scroll" id="image-select" > <h3 class="font-medium">Examples:</h3> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg" class="cursor-pointer w-24 h-24 object-cover" /> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg" class="cursor-pointer w-24 h-24 object-cover" /> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg" class="cursor-pointer w-24 h-24 object-cover" /> </div> </div> </main> </body> </html>
candle/candle-wasm-examples/blip/index.html/0
{ "file_path": "candle/candle-wasm-examples/blip/index.html", "repo_id": "candle", "token_count": 7164 }
use crate::model::{Cache, Config, Llama}; use byteorder::{LittleEndian, ReadBytesExt}; use candle::{DType, Device, IndexOp, Result, Shape, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use serde::{Deserialize, Serialize}; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; use yew_agent::{HandlerId, Public, WorkerLink}; #[wasm_bindgen] extern "C" { // Use `js_namespace` here to bind `console.log(..)` instead of just // `log(..)` #[wasm_bindgen(js_namespace = console)] pub fn log(s: &str); } #[macro_export] macro_rules! console_log { // Note that this is using the `log` function imported above during // `bare_bones` ($($t:tt)*) => ($crate::worker::log(&format_args!($($t)*).to_string())) } // Communication to the worker happens through bincode, the model weights and configs are fetched // on the main thread and transferred via the following structure. #[derive(Serialize, Deserialize)] pub struct ModelData { pub tokenizer: Vec<u8>, pub model: Vec<u8>, } fn read_i32<R: std::io::Read>(r: &mut R) -> Result<i32> { let mut buf = [0u8; 4]; r.read_exact(&mut buf)?; Ok(i32::from_le_bytes(buf)) } fn read_tensor<R: std::io::Read, S: Into<Shape>>( r: &mut R, shape: S, dev: &Device, ) -> Result<Tensor> { let shape = shape.into(); let mut data_t = vec![0f32; shape.elem_count()]; r.read_f32_into::<LittleEndian>(&mut data_t)?; let tensor = Tensor::from_vec(data_t, shape, dev)?; Ok(tensor) } pub struct Model { pub cache: Cache, pub config: Config, pub llama: Llama, pub tokenizer: Tokenizer, } impl Model { fn run( &self, link: &WorkerLink<Worker>, id: HandlerId, temp: f64, top_p: f64, prompt: String, ) -> Result<()> { let dev = Device::Cpu; let temp = if temp <= 0. { None } else { Some(temp) }; let top_p = if top_p <= 0. || top_p >= 1.0 { None } else { Some(top_p) }; console_log!("temp: {temp:?} top_p: {top_p:?} prompt: {prompt}"); let mut logits_processor = LogitsProcessor::new(299792458, temp, top_p); let mut index_pos = 0; let mut tokens = self .tokenizer .encode(prompt.to_string(), true) .map_err(|m| candle::Error::Msg(m.to_string()))? .get_ids() .to_vec(); link.respond(id, Ok(WorkerOutput::Generated(prompt))); for index in 0.. { if tokens.len() >= self.config.seq_len { break; } let context_size = if self.cache.use_kv_cache && index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &dev)?.unsqueeze(0)?; let logits = self.llama.forward(&input, index_pos)?; let logits = logits.squeeze(0)?; index_pos += ctxt.len(); let next_token = logits_processor.sample(&logits)?; tokens.push(next_token); if let Some(text) = self.tokenizer.id_to_token(next_token) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); link.respond(id, Ok(WorkerOutput::Generated(text))); } } Ok(()) } } impl Config { fn from_reader<R: std::io::Read>(r: &mut R) -> Result<Self> { let dim = read_i32(r)? as usize; let hidden_dim = read_i32(r)? as usize; let n_layers = read_i32(r)? as usize; let n_heads = read_i32(r)? as usize; let n_kv_heads = read_i32(r)? as usize; let vocab_size = read_i32(r)? as usize; let seq_len = read_i32(r)? 
as usize; Ok(Self { dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, seq_len, norm_eps: 1e-5, }) } pub fn head_size(&self) -> usize { self.dim / self.n_heads } } struct TransformerWeights { // token embedding table token_embedding_table: Tensor, // (vocab_size, dim) // weights for rmsnorms rms_att_weight: Tensor, // (layer, dim) rmsnorm weights rms_ffn_weight: Tensor, // (layer, dim) // weights for matmuls wq: Tensor, // (layer, dim, dim) wk: Tensor, // (layer, dim, dim) wv: Tensor, // (layer, dim, dim) wo: Tensor, // (layer, dim, dim) // weights for ffn w1: Tensor, // (layer, hidden_dim, dim) w2: Tensor, // (layer, dim, hidden_dim) w3: Tensor, // (layer, hidden_dim, dim) // final rmsnorm rms_final_weight: Tensor, // (dim,) // freq_cis for RoPE relatively positional embeddings freq_cis_real: Tensor, // (seq_len, head_size/2) freq_cis_imag: Tensor, // (seq_len, head_size/2) } impl TransformerWeights { fn from_reader<R: std::io::Read>(r: &mut R, c: &Config, dev: &Device) -> Result<Self> { let token_embedding_table = read_tensor(r, (c.vocab_size, c.dim), dev)?; let rms_att_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let wq = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wk = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wv = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wo = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let rms_ffn_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let w1 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let w2 = read_tensor(r, (c.n_layers, c.dim, c.hidden_dim), dev)?; let w3 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let rms_final_weight = read_tensor(r, c.dim, dev)?; let head_size = c.head_size(); let freq_cis_real = read_tensor(r, (c.seq_len, head_size / 2), dev)?; let freq_cis_imag = read_tensor(r, (c.seq_len, head_size / 2), dev)?; Ok(Self { token_embedding_table, rms_att_weight, wq, wk, wv, wo, rms_ffn_weight, w1, w2, w3, rms_final_weight, freq_cis_real, freq_cis_imag, }) } fn var_builder(&self, cfg: &Config, device: &Device) -> Result<VarBuilder> { let mut ws = std::collections::HashMap::new(); let mut insert = |name: &str, t: Tensor| { ws.insert(name.to_string(), t); }; insert("rot.freq_cis_real", self.freq_cis_real.clone()); insert("rot.freq_cis_imag", self.freq_cis_imag.clone()); insert( "model.embed_tokens.weight", self.token_embedding_table.clone(), ); insert("lm_head.weight", self.token_embedding_table.clone()); insert("model.norm.weight", self.rms_final_weight.clone()); for layer in 0..cfg.n_layers { ws.insert( format!("model.layers.{layer}.self_attn.q_proj.weight"), self.wq.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.k_proj.weight"), self.wk.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.v_proj.weight"), self.wv.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.o_proj.weight"), self.wo.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.gate_proj.weight"), self.w1.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.down_proj.weight"), self.w2.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.up_proj.weight"), self.w3.i(layer)?, ); ws.insert( format!("model.layers.{layer}.input_layernorm.weight"), self.rms_att_weight.i(layer)?, ); ws.insert( format!("model.layers.{layer}.post_attention_layernorm.weight"), self.rms_ffn_weight.i(layer)?, ); } let vb = VarBuilder::from_tensors(ws, DType::F32, device); Ok(vb) } } impl Model { pub fn load(md: ModelData) -> Result<Self> { let dev = Device::Cpu; let mut 
model = std::io::Cursor::new(md.model); let config = Config::from_reader(&mut model)?; let weights = TransformerWeights::from_reader(&mut model, &config, &dev)?; let vb = weights.var_builder(&config, &dev)?; let cache = Cache::new(true, &config, vb.pp("rot"))?; let llama = Llama::load(vb, &cache, &config)?; let tokenizer = Tokenizer::from_bytes(&md.tokenizer).map_err(|m| candle::Error::Msg(m.to_string()))?; Ok(Self { cache, config, llama, tokenizer, }) } } pub struct Worker { link: WorkerLink<Self>, model: Option<Model>, } #[derive(Serialize, Deserialize)] pub enum WorkerInput { ModelData(ModelData), Run(f64, f64, String), } #[derive(Serialize, Deserialize)] pub enum WorkerOutput { Generated(String), GenerationDone(std::result::Result<(), String>), WeightsLoaded, } impl yew_agent::Worker for Worker { type Input = WorkerInput; type Message = (); type Output = std::result::Result<WorkerOutput, String>; type Reach = Public<Self>; fn create(link: WorkerLink<Self>) -> Self { Self { link, model: None } } fn update(&mut self, _msg: Self::Message) { // no messaging } fn handle_input(&mut self, msg: Self::Input, id: HandlerId) { let output = match msg { WorkerInput::ModelData(md) => match Model::load(md) { Ok(model) => { self.model = Some(model); Ok(WorkerOutput::WeightsLoaded) } Err(err) => Err(format!("model creation error {err:?}")), }, WorkerInput::Run(temp, top_p, prompt) => match &mut self.model { None => Err("model has not been set yet".to_string()), Some(model) => { { let mut cache = model.cache.kvs.lock().unwrap(); for elem in cache.iter_mut() { *elem = None } } let result = model .run(&self.link, id, temp, top_p, prompt) .map_err(|e| e.to_string()); Ok(WorkerOutput::GenerationDone(result)) } }, }; self.link.respond(id, output); } fn name_of_resource() -> &'static str { "worker.js" } fn resource_path_is_relative() -> bool { true } }
candle/candle-wasm-examples/llama2-c/src/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/src/worker.rs", "repo_id": "candle", "token_count": 5770 }
export async function extractEmbeddings( worker, weightsURL, tokenizerURL, configURL, modelID, sentences, updateStatus, normalize_embeddings = true ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, sentences, normalize_embeddings, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } export async function generateText( worker, weightsURL, tokenizerURL, configURL, modelID, prompt, params, updateStatus ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, prompt, params, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } export const MODELS = { t5_small_quantized: { size: "64.4 MB", base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/", model: "model.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, t5_small: { size: "242 MB", base_url: "https://huggingface.co/t5-small/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, flan_t5_small: { size: "308 MB", base_url: "https://huggingface.co/google/flan-t5-small/resolve/refs%2Fpr%2F14/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, flan_t5_base_quantized: { size: "263 MB", base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/", model: "model-flan-t5-base.gguf", tokenizer: "tokenizer.json", config: "config-flan-t5-base.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, coedit_large_quantized: { size: "643 MB", base_url: 
"https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: "model.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: { fluency: { prefix: "Fix the grammar: ", max_length: 300, }, coherence: { prefix: "Rewrite to make this easier to understand: ", max_length: 300, }, simplification: { prefix: "translate English to Romanian: ", max_length: 300, }, simplification: { prefix: "Paraphrase this: ", max_length: 300, }, formalization: { prefix: "Write this more formally: ", max_length: 300, }, neutralize: { prefix: "Write in a more neutral way: ", max_length: 300, }, }, }, }; export function getModelInfo(id, taskID) { const model = MODELS[id]; return { modelURL: model.base_url + model.model, configURL: model.base_url + model.config, tokenizerURL: model.base_url + model.tokenizer, maxLength: model.tasks[taskID].max_length, }; }
candle/candle-wasm-examples/t5/utils.js/0
{ "file_path": "candle/candle-wasm-examples/t5/utils.js", "repo_id": "candle", "token_count": 2339 }
[package] name = "candle-wasm-example-yolo" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { workspace = true } candle-nn = { workspace = true } num-traits = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } image = { workspace = true } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } log = { workspace = true } rand = { workspace = true } safetensors = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" getrandom = { version = "0.2", features = ["js"] } gloo = "0.11" js-sys = "0.3.64" wasm-bindgen = "0.2.87" wasm-bindgen-futures = "0.4.37" wasm-logger = "0.2" yew-agent = "0.2.0" yew = { version = "0.20.0", features = ["csr"] } [dependencies.web-sys] version = "=0.3.70" features = [ 'Blob', 'CanvasRenderingContext2d', 'Document', 'Element', 'HtmlElement', 'HtmlCanvasElement', 'HtmlImageElement', 'ImageData', 'Node', 'Window', 'Request', 'RequestCache', 'RequestInit', 'RequestMode', 'Response', 'Performance', 'TextMetrics', ]
candle/candle-wasm-examples/yolo/Cargo.toml/0
{ "file_path": "candle/candle-wasm-examples/yolo/Cargo.toml", "repo_id": "candle", "token_count": 464 }
pub fn add(left: usize, right: usize) -> usize { left + right } #[cfg(test)] mod tests { use super::*; #[test] fn it_works() { let result = add(2, 2); assert_eq!(result, 4); } }
candle/candle-wasm-tests/src/lib.rs/0
{ "file_path": "candle/candle-wasm-tests/src/lib.rs", "repo_id": "candle", "token_count": 108 }
image: repository: huggingface name: chat-ui nodeSelector: role-huggingchat: "true" tolerations: - key: "huggingface.co/huggingchat" operator: "Equal" value: "true" effect: "NoSchedule" serviceAccount: enabled: true create: true name: huggingchat-prod ingress: path: "/chat" annotations: alb.ingress.kubernetes.io/healthcheck-path: "/healthcheck" alb.ingress.kubernetes.io/listen-ports: "[{\"HTTP\": 80}, {\"HTTPS\": 443}]" alb.ingress.kubernetes.io/group.name: "hub-prod" alb.ingress.kubernetes.io/scheme: "internet-facing" alb.ingress.kubernetes.io/ssl-redirect: "443" alb.ingress.kubernetes.io/tags: "Env=prod,Project=hub,Terraform=true" alb.ingress.kubernetes.io/target-node-labels: "role-hub-utils=true" kubernetes.io/ingress.class: "alb" envVars: ADDRESS_HEADER: 'X-Forwarded-For' ALTERNATIVE_REDIRECT_URLS: '["huggingchat://login/callback"]' APP_BASE: "/chat" ALLOW_IFRAME: "false" COMMUNITY_TOOLS: "true" COOKIE_SAMESITE: "lax" COOKIE_SECURE: "true" ENABLE_ASSISTANTS: "true" ENABLE_ASSISTANTS_RAG: "true" EXPOSE_API: "true" METRICS_PORT: 5565 LOG_LEVEL: "debug" METRICS_ENABLED: "true" MODELS: > [ { "name": "Qwen/Qwen2.5-72B-Instruct", "description": "The latest Qwen open model with improved role-playing, long text generation and structured data understanding.", "modelUrl": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct", "websiteUrl": "https://qwenlm.github.io/blog/qwen2.5/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/qwen-logo.png", "preprompt": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.", "parameters": { "stop": ["<|endoftext|>", "<|im_end|>"], "temperature": 0.6, "truncate": 28672, "max_new_tokens": 3072 }, "tools": true, "promptExamples": [ { "title": "Write an email from bullet list", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a snake game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Assist in a task", "prompt": "How do I make a delicious lemon cheesecake?" } ] }, { "name": "meta-llama/Llama-3.3-70B-Instruct", "id": "meta-llama/Llama-3.3-70B-Instruct", "description": "Ideal for everyday use. A fast and extremely capable model matching closed source models' capabilities. Now with the latest Llama 3.3 weights!", "modelUrl": "https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct", "websiteUrl": "https://llama.meta.com/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/meta-logo.png", "tools": true, "preprompt": "", "parameters": { "stop": ["<|endoftext|>", "<|eot_id|>"], "temperature": 0.6, "max_new_tokens": 1024, "truncate": 7167 }, "promptExamples": [ { "title": "Write an email from bullet list", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a snake game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Assist in a task", "prompt": "How do I make a delicious lemon cheesecake?" } ] }, { "name": "CohereForAI/c4ai-command-r-plus-08-2024", "description": "Cohere's largest language model, optimized for conversational interaction and tool use. 
Now with the 2024 update!", "modelUrl": "https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024", "websiteUrl": "https://docs.cohere.com/docs/command-r-plus", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/cohere-logo.png", "tools": true, "parameters": { "stop": ["<|END_OF_TURN_TOKEN|>"], "truncate": 28672, "max_new_tokens": 2048, "temperature": 0.3 }, "promptExamples": [ { "title": "Generate a mouse portrait", "prompt": "Generate the portrait of a scientific mouse in its laboratory." }, { "title": "Review a pull request", "prompt": "Review this pull request: https://github.com/huggingface/chat-ui/pull/1131/files" }, { "title": "Code a snake game", "prompt": "Code a basic snake game in python, give explanations for each step." } ] }, { "name": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "modelUrl": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "websiteUrl": "https://deepseek.com/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/deepseek-logo.png", "description": "The first reasoning model from DeepSeek, distilled into a 32B dense model. Outperforms o1-mini on multiple benchmarks.", "reasoning": { "type": "tokens", "beginToken": "<think>", "endToken": "</think>" }, "promptExamples": [ { "title": "Rs in strawberry", "prompt": "how many R in strawberry?" }, { "title": "Larger number", "prompt": "9.11 or 9.9 which number is larger?" }, { "title": "Measuring 6 liters", "prompt": "I have a 6- and a 12-liter jug. I want to measure exactly 6 liters." } ], "endpoints": [ { "type": "openai", "baseURL": "https://proxy.serverless.api-inference.huggingface.tech/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B/v1" } ] }, { "name": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "tokenizer": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "modelUrl": "https://huggingface.co/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "websiteUrl": "https://www.nvidia.com/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/nvidia-logo.png", "description": "Nvidia's latest Llama fine-tune, topping alignment benchmarks and optimized for instruction following.", "parameters": { "stop": ["<|eot_id|>", "<|im_end|>"], "temperature": 0.5, "truncate": 28672, "max_new_tokens": 2048 }, "promptExamples": [ { "title": "Rs in strawberry", "prompt": "how many R in strawberry?" }, { "title": "Larger number", "prompt": "9.11 or 9.9 which number is larger?" }, { "title": "Measuring 6 liters", "prompt": "I have a 6- and a 12-liter jug. I want to measure exactly 6 liters." } ], "endpoints": [ { "type": "openai", "baseURL": "https://proxy.serverless.api-inference.huggingface.tech/models/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF/v1" } ] }, { "name": "Qwen/QwQ-32B-Preview", "preprompt": "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. 
You should think step-by-step.", "modelUrl": "https://huggingface.co/Qwen/QwQ-32B-Preview", "websiteUrl": "https://qwenlm.github.io/blog/qwq-32b-preview/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/qwen-logo.png", "description": "QwQ is an experiment model from the Qwen Team with advanced reasoning capabilities.", "reasoning": { "type": "summarize" }, "parameters": { "stop": ["<|im_end|>"], "truncate": 12288, "max_new_tokens": 4096, "temperature": 0.7, "top_k": 20, "top_p": 0.8, "repetition_penalty": 1.05 }, "promptExamples": [ { "title": "Rs in strawberry", "prompt": "how many R in strawberry?" }, { "title": "Larger number", "prompt": "9.11 or 9.9 which number is larger?" }, { "title": "Measuring 6 liters", "prompt": "I have a 6- and a 12-liter jug. I want to measure exactly 6 liters." } ] }, { "name": "Qwen/Qwen2.5-Coder-32B-Instruct", "description": "Qwen's latest coding model, in its biggest size yet. SOTA on many coding benchmarks.", "modelUrl": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct", "websiteUrl": "https://qwenlm.github.io/blog/qwen2.5-coder-family/", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/qwen-logo.png", "preprompt": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.", "parameters": { "stop": ["<|im_end|>", "<|endoftext|>"], "temperature": 0.6, "truncate": 28672, "max_new_tokens": 3072 }, "promptExamples": [ { "title": "To-do list web app", "prompt": "Create a simple to-do list application where users can:\n- Add new tasks.\n- Mark tasks as complete.\n- Delete completed tasks.\nThe tasks should persist in the browser's local storage so that they remain available even after a page reload.\n" }, { "title": "Create a REST API", "prompt": "Build a simple REST API using Node.js, TypeScript and Express:\n- POST /items: Accepts a JSON body with name and quantity and adds a new item.\n- GET /items: Returns a list of all items.\n- PUT /items/:id: Updates the name or quantity of an item by its id.\n- DELETE /items/:id: Removes an item by its id.\nUse an in-memory array as the data store (no need for a database). Include basic error handling (e.g., item not found)." }, { "title": "Simple website", "prompt": "Generate a snazzy static landing page for a local coffee shop using HTML and CSS. You can use tailwind using <script src='https://cdn.tailwindcss.com'></script>." } ], "endpoints": [ { "type": "openai", "baseURL": "https://proxy.serverless.api-inference.huggingface.tech/models/Qwen/Qwen2.5-Coder-32B-Instruct/v1" } ] }, { "name": "meta-llama/Llama-3.2-11B-Vision-Instruct", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/meta-logo.png", "description": "The latest multimodal model from Meta! Supports image inputs natively.", "websiteUrl": "https://llama.com/", "multimodal": true, "parameters": { "stop": ["<|eot_id|>", "<|im_end|>"], "temperature": 0.6, "truncate": 14336, "max_new_tokens": 1536 }, "endpoints": [ { "type": "openai", "baseURL": "https://proxy.serverless.api-inference.huggingface.tech/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1", "multimodal": { "image": { "maxSizeInMB": 10, "maxWidth": 560, "maxHeight": 560, "supportedMimeTypes": ["image/png", "image/jpeg", "image/webp"], "preferredMimeType": "image/webp" } } } ] }, { "name": "NousResearch/Hermes-3-Llama-3.1-8B", "description": "Nous Research's latest Hermes 3 release in 8B size. 
Follows instruction closely.", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/nous-logo.png", "websiteUrl": "https://nousresearch.com/", "modelUrl": "https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B", "tokenizer": "NousResearch/Hermes-3-Llama-3.1-8B", "promptExamples": [ { "title": "Write an email from bullet list", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a snake game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Assist in a task", "prompt": "How do I make a delicious lemon cheesecake?" } ], "parameters": { "stop": ["<|im_end|>"], "temperature": 0.6, "truncate": 14336, "max_new_tokens": 1536 } }, { "name": "mistralai/Mistral-Nemo-Instruct-2407", "tokenizer": "mistralai/Mistral-Nemo-Instruct-2407", "displayName": "mistralai/Mistral-Nemo-Instruct-2407", "description": "A small model with good capabilities in language understanding and commonsense reasoning.", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/mistral-logo.png", "websiteUrl": "https://mistral.ai/news/mistral-nemo/", "modelUrl": "https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407", "preprompt": "", "parameters": { "stop": ["</s>"], "temperature": 0.6, "truncate": 14336, "max_new_tokens": 1536 }, "promptExamples": [ { "title": "Write an email from bullet list", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a snake game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Assist in a task", "prompt": "How do I make a delicious lemon cheesecake?" } ] }, { "name": "microsoft/Phi-3.5-mini-instruct", "tokenizer": "microsoft/Phi-3.5-mini-instruct", "description": "One of the best small models (3.8B parameters), super fast for simple tasks.", "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/microsoft-logo.png", "modelUrl": "https://huggingface.co/microsoft/Phi-3.5-mini-instruct", "websiteUrl": "https://techcommunity.microsoft.com/t5/ai-azure-ai-services-blog/discover-the-new-multi-lingual-high-quality-phi-3-5-slms/ba-p/4225280/", "preprompt": "", "parameters": { "stop": ["<|end|>", "<|endoftext|>", "<|assistant|>"], "temperature": 0.6, "truncate": 28672, "max_new_tokens": 3072 }, "promptExamples": [ { "title": "Write an email from bullet list", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a snake game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Assist in a task", "prompt": "How do I make a delicious lemon cheesecake?" 
} ] }, { "name": "meta-llama/Llama-3.1-8B-Instruct", "parameters": { "temperature": 0.6, "stop": ["<|endoftext|>", "<|eot_id|>"] }, "unlisted": true } ] NODE_ENV: "prod" NODE_LOG_STRUCTURED_DATA: true OLD_MODELS: > [ { "name": "bigcode/starcoder" }, { "name": "OpenAssistant/oasst-sft-6-llama-30b-xor" }, { "name": "HuggingFaceH4/zephyr-7b-alpha" }, { "name": "openchat/openchat_3.5" }, { "name": "openchat/openchat-3.5-1210" }, { "name": "tiiuae/falcon-180B-chat" }, { "name": "codellama/CodeLlama-34b-Instruct-hf" }, { "name": "google/gemma-7b-it" }, { "name": "meta-llama/Llama-2-70b-chat-hf" }, { "name": "codellama/CodeLlama-70b-Instruct-hf" }, { "name": "openchat/openchat-3.5-0106" }, { "name": "meta-llama/Meta-Llama-3-70B-Instruct" }, { "name": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8" }, { "name": "CohereForAI/c4ai-command-r-plus", "transferTo": "CohereForAI/c4ai-command-r-plus-08-2024" }, { "name": "01-ai/Yi-1.5-34B-Chat", "transferTo": "CohereForAI/c4ai-command-r-plus-08-2024" }, { "name": "mistralai/Mixtral-8x7B-Instruct-v0.1", "transferTo": "mistralai/Mistral-Nemo-Instruct-2407" }, { "name": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", "transferTo": "NousResearch/Hermes-3-Llama-3.1-8B" }, { "name": "mistralai/Mistral-7B-Instruct-v0.3", "transferTo": "mistralai/Mistral-Nemo-Instruct-2407" }, { "name": "microsoft/Phi-3-mini-4k-instruct", "transferTo": "microsoft/Phi-3.5-mini-instruct" }, { "name": "meta-llama/Meta-Llama-3.1-70B-Instruct", "transferTo": "meta-llama/Llama-3.3-70B-Instruct" } ] PUBLIC_ORIGIN: "https://huggingface.co" PUBLIC_SHARE_PREFIX: "https://hf.co/chat" PUBLIC_ANNOUNCEMENT_BANNERS: > [ { "title": "DeepSeek R1 is now available!", "linkTitle": "Try it out!", "linkHref": "https://huggingface.co/chat/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B" } ] PUBLIC_APP_NAME: "HuggingChat" PUBLIC_APP_ASSETS: "huggingchat" PUBLIC_APP_COLOR: "yellow" PUBLIC_APP_DESCRIPTION: "Making the community's best AI chat models available to everyone." PUBLIC_APP_DISCLAIMER_MESSAGE: "Disclaimer: AI is an area of active research with known problems such as biased generation and misinformation. Do not use this application for high-stakes decisions or advice." PUBLIC_APP_GUEST_MESSAGE: "Sign in with a free Hugging Face account to continue using HuggingChat." 
PUBLIC_APP_DATA_SHARING: 0 PUBLIC_APP_DISCLAIMER: 1 PUBLIC_PLAUSIBLE_SCRIPT_URL: "/js/script.js" REQUIRE_FEATURED_ASSISTANTS: "true" TASK_MODEL: "meta-llama/Llama-3.1-8B-Instruct" TEXT_EMBEDDING_MODELS: > [{ "name": "bge-base-en-v1-5-sxa", "displayName": "bge-base-en-v1-5-sxa", "chunkCharLength": 512, "endpoints": [{ "type": "tei", "url": "https://huggingchat-tei.hf.space/" }] }] WEBSEARCH_BLOCKLIST: '["youtube.com", "twitter.com"]' XFF_DEPTH: '2' TOOLS: > [ { "_id": "000000000000000000000001", "displayName": "Image Generation", "description": "Use this tool to generate images based on a prompt.", "color": "yellow", "icon": "camera", "baseUrl": "black-forest-labs/FLUX.1-schnell", "name": "image_generation", "endpoint": "/infer", "inputs": [ { "name": "prompt", "description": "A prompt to generate an image from", "paramType": "required", "type": "str" }, { "name": "seed", "paramType": "fixed", "value": "0", "type": "float" }, { "name": "randomize_seed", "paramType": "fixed", "value": "true", "type": "bool" }, { "name": "width", "description": "numeric value between 256 and 2048", "paramType": "optional", "default": 1024, "type": "float" }, { "name": "height", "description": "numeric value between 256 and 2048", "paramType": "optional", "default": 1024, "type": "float" }, { "name": "num_inference_steps", "paramType": "fixed", "value": "4", "type": "float" } ], "outputComponent": "image", "outputComponentIdx": 0, "showOutput": true }, { "_id": "000000000000000000000002", "displayName": "Document Parser", "description": "Use this tool to parse any document and get its content in markdown format.", "color": "yellow", "icon": "cloud", "baseUrl": "huggingchat/document-parser", "name": "document_parser", "endpoint": "/predict", "inputs": [ { "name": "document", "description": "Filename of the document to parse", "paramType": "required", "type": "file", "mimeTypes": 'application/*' }, { "name": "filename", "paramType": "fixed", "value": "document.pdf", "type": "str" } ], "outputComponent": "textbox", "outputComponentIdx": 0, "showOutput": false, "isHidden": true }, { "_id": "000000000000000000000003", "name": "edit_image", "baseUrl": "multimodalart/cosxl", "endpoint": "/run_edit", "inputs": [ { "name": "image", "description": "The image path to be edited", "paramType": "required", "type": "file", "mimeTypes": 'image/*' }, { "name": "prompt", "description": "The prompt with which to edit the image", "paramType": "required", "type": "str" }, { "name": "negative_prompt", "paramType": "fixed", "value": "", "type": "str" }, { "name": "guidance_scale", "paramType": "fixed", "value": 6.5, "type": "float" }, { "name": "steps", "paramType": "fixed", "value": 30, "type": "float" } ], "outputComponent": "image", "showOutput": true, "displayName": "Image Editor", "color": "green", "icon": "camera", "description": "This tool lets you edit images", "outputComponentIdx": 0 } ] HF_ORG_ADMIN: '644171cfbd0c97265298aa99' HF_ORG_EARLY_ACCESS: '5e67bd5b1009063689407478' HF_API_ROOT: 'https://proxy.serverless.api-inference.huggingface.tech/models' infisical: enabled: true env: "prod-us-east-1" autoscaling: enabled: true minReplicas: 12 maxReplicas: 30 targetMemoryUtilizationPercentage: "50" targetCPUUtilizationPercentage: "50" resources: requests: cpu: 2 memory: 4Gi limits: cpu: 4 memory: 8Gi monitoring: enabled: true
chat-ui/chart/env/prod.yaml/0
{ "file_path": "chat-ui/chart/env/prod.yaml", "repo_id": "chat-ui", "token_count": 11587 }
# Multimodal We currently support [IDEFICS](https://huggingface.co/blog/idefics) (hosted on [TGI](./providers/tgi)), OpenAI and Anthropic Claude 3 as multimodal models. You can enable it by setting `multimodal: true` in your `MODELS` configuration. For IDEFICS, you must have a [PRO HF Api token](https://huggingface.co/settings/tokens). For OpenAI, see the [OpenAI section](./providers/openai). For Anthropic, see the [Anthropic section](./providers/anthropic). ```ini MODELS=`[ { "name": "HuggingFaceM4/idefics-80b-instruct", "multimodal" : true, "description": "IDEFICS is the new multimodal model by Hugging Face.", "preprompt": "", "chatPromptTemplate" : "{{#each messages}}{{#ifUser}}User: {{content}}{{/ifUser}}<end_of_utterance>\nAssistant: {{#ifAssistant}}{{content}}\n{{/ifAssistant}}{{/each}}", "parameters": { "temperature": 0.1, "top_p": 0.95, "repetition_penalty": 1.2, "top_k": 12, "truncate": 1000, "max_new_tokens": 1024, "stop": ["<end_of_utterance>", "User:", "\nUser:"] } } ]` ```
chat-ui/docs/source/configuration/models/multimodal.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/multimodal.md", "repo_id": "chat-ui", "token_count": 439 }
# Web Search Chat UI features a powerful Web Search feature. A high-level overview of how it works: 1. Generate an appropriate search query from the user prompt using the `TASK_MODEL` 2. Perform a web search via an external provider (e.g. Serper) or by locally scraping Google results 3. Load each search result in Playwright and scrape it 4. Convert the scraped HTML to a Markdown tree with headings as parents 5. Create embeddings for each Markdown element 6. Find the embeddings closest to the user query using a vector similarity search (inner product) 7. Get the corresponding Markdown elements and their parent, up to 8000 characters 8. Supply the information as context to the model <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/websearch-light.png" height="auto"/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/websearch-dark.png" height="auto"/> </div> ## Providers Many providers are supported for the web search, or you can use locally scraped Google results. ### Local For locally scraped Google results, put `USE_LOCAL_WEBSEARCH=true` in your `.env.local`. Please note that you may hit rate limits, as we make no attempt to make the traffic look legitimate. To avoid this, you may choose a provider, such as Serper, which is used on the official instance. ### SearXNG > SearXNG is a free internet metasearch engine which aggregates results from various search services and databases. Users are neither tracked nor profiled. You may enable support via the `SEARXNG_QUERY_URL`, where `<query>` will be replaced with the query keywords. Please see [the official documentation](https://docs.searxng.org/dev/search_api.html) for more information. Example: `https://searxng.yourdomain.com/search?q=<query>&engines=duckduckgo,google&format=json` ### Third Party Many third-party providers are supported as well. The official instance uses Serper. ```ini YDC_API_KEY=docs.you.com api key here SERPER_API_KEY=serper.dev api key here SERPAPI_KEY=serpapi key here SERPSTACK_API_KEY=serpstack api key here SEARCHAPI_KEY=searchapi api key here ``` ## Block/Allow List You may block or allow specific websites from the web search results. When using an allow list, only the links in the allowlist will be used. For supported search engines, the links will be blocked from the results directly. Any URL in the results that **partially or fully matches** an entry will be filtered out. ```ini WEBSEARCH_BLOCKLIST=`["youtube.com", "https://example.com/foo/bar"]` WEBSEARCH_ALLOWLIST=`["stackoverflow.com"]` ``` ## Disabling Javascript By default, Playwright will execute all Javascript on the page. This can be intensive, requiring up to 6 cores for full performance on some webpages. You may block scripts from running by setting `WEBSEARCH_JAVASCRIPT=false`. However, this will not block Javascript inlined in the HTML.
chat-ui/docs/source/configuration/web-search.md/0
{ "file_path": "chat-ui/docs/source/configuration/web-search.md", "repo_id": "chat-ui", "token_count": 838 }
/// <reference types="@sveltejs/kit" /> /// <reference types="unplugin-icons/types/svelte" /> import type { User } from "$lib/types/User"; // See https://kit.svelte.dev/docs/types#app // for information about these interfaces declare global { namespace App { // interface Error {} interface Locals { sessionId: string; user?: User & { logoutDisabled?: boolean }; } interface Error { message: string; errorId?: ReturnType<typeof crypto.randomUUID>; } // interface PageData {} // interface Platform {} } } export {};
chat-ui/src/app.d.ts/0
{ "file_path": "chat-ui/src/app.d.ts", "repo_id": "chat-ui", "token_count": 190 }
<script lang="ts"> import { base } from "$app/paths"; import { page } from "$app/state"; import { env as envPublic } from "$env/dynamic/public"; import LogoHuggingFaceBorderless from "$lib/components/icons/LogoHuggingFaceBorderless.svelte"; import Modal from "$lib/components/Modal.svelte"; import { useSettingsStore } from "$lib/stores/settings"; import { cookiesAreEnabled } from "$lib/utils/cookiesAreEnabled"; import Logo from "./icons/Logo.svelte"; const settings = useSettingsStore(); </script> <Modal on:close> <div class="from-primary-500/40 via-primary-500/10 to-primary-500/0 flex w-full flex-col items-center gap-6 bg-gradient-to-b px-5 pb-8 pt-9 text-center" > <h2 class="flex items-center text-2xl font-semibold text-gray-800"> <Logo classNames="mr-1" /> {envPublic.PUBLIC_APP_NAME} </h2> <p class="text-balance text-lg font-semibold leading-snug text-gray-800"> {envPublic.PUBLIC_APP_DESCRIPTION} </p> <p class="text-balance rounded-xl border bg-white/80 p-2 text-base text-gray-800"> {envPublic.PUBLIC_APP_GUEST_MESSAGE} </p> <form action="{base}/{page.data.loginRequired ? 'login' : 'settings'}" target="_parent" method="POST" class="flex w-full flex-col items-center gap-2" > {#if page.data.loginRequired} <button type="submit" class="flex w-full flex-wrap items-center justify-center whitespace-nowrap rounded-full bg-black px-5 py-2 text-center text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" > Sign in {#if envPublic.PUBLIC_APP_NAME === "HuggingChat"} <span class="flex items-center"> &nbsp;with <LogoHuggingFaceBorderless classNames="text-xl mr-1 ml-1.5" /> Hugging Face </span> {/if} </button> {:else} <button class="flex w-full items-center justify-center whitespace-nowrap rounded-full border-2 border-black bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" onclick={(e) => { if (!cookiesAreEnabled()) { e.preventDefault(); window.open(window.location.href, "_blank"); } $settings.ethicsModalAccepted = true; }} > Start chatting </button> {/if} </form> </div> </Modal>
chat-ui/src/lib/components/LoginModal.svelte/0
{ "file_path": "chat-ui/src/lib/components/LoginModal.svelte", "repo_id": "chat-ui", "token_count": 941 }
<script lang="ts"> import { fade } from "svelte/transition"; import Portal from "./Portal.svelte"; import IconDazzled from "$lib/components/icons/IconDazzled.svelte"; interface Props { message?: string; } let { message = "" }: Props = $props(); </script> <Portal> <div transition:fade|global={{ duration: 300 }} class="pointer-events-none fixed right-0 top-12 z-50 bg-gradient-to-bl from-red-500/20 via-red-500/0 to-red-500/0 pb-36 pl-36 pr-2 pt-2 md:top-0 md:pr-8 md:pt-5" > <div class="pointer-events-auto flex items-center rounded-full bg-white/90 px-3 py-1 shadow-sm dark:bg-gray-900/80" > <IconDazzled classNames="text-2xl mr-2" /> <h2 class="max-w-2xl font-semibold text-gray-800 dark:text-gray-200">{message}</h2> </div> </div> </Portal>
chat-ui/src/lib/components/Toast.svelte/0
{ "file_path": "chat-ui/src/lib/components/Toast.svelte", "repo_id": "chat-ui", "token_count": 337 }
<script lang="ts"> import { invalidateAll } from "$app/navigation"; import { page } from "$app/state"; import { base } from "$app/paths"; import type { Model } from "$lib/types/Model"; interface Props { models: Model[]; currentModel: Model; } let { models, currentModel }: Props = $props(); let selectedModelId = $state( models.map((m) => m.id).includes(currentModel.id) ? currentModel.id : models[0].id ); async function handleModelChange() { if (!page.params.id) return; try { const response = await fetch(`${base}/conversation/${page.params.id}`, { method: "PATCH", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ model: selectedModelId }), }); if (!response.ok) { throw new Error("Failed to update model"); } await invalidateAll(); } catch (error) { console.error(error); } } </script> <div class="mx-auto mt-0 flex w-fit flex-col items-center justify-center gap-2 rounded-lg border border-gray-200 bg-gray-500/20 p-4 dark:border-gray-800" > <span> This model is no longer available. Switch to a new one to continue this conversation: </span> <div class="flex items-center space-x-2"> <select bind:value={selectedModelId} class="rounded-md bg-gray-100 px-2 py-1 dark:bg-gray-900 max-sm:max-w-32" > {#each models as model} <option value={model.id}>{model.name}</option> {/each} </select> <button onclick={handleModelChange} disabled={selectedModelId === currentModel.id} class="rounded-md bg-gray-100 px-2 py-1 dark:bg-gray-900" > Accept </button> </div> </div>
chat-ui/src/lib/components/chat/ModelSwitch.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ModelSwitch.svelte", "repo_id": "chat-ui", "token_count": 640 }
import type { Migration } from "."; import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import { ReviewStatus } from "$lib/types/Review"; const updateFeaturedToReview: Migration = { _id: new ObjectId("000000000000000000000008"), name: "Update featured to review", up: async () => { const { assistants, tools } = collections; // Update assistants await assistants.updateMany({ featured: true }, { $set: { review: ReviewStatus.APPROVED } }); await assistants.updateMany( { featured: { $ne: true } }, { $set: { review: ReviewStatus.PRIVATE } } ); await assistants.updateMany({}, { $unset: { featured: "" } }); // Update tools await tools.updateMany({ featured: true }, { $set: { review: ReviewStatus.APPROVED } }); await tools.updateMany({ featured: { $ne: true } }, { $set: { review: ReviewStatus.PRIVATE } }); await tools.updateMany({}, { $unset: { featured: "" } }); return true; }, runEveryTime: false, }; export default updateFeaturedToReview;
chat-ui/src/lib/migrations/routines/08-update-featured-to-review.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/08-update-featured-to-review.ts", "repo_id": "chat-ui", "token_count": 326 }
import { buildPrompt } from "$lib/buildPrompt"; import { textGenerationStream } from "@huggingface/inference"; import { z } from "zod"; import type { Endpoint } from "../endpoints"; export const endpointAwsParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("aws"), url: z.string().url(), accessKey: z .string({ description: "An AWS Access Key ID. If not provided, the default AWS identity resolution will be used", }) .min(1) .optional(), secretKey: z .string({ description: "An AWS Access Key Secret. If not provided, the default AWS identity resolution will be used", }) .min(1) .optional(), sessionToken: z.string().optional(), service: z.union([z.literal("sagemaker"), z.literal("lambda")]).default("sagemaker"), region: z.string().optional(), }); export async function endpointAws( input: z.input<typeof endpointAwsParametersSchema> ): Promise<Endpoint> { let createSignedFetcher; try { createSignedFetcher = (await import("aws-sigv4-fetch")).createSignedFetcher; } catch (e) { throw new Error("Failed to import aws-sigv4-fetch"); } const { url, accessKey, secretKey, sessionToken, model, region, service } = endpointAwsParametersSchema.parse(input); const signedFetch = createSignedFetcher({ service, region, credentials: accessKey && secretKey ? { accessKeyId: accessKey, secretAccessKey: secretKey, sessionToken } : undefined, }); return async ({ messages, preprompt, continueMessage, generateSettings }) => { const prompt = await buildPrompt({ messages, continueMessage, preprompt, model, }); return textGenerationStream( { parameters: { ...model.parameters, ...generateSettings, return_full_text: false }, model: url, inputs: prompt, }, { use_cache: false, fetch: signedFetch, } ); }; } export default endpointAws;
chat-ui/src/lib/server/endpoints/aws/endpointAws.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/aws/endpointAws.ts", "repo_id": "chat-ui", "token_count": 695 }