Dataset columns: text (string, length 7 to 318k), id (string, length 14 to 166), metadata (dict), __index_level_0__ (int64, 0 to 439)
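The columns above indicate that each record pairs a source file's full contents with its repository path and a metadata dict (file_path, repo_id, token_count). Below is a minimal sketch of reading such records with the 🤗 Datasets library; the dataset identifier "example-org/diffusers-code-dump" is a hypothetical placeholder, not the actual repository id of this dump.

from datasets import load_dataset  # pip install datasets

# Hypothetical dataset id -- substitute the repository this dump was exported from.
ds = load_dataset("example-org/diffusers-code-dump", split="train")

record = ds[0]                            # one row with the columns listed above
print(record["id"])                       # path-style identifier of the stored file
print(record["metadata"]["token_count"])  # token count kept in the metadata dict
print(record["text"][:200])               # first 200 characters of the file text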
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel from diffusers.utils import PIL_INTERPOLATION from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, require_torch, torch_device, ) enable_full_determinism() class LDMSuperResolutionPipelineFastTests(unittest.TestCase): @property def dummy_image(self): batch_size = 1 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image @property def dummy_uncond_unet(self): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=6, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model @property def dummy_vq_model(self): torch.manual_seed(0) model = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, ) return model def test_inference_superresolution(self): device = "cpu" unet = self.dummy_uncond_unet scheduler = DDIMScheduler() vqvae = self.dummy_vq_model ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler) ldm.to(device) ldm.set_progress_bar_config(disable=None) init_image = self.dummy_image.to(device) generator = torch.Generator(device=device).manual_seed(0) image = ldm(image=init_image, generator=generator, num_inference_steps=2, output_type="numpy").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.8678, 0.8245, 0.6381, 0.6830, 0.4385, 0.5599, 0.4641, 0.6201, 0.5150]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_inference_superresolution_fp16(self): unet = self.dummy_uncond_unet scheduler = DDIMScheduler() vqvae = self.dummy_vq_model # put models in fp16 unet = unet.half() vqvae = vqvae.half() ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler) ldm.to(torch_device) ldm.set_progress_bar_config(disable=None) init_image = self.dummy_image.to(torch_device) image = ldm(init_image, num_inference_steps=2, output_type="numpy").images assert image.shape == (1, 64, 64, 3) @nightly @require_torch class LDMSuperResolutionPipelineIntegrationTests(unittest.TestCase): def test_inference_superresolution(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool.png" ) init_image = init_image.resize((64, 64), resample=PIL_INTERPOLATION["lanczos"]) ldm = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution", device_map="auto") 
ldm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = ldm(image=init_image, generator=generator, num_inference_steps=20, output_type="numpy").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.7644, 0.7679, 0.7642, 0.7633, 0.7666, 0.7560, 0.7425, 0.7257, 0.6907]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py/0
{ "file_path": "diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py", "repo_id": "diffusers", "token_count": 2046 }
126
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_stable_diffusion_inpaint_pipeline(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) model_id = "xvjiarui/stable-diffusion-2-inpainting" pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None) prompt = "Face of a yellow cat, high resolution, sitting on a park bench" prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] init_image = num_samples * [init_image] mask_image = num_samples * [mask_image] prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, jax.device_count()) prompt_ids = shard(prompt_ids) processed_masked_images = shard(processed_masked_images) processed_masks = shard(processed_masks) output = pipeline( prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True ) images = output.images.reshape(num_samples, 512, 512, 3) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py", "repo_id": "diffusers", "token_count": 1260 }
127
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDM3DPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase): pipeline_class = StableDiffusionLDM3DPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() ldm3d_pipe = StableDiffusionLDM3DPipeline(**components) ldm3d_pipe = ldm3d_pipe.to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth image_slice_rgb = rgb[0, -3:, -3:, -1] image_slice_depth = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) expected_slice_rgb = np.array( [0.37338176, 0.70247, 
0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] ) expected_slice_depth = np.array([103.46727, 85.812004, 87.849236]) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2 def test_stable_diffusion_prompt_embeds(self): components = self.get_dummy_components() ldm3d_pipe = StableDiffusionLDM3DPipeline(**components) ldm3d_pipe = ldm3d_pipe.to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = ldm3d_pipe(**inputs) rgb_slice_1, depth_slice_1 = output.rgb, output.depth rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1] depth_slice_1 = depth_slice_1[0, -3:, -1] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = ldm3d_pipe.tokenizer( prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0] inputs["prompt_embeds"] = prompt_embeds # forward output = ldm3d_pipe(**inputs) rgb_slice_2, depth_slice_2 = output.rgb, output.depth rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1] depth_slice_2 = depth_slice_2[0, -3:, -1] assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4 assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) ldm3d_pipe = StableDiffusionLDM3DPipeline(**components) ldm3d_pipe = ldm3d_pipe.to(device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt) rgb, depth = output.rgb, output.depth rgb_slice = rgb[0, -3:, -3:, -1] depth_slice = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) expected_slice_rgb = np.array( [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] ) expected_slice_depth = np.array([107.84738, 84.62802, 89.962135]) assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2 assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2 @nightly @require_torch_gpu class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_ldm3d_stable_diffusion(self): ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d") ldm3d_pipe = ldm3d_pipe.to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth rgb_slice = rgb[0, -3:, -3:, 
-1].flatten() depth_slice = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512) expected_slice_rgb = np.array( [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] ) expected_slice_depth = np.array( [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] ) assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3 assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3 @nightly @require_torch_gpu class StableDiffusionPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_ldm3d(self): ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth expected_rgb_mean = 0.495586 expected_rgb_std = 0.33795515 expected_depth_mean = 112.48518 expected_depth_std = 98.489746 assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3 assert np.abs(expected_rgb_std - rgb.std()) < 1e-3 assert np.abs(expected_depth_mean - depth.mean()) < 1e-3 assert np.abs(expected_depth_std - depth.std()) < 1e-3 def test_ldm3d_v2(self): ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth expected_rgb_mean = 0.4194127 expected_rgb_std = 0.35375586 expected_depth_mean = 0.5638502 expected_depth_std = 0.34686103 assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512, 1) assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3 assert np.abs(expected_rgb_std - rgb.std()) < 1e-3 assert np.abs(expected_depth_mean - depth.mean()) < 1e-3 assert np.abs(expected_depth_std - depth.std()) < 1e-3
diffusers/tests/pipelines/stable_diffusion_ldm3d/test_stable_diffusion_ldm3d.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_ldm3d/test_stable_diffusion_ldm3d.py", "repo_id": "diffusers", "token_count": 5573 }
128
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class StableUnCLIPImg2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableUnCLIPImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) def get_dummy_components(self): embedder_hidden_size = 32 embedder_projection_dim = embedder_hidden_size # image encoding components feature_extractor = CLIPImageProcessor(crop_size=32, size=32) torch.manual_seed(0) image_encoder = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) ) # regular denoising components torch.manual_seed(0) image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size) image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2") torch.manual_seed(0) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) text_encoder = CLIPTextModel( CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) ) torch.manual_seed(0) unet = UNet2DConditionModel( sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", # The class embeddings are the noise augmented image embeddings. # I.e. 
the image embeddings concated with the noised embeddings of the same dimension projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, ) torch.manual_seed(0) vae = AutoencoderKL() components = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return components def get_dummy_inputs(self, device, seed=0, pil_image=True): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) if pil_image: input_image = input_image * 0.5 + 0.5 input_image = input_image.clamp(0, 1) input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy() input_image = DiffusionPipeline.numpy_to_pil(input_image)[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def test_image_embeds_none(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableUnCLIPImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs.update({"image_embeds": None}) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass # because GPU undeterminism requires a looser check. def test_attention_slicing_forward_pass(self): test_max_difference = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference) # Overriding PipelineTesterMixin::test_inference_batch_single_identical # because undeterminism requires a looser check. 
def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False) @nightly @require_torch_gpu class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_unclip_l_img2img(self): input_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16 ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) output = pipe(input_image, "anime turle", generator=generator, output_type="np") image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image) def test_stable_unclip_h_img2img(self): input_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) output = pipe(input_image, "anime turle", generator=generator, output_type="np") image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image) def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self): input_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _ = pipe( input_image, "anime turtle", num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
diffusers/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py", "repo_id": "diffusers", "token_count": 5046 }
129
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import PriorTransformer, UnCLIPPipeline, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = UnCLIPPipeline params = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", "guidance_scale", "prompt_embeds", "cross_attention_kwargs", } batch_params = TEXT_TO_IMAGE_BATCH_PARAMS required_optional_params = [ "generator", "return_dict", "prior_num_inference_steps", "decoder_num_inference_steps", "super_res_num_inference_steps", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } model = PriorTransformer(**model_kwargs) return model @property def dummy_text_proj(self): torch.manual_seed(0) model_kwargs = { "clip_embeddings_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "cross_attention_dim": self.cross_attention_dim, } model = UnCLIPTextProjModel(**model_kwargs) return model @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "sample_size": 32, # RGB in channels "in_channels": 3, # Out channels is double in channels because predicts mean and variance "out_channels": 6, "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), 
"layers_per_block": 1, "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": "identity", } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_super_res_kwargs(self): return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "in_channels": 6, "out_channels": 3, } @property def dummy_super_res_first(self): torch.manual_seed(0) model = UNet2DModel(**self.dummy_super_res_kwargs) return model @property def dummy_super_res_last(self): # seeded differently to get different unet than `self.dummy_super_res_first` torch.manual_seed(1) model = UNet2DModel(**self.dummy_super_res_kwargs) return model def get_dummy_components(self): prior = self.dummy_prior decoder = self.dummy_decoder text_proj = self.dummy_text_proj text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer super_res_first = self.dummy_super_res_first super_res_last = self.dummy_super_res_last prior_scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample_range=5.0, ) decoder_scheduler = UnCLIPScheduler( variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000, ) super_res_scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000, ) components = { "prior": prior, "decoder": decoder, "text_proj": text_proj, "text_encoder": text_encoder, "tokenizer": tokenizer, "super_res_first": super_res_first, "super_res_last": super_res_last, "prior_scheduler": prior_scheduler, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "prior_num_inference_steps": 2, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "numpy", } return inputs def test_unclip(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [ 0.9997, 0.9988, 0.0028, 0.9997, 0.9984, 0.9965, 0.0029, 0.9986, 0.0025, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_unclip_passed_text_embed(self): device = torch.device("cpu") class DummyScheduler: init_noise_sigma = 1 components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) prior = components["prior"] decoder = components["decoder"] super_res_first = components["super_res_first"] tokenizer = components["tokenizer"] text_encoder = components["text_encoder"] generator = torch.Generator(device=device).manual_seed(0) dtype = prior.dtype batch_size = 1 shape = (batch_size, 
prior.config.embedding_dim) prior_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) shape = (batch_size, decoder.config.in_channels, decoder.config.sample_size, decoder.config.sample_size) decoder_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) shape = ( batch_size, super_res_first.config.in_channels // 2, super_res_first.config.sample_size, super_res_first.config.sample_size, ) super_res_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) pipe.set_progress_bar_config(disable=None) prompt = "this is a prompt example" generator = torch.Generator(device=device).manual_seed(0) output = pipe( [prompt], generator=generator, prior_num_inference_steps=2, decoder_num_inference_steps=2, super_res_num_inference_steps=2, prior_latents=prior_latents, decoder_latents=decoder_latents, super_res_latents=super_res_latents, output_type="np", ) image = output.images text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ) text_model_output = text_encoder(text_inputs.input_ids) text_attention_mask = text_inputs.attention_mask generator = torch.Generator(device=device).manual_seed(0) image_from_text = pipe( generator=generator, prior_num_inference_steps=2, decoder_num_inference_steps=2, super_res_num_inference_steps=2, prior_latents=prior_latents, decoder_latents=decoder_latents, super_res_latents=super_res_latents, text_model_output=text_model_output, text_attention_mask=text_attention_mask, output_type="np", )[0] # make sure passing text embeddings manually is identical assert np.abs(image - image_from_text).max() < 1e-4 # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass # because UnCLIP GPU undeterminism requires a looser check. @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference, expected_max_diff=0.01) # Overriding PipelineTesterMixin::test_inference_batch_single_identical # because UnCLIP undeterminism requires a looser check. 
@skip_mps def test_inference_batch_single_identical(self): additional_params_copy_to_batched_inputs = [ "prior_num_inference_steps", "decoder_num_inference_steps", "super_res_num_inference_steps", ] self._test_inference_batch_single_identical( additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, expected_max_diff=5e-3 ) def test_inference_batch_consistent(self): additional_params_copy_to_batched_inputs = [ "prior_num_inference_steps", "decoder_num_inference_steps", "super_res_num_inference_steps", ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes batch_sizes = [2, 3] self._test_inference_batch_consistent( batch_sizes=batch_sizes, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs ) @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() @skip_mps def test_save_load_local(self): return super().test_save_load_local(expected_max_difference=5e-3) @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @unittest.skip("UnCLIP produces very large differences in fp16 vs fp32. Test is not useful.") def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1.0) @nightly class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_unclip_karlo_cpu_fp32(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/unclip/karlo_v1_alpha_horse_cpu.npy" ) pipeline = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha") pipeline.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipeline( "horse", num_images_per_prompt=1, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image).max() < 1e-1 @nightly @require_torch_gpu class UnCLIPPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_unclip_karlo(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/unclip/karlo_v1_alpha_horse_fp16.npy" ) pipeline = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) output = pipeline( "horse", generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(image, expected_image) def test_unclip_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _ = pipe( "horse", num_images_per_prompt=1, prior_num_inference_steps=2, decoder_num_inference_steps=2, super_res_num_inference_steps=2, output_type="np", ) mem_bytes = 
torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
diffusers/tests/pipelines/unclip/test_unclip.py/0
{ "file_path": "diffusers/tests/pipelines/unclip/test_unclip.py", "repo_id": "diffusers", "token_count": 7864 }
130
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class DPMSolverMultistepSchedulerTest(SchedulerCommonTest): scheduler_classes = (DPMSolverMultistepScheduler,) forward_default_kwargs = (("num_inference_steps", 25),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lower_order_final": False, "euler_at_final": False, "lambda_min_clipped": -float("inf"), "variance_type": None, "final_sigmas_type": "sigma_min", } config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] output, new_output = sample, sample for t in range(time_step, time_step + scheduler.config.solver_order + 1): t = new_scheduler.timesteps[t] output = scheduler.step(residual, t, output, **kwargs).prev_sample new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals (must be after setting timesteps) scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residual (must be after setting timesteps) new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] time_step = new_scheduler.timesteps[time_step] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, scheduler=None, **config): 
if scheduler is None: scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] time_step_0 = scheduler.timesteps[5] time_step_1 = scheduler.timesteps[6] output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_thresholding(self): self.check_over_configs(thresholding=False) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="dpmsolver++", solver_order=order, solver_type=solver_type, ) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_solver_order_and_type(self): for algorithm_type in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: if algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]: if order == 3: continue else: self.check_over_configs( solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, ) sample = self.full_loop( solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, ) assert not torch.isnan(sample).any(), "Samples have nan numbers" def test_lower_order_final(self): self.check_over_configs(lower_order_final=True) self.check_over_configs(lower_order_final=False) def test_euler_at_final(self): self.check_over_configs(euler_at_final=True) self.check_over_configs(euler_at_final=False) def test_lambda_min_clipped(self): self.check_over_configs(lambda_min_clipped=-float("inf")) self.check_over_configs(lambda_min_clipped=-5.1) def test_variance_type(self): self.check_over_configs(variance_type=None) self.check_over_configs(variance_type="learned_range") def test_inference_steps(self): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: 
self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) def test_full_loop_no_noise(self): sample = self.full_loop() result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.3301) < 1e-3 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 t_start = 5 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for i, t in enumerate(timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 318.4111) < 1e-2, f" expected result sum 318.4111, but get {result_sum}" assert abs(result_mean.item() - 0.4146) < 1e-3, f" expected result mean 0.4146, but get {result_mean}" def test_full_loop_no_noise_thres(self): sample = self.full_loop(thresholding=True, dynamic_thresholding_ratio=0.87, sample_max_value=0.5) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 1.1364) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2251) < 1e-3 def test_full_loop_with_karras_and_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2096) < 1e-3 def test_full_loop_with_lu_and_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction", use_lu_lambdas=True) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.1554) < 1e-3 def test_switch(self): # make sure that iterating over schedulers with same config names gives same results # for defaults scheduler = DPMSolverMultistepScheduler(**self.get_scheduler_config()) sample = self.full_loop(scheduler=scheduler) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.3301) < 1e-3 scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config) scheduler = UniPCMultistepScheduler.from_config(scheduler.config) scheduler = DEISMultistepScheduler.from_config(scheduler.config) scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) sample = self.full_loop(scheduler=scheduler) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.3301) < 1e-3 def test_fp16_support(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter.half() scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample assert sample.dtype == torch.float16 def test_duplicated_timesteps(self, **config): for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(scheduler.config.num_train_timesteps) assert len(scheduler.timesteps) == 
scheduler.num_inference_steps
diffusers/tests/schedulers/test_scheduler_dpm_multi.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_dpm_multi.py", "repo_id": "diffusers", "token_count": 6367 }
131
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest # UnCLIPScheduler is a modified DDPMScheduler with a subset of the configuration. class UnCLIPSchedulerTest(SchedulerCommonTest): scheduler_classes = (UnCLIPScheduler,) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "variance_type": "fixed_small_log", "clip_sample": True, "clip_sample_range": 1.0, "prediction_type": "epsilon", } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_variance_type(self): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=variance) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_clip_sample_range(self): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=clip_sample_range) def test_prediction_type(self): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=prediction_type) def test_time_indices(self): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep) def test_variance_fixed_small_log(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log") scheduler = scheduler_class(**scheduler_config) assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5 def test_variance_learned_range(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(variance_type="learned_range") scheduler = scheduler_class(**scheduler_config) predicted_variance = 0.5 assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5 assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5 assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5 def test_full_loop(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = scheduler.timesteps model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for i, t in enumerate(timesteps): # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 252.2682495) < 1e-2 assert abs(result_mean.item() - 0.3284743) < 1e-3 def test_full_loop_skip_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(25) timesteps = scheduler.timesteps model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for i, t in enumerate(timesteps): # 1. 
predict noise residual residual = model(sample, t) if i + 1 == timesteps.shape[0]: prev_timestep = None else: prev_timestep = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step( residual, t, sample, prev_timestep=prev_timestep, generator=generator ).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 258.2044983) < 1e-2 assert abs(result_mean.item() - 0.3362038) < 1e-3 def test_trained_betas(self): pass def test_add_noise_device(self): pass
diffusers/tests/schedulers/test_scheduler_unclip.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_unclip.py", "repo_id": "diffusers", "token_count": 2227 }
132
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    # if not patch:
    #     print("Cleaning main README, don't forget to run `make fix-copies`.")
    #     clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
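A short, self-contained sketch (editor's addition, not part of the script) of what the "init" entry in REPLACE_PATTERNS does, applied to an in-memory string instead of `src/diffusers/__init__.py`; the version numbers are made up for illustration. The script itself is driven by the argparse flags defined above (`--patch` for a patch release, `--post_release` for the post-release dev bump).

import re

# Made-up file content for illustration; the regex is the "init" pattern from REPLACE_PATTERNS.
re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
code = 'from .utils import logging\n__version__ = "0.22.0.dev0"\n'
updated = re_pattern.sub('__version__ = "0.22.0"\n', code)
print(updated)  # the .dev0 suffix is gone: __version__ = "0.22.0"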
diffusers/utils/release.py/0
{ "file_path": "diffusers/utils/release.py", "repo_id": "diffusers", "token_count": 2306 }
133
<jupyter_start><jupyter_text>Behind the pipeline (PyTorch) Install the 🤗 *Transformers* library to run this *notebook*.<jupyter_code>!pip install transformers[sentencepiece]
from transformers import pipeline

classifier = pipeline("sentiment-analysis", model="tblard/tf-allocine")
classifier(
    ["J'ai attendu un cours d'HuggingFace toute ma vie.", "Je déteste tellement ça !"]
)
from transformers import AutoTokenizer

checkpoint = "tblard/tf-allocine"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
raw_inputs = [
    "J'ai attendu un cours d'HuggingFace toute ma vie.",
    "Je déteste tellement ça !",
]
inputs = tokenizer(raw_inputs, padding=True, truncation=True, return_tensors="pt")
print(inputs)
from transformers import AutoModel

checkpoint = "tblard/tf-allocine"
model = AutoModel.from_pretrained(checkpoint, from_tf=True)
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)
from transformers import AutoModelForSequenceClassification

checkpoint = "tblard/tf-allocine"
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, from_tf=True)
outputs = model(**inputs)
print(outputs.logits.shape)
print(outputs.logits)
import torch

predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
print(predictions)
model.config.id2label<jupyter_output><empty_output>
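A small follow-up sketch (editor's addition, not in the original notebook) that maps the probabilities computed above back to human-readable labels; it assumes the `predictions`, `raw_inputs` and `model` objects from the previous cells.

import torch

# Assumes `predictions`, `raw_inputs` and `model` from the cells above.
label_ids = torch.argmax(predictions, dim=-1)
for text, label_id in zip(raw_inputs, label_ids):
    print(text, "->", model.config.id2label[int(label_id)])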
notebooks/course/fr/chapter2/section2_pt.ipynb/0
{ "file_path": "notebooks/course/fr/chapter2/section2_pt.ipynb", "repo_id": "notebooks", "token_count": 471 }
134
<jupyter_start><jupyter_text>Un entraînement complet Installez les bibliothèques 🤗 Transformers et 🤗 Datasets pour exécuter ce notebook.<jupyter_code>!pip install datasets transformers[sentencepiece] !pip install accelerate # Pour exécuter l'entraînement sur TPU, vous devez décommenter la ligne suivante : # !pip install cloud-tpu-client==0.10 torch==1.9.0 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.9-cp37-cp37m-linux_x86_64.whl from datasets import load_dataset from transformers import AutoTokenizer, DataCollatorWithPadding raw_datasets = load_dataset("paws-x", "fr") checkpoint = "camembert-base" tokenizer = AutoTokenizer.from_pretrained(checkpoint) def tokenize_function(example): return tokenizer(example["sentence1"], example["sentence2"], truncation=True) tokenized_datasets = raw_datasets.map(tokenize_function, batched=True) data_collator = DataCollatorWithPadding(tokenizer=tokenizer) tokenized_datasets = tokenized_datasets.remove_columns(["sentence1", "sentence2", "idx"]) tokenized_datasets = tokenized_datasets.rename_column("label", "labels") tokenized_datasets.set_format("torch") tokenized_datasets["train"].column_names ["attention_mask", "input_ids", "labels", "token_type_ids"] from torch.utils.data import DataLoader train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, batch_size=8, collate_fn=data_collator ) eval_dataloader = DataLoader( tokenized_datasets["validation"], batch_size=8, collate_fn=data_collator ) for batch in train_dataloader: break {k: v.shape for k, v in batch.items()} from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) outputs = model(**batch) print(outputs.loss, outputs.logits.shape) from transformers import AdamW optimizer = AdamW(model.parameters(), lr=5e-5) from transformers import get_scheduler num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps, ) print(num_training_steps) import torch device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model.to(device) device from tqdm.auto import tqdm progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) from datasets import load_metric metric = load_metric("glue", "mrpc") model.eval() for batch in eval_dataloader: batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) logits = outputs.logits predictions = torch.argmax(logits, dim=-1) metric.add_batch(predictions=predictions, references=batch["labels"]) metric.compute() from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model.to(device) num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps, ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in 
train_dataloader: batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) from accelerate import Accelerator from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) train_dl, eval_dl, model, optimizer = accelerator.prepare( train_dataloader, eval_dataloader, model, optimizer ) num_epochs = 3 num_training_steps = num_epochs * len(train_dl) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps, ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dl: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) from accelerate import notebook_launcher notebook_launcher(training_function)<jupyter_output><empty_output>
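Editor's note: the final cell calls `notebook_launcher(training_function)`, but `training_function` itself is defined in the accompanying course text rather than in this notebook. A minimal sketch of such a function, reusing the `tokenized_datasets`, `data_collator` and `checkpoint` objects defined above, could look like the following; it simply wraps the 🤗 Accelerate loop shown earlier.

def training_function():
    # Assumes `tokenized_datasets`, `data_collator` and `checkpoint` from the cells above.
    from accelerate import Accelerator
    from torch.utils.data import DataLoader
    from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler

    accelerator = Accelerator()
    train_dl = DataLoader(
        tokenized_datasets["train"], shuffle=True, batch_size=8, collate_fn=data_collator
    )
    eval_dl = DataLoader(
        tokenized_datasets["validation"], batch_size=8, collate_fn=data_collator
    )
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
    optimizer = AdamW(model.parameters(), lr=3e-5)
    train_dl, eval_dl, model, optimizer = accelerator.prepare(
        train_dl, eval_dl, model, optimizer
    )

    num_epochs = 3
    num_training_steps = num_epochs * len(train_dl)
    lr_scheduler = get_scheduler(
        "linear",
        optimizer=optimizer,
        num_warmup_steps=0,
        num_training_steps=num_training_steps,
    )

    model.train()
    for epoch in range(num_epochs):
        for batch in train_dl:
            outputs = model(**batch)
            accelerator.backward(outputs.loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()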
notebooks/course/fr/chapter3/section4.ipynb/0
{ "file_path": "notebooks/course/fr/chapter3/section4.ipynb", "repo_id": "notebooks", "token_count": 1937 }
135
<jupyter_start><jupyter_text>Réponses aux questions (PyTorch) Installez les bibliothèques 🤗 *Datasets* et 🤗 *Transformers* pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] !pip install accelerate # Pour exécuter l'entraînement sur TPU, vous devez décommenter la ligne suivante : # !pip install cloud-tpu-client==0.10 torch==1.9.0 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.9-cp37-cp37m-linux_x86_64.whl !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "[email protected]" !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez également être connecté au Hub d'Hugging Face. Exécutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login notebook_login() from datasets import load_dataset raw_datasets = load_dataset("piaf") # Piaf n'ayant pas de jeu de données de test, nous en créons un raw_datasets = raw_datasets['train'] raw_datasets = raw_datasets.train_test_split(test_size=0.2, shuffle=True) raw_datasets print("Context: ", raw_datasets["train"][0]["context"]) print("Question: ", raw_datasets["train"][0]["question"]) print("Answer: ", raw_datasets["train"][0]["answers"]) raw_datasets["train"].filter(lambda x: len(x["answers"]["text"]) != 1) print(raw_datasets["test"][0]["answers"]) print(raw_datasets["test"][2]["answers"]) print(raw_datasets["test"][2]["context"]) print(raw_datasets["test"][2]["question"]) from transformers import AutoTokenizer model_checkpoint = "camembert-base" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) tokenizer.is_fast context = raw_datasets["train"][0]["context"] question = raw_datasets["train"][0]["question"] inputs = tokenizer(question, context) tokenizer.decode(inputs["input_ids"]) inputs = tokenizer( question, context, max_length=100, truncation="only_second", stride=50, return_overflowing_tokens=True, ) for ids in inputs["input_ids"]: print(tokenizer.decode(ids)) inputs = tokenizer( question, context, max_length=100, truncation="only_second", stride=50, return_overflowing_tokens=True, return_offsets_mapping=True, ) inputs.keys() inputs["overflow_to_sample_mapping"] inputs = tokenizer( raw_datasets["train"][2:6]["question"], raw_datasets["train"][2:6]["context"], max_length=100, truncation="only_second", stride=50, return_overflowing_tokens=True, return_offsets_mapping=True, ) print(f"The 4 examples gave {len(inputs['input_ids'])} features.") print(f"Here is where each comes from: {inputs['overflow_to_sample_mapping']}.") answers = raw_datasets["train"][2:6]["answers"] start_positions = [] end_positions = [] for i, offset in enumerate(inputs["offset_mapping"]): sample_idx = inputs["overflow_to_sample_mapping"][i] answer = answers[sample_idx] start_char = answer["answer_start"][0] end_char = answer["answer_start"][0] + len(answer["text"][0]) sequence_ids = inputs.sequence_ids(i) # Trouver le début et la fin du contexte idx = 0 while sequence_ids[idx] != 1: idx += 1 context_start = idx while sequence_ids[idx] == 1: idx += 1 context_end = idx - 1 # Si la réponse n'est pas entièrement dans le contexte, l'étiquette est (0, 0) if offset[context_start][0] > start_char or offset[context_end][1] < end_char: start_positions.append(0) end_positions.append(0) else: # Sinon, ce sont les positions de début et de fin du token idx = context_start while idx 
<= context_end and offset[idx][0] <= start_char: idx += 1 start_positions.append(idx - 1) idx = context_end while idx >= context_start and offset[idx][1] >= end_char: idx -= 1 end_positions.append(idx + 1) start_positions, end_positions idx = 0 sample_idx = inputs["overflow_to_sample_mapping"][idx] answer = answers[sample_idx]["text"][0] start = start_positions[idx] end = end_positions[idx] labeled_answer = tokenizer.decode(inputs["input_ids"][idx][start : end + 1]) print(f"Theoretical answer: {answer}, labels give: {labeled_answer}") idx = 4 sample_idx = inputs["overflow_to_sample_mapping"][idx] answer = answers[sample_idx]["text"][0] decoded_example = tokenizer.decode(inputs["input_ids"][idx]) print(f"Theoretical answer: {answer}, decoded example: {decoded_example}") max_length = 384 stride = 128 def preprocess_training_examples(examples): questions = [q.strip() for q in examples["question"]] inputs = tokenizer( questions, examples["context"], max_length=max_length, truncation="only_second", stride=stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) offset_mapping = inputs.pop("offset_mapping") sample_map = inputs.pop("overflow_to_sample_mapping") answers = examples["answers"] start_positions = [] end_positions = [] for i, offset in enumerate(offset_mapping): sample_idx = sample_map[i] answer = answers[sample_idx] start_char = answer["answer_start"][0] end_char = answer["answer_start"][0] + len(answer["text"][0]) sequence_ids = inputs.sequence_ids(i) # Trouver le début et la fin du contexte idx = 0 while sequence_ids[idx] != 1: idx += 1 context_start = idx while sequence_ids[idx] == 1: idx += 1 context_end = idx - 1 # Si la réponse n'est pas entièrement dans le contexte, l'étiquette est (0, 0) if offset[context_start][0] > start_char or offset[context_end][1] < end_char: start_positions.append(0) end_positions.append(0) else: # Sinon, ce sont les positions de début et de fin du token idx = context_start while idx <= context_end and offset[idx][0] <= start_char: idx += 1 start_positions.append(idx - 1) idx = context_end while idx >= context_start and offset[idx][1] >= end_char: idx -= 1 end_positions.append(idx + 1) inputs["start_positions"] = start_positions inputs["end_positions"] = end_positions return inputs train_dataset = raw_datasets["train"].map( preprocess_training_examples, batched=True, remove_columns=raw_datasets["train"].column_names, ) len(raw_datasets["train"]), len(train_dataset) def preprocess_validation_examples(examples): questions = [q.strip() for q in examples["question"]] inputs = tokenizer( questions, examples["context"], max_length=max_length, truncation="only_second", stride=stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) sample_map = inputs.pop("overflow_to_sample_mapping") example_ids = [] for i in range(len(inputs["input_ids"])): sample_idx = sample_map[i] example_ids.append(examples["id"][sample_idx]) sequence_ids = inputs.sequence_ids(i) offset = inputs["offset_mapping"][i] inputs["offset_mapping"][i] = [ o if sequence_ids[k] == 1 else None for k, o in enumerate(offset) ] inputs["example_id"] = example_ids return inputs validation_dataset = raw_datasets["test"].map( preprocess_validation_examples, batched=True, remove_columns=raw_datasets["test"].column_names, ) len(raw_datasets["test"]), len(validation_dataset) small_eval_set = raw_datasets["test"].select(range(100)) trained_checkpoint = "distilbert-base-cased-distilled-squad" tokenizer = 
AutoTokenizer.from_pretrained(trained_checkpoint) eval_set = small_eval_set.map( preprocess_validation_examples, batched=True, remove_columns=raw_datasets["test"].column_names, ) tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) import torch from transformers import AutoModelForQuestionAnswering eval_set_for_model = eval_set.remove_columns(["example_id", "offset_mapping"]) eval_set_for_model.set_format("torch") device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") batch = {k: eval_set_for_model[k].to(device) for k in eval_set_for_model.column_names} trained_model = AutoModelForQuestionAnswering.from_pretrained(trained_checkpoint).to( device ) with torch.no_grad(): outputs = trained_model(**batch) start_logits = outputs.start_logits.cpu().numpy() end_logits = outputs.end_logits.cpu().numpy() import collections example_to_features = collections.defaultdict(list) for idx, feature in enumerate(eval_set): example_to_features[feature["example_id"]].append(idx) import numpy as np n_best = 20 max_answer_length = 30 predicted_answers = [] for example in small_eval_set: example_id = example["id"] context = example["context"] answers = [] for feature_index in example_to_features[example_id]: start_logit = start_logits[feature_index] end_logit = end_logits[feature_index] offsets = eval_set["offset_mapping"][feature_index] start_indexes = np.argsort(start_logit)[-1 : -n_best - 1 : -1].tolist() end_indexes = np.argsort(end_logit)[-1 : -n_best - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Ignorer les réponses qui ne sont pas entièrement dans le contexte if offsets[start_index] is None or offsets[end_index] is None: continue # Ignorer les réponses dont la longueur est soit < 0 soit > max_answer_length if ( end_index < start_index or end_index - start_index + 1 > max_answer_length ): continue answers.append( { "text": context[offsets[start_index][0] : offsets[end_index][1]], "logit_score": start_logit[start_index] + end_logit[end_index], } ) best_answer = max(answers, key=lambda x: x["logit_score"]) predicted_answers.append({"id": example_id, "prediction_text": best_answer["text"]}) from datasets import load_metric metric = load_metric("squad") theoretical_answers = [ {"id": ex["id"], "answers": ex["answers"]} for ex in small_eval_set ] print(predicted_answers[0]) print(theoretical_answers[0]) metric.compute(predictions=predicted_answers, references=theoretical_answers) from tqdm.auto import tqdm def compute_metrics(start_logits, end_logits, features, examples): example_to_features = collections.defaultdict(list) for idx, feature in enumerate(features): example_to_features[feature["example_id"]].append(idx) predicted_answers = [] for example in tqdm(examples): example_id = example["id"] context = example["context"] answers = [] # Parcourir en boucle toutes les fonctionnalités associées à cet exemple for feature_index in example_to_features[example_id]: start_logit = start_logits[feature_index] end_logit = end_logits[feature_index] offsets = features[feature_index]["offset_mapping"] start_indexes = np.argsort(start_logit)[-1 : -n_best - 1 : -1].tolist() end_indexes = np.argsort(end_logit)[-1 : -n_best - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Ignorez les réponses qui ne sont pas entièrement dans le contexte if offsets[start_index] is None or offsets[end_index] is None: continue # Sauter les réponses dont la longueur est soit < 0, soit > max_answer_length if ( end_index < start_index or 
end_index - start_index + 1 > max_answer_length ): continue answer = { "text": context[offsets[start_index][0] : offsets[end_index][1]], "logit_score": start_logit[start_index] + end_logit[end_index], } answers.append(answer) # Sélectionnez la réponse avec le meilleur score if len(answers) > 0: best_answer = max(answers, key=lambda x: x["logit_score"]) predicted_answers.append( {"id": example_id, "prediction_text": best_answer["text"]} ) else: predicted_answers.append({"id": example_id, "prediction_text": ""}) theoretical_answers = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples] return metric.compute(predictions=predicted_answers, references=theoretical_answers) compute_metrics(start_logits, end_logits, eval_set, small_eval_set) model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint) from transformers import TrainingArguments args = TrainingArguments( "camembert-base-finetuned-piaf", evaluation_strategy="no", save_strategy="epoch", learning_rate=2e-5, num_train_epochs=3, weight_decay=0.01, fp16=True, push_to_hub=True, ) from transformers import Trainer trainer = Trainer( model=model, args=args, train_dataset=train_dataset, eval_dataset=validation_dataset, tokenizer=tokenizer, ) trainer.train() predictions, _, _ = trainer.predict(validation_dataset) start_logits, end_logits = predictions compute_metrics(start_logits, end_logits, validation_dataset, raw_datasets["test"]) trainer.push_to_hub(commit_message="Training complete") from torch.utils.data import DataLoader from transformers import default_data_collator train_dataset.set_format("torch") validation_set = validation_dataset.remove_columns(["example_id", "offset_mapping"]) validation_set.set_format("torch") train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=8, ) eval_dataloader = DataLoader( validation_set, collate_fn=default_data_collator, batch_size=8 ) model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint) from torch.optim import AdamW optimizer = AdamW(model.parameters(), lr=2e-5) from accelerate import Accelerator accelerator = Accelerator(fp16=True) model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) from transformers import get_scheduler num_train_epochs = 3 num_update_steps_per_epoch = len(train_dataloader) num_training_steps = num_train_epochs * num_update_steps_per_epoch lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps, ) from huggingface_hub import Repository, get_full_repo_name model_name = "camembert-base-finetuned-piaf-accelerate" repo_name = get_full_repo_name(model_name) repo_name output_dir = "camembert-base-finetuned-piaf-accelerate" repo = Repository(output_dir, clone_from=repo_name) from tqdm.auto import tqdm import torch progress_bar = tqdm(range(num_training_steps)) for epoch in range(num_train_epochs): # Entraînement model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) # Evaluation model.eval() start_logits = [] end_logits = [] accelerator.print("Evaluation!") for batch in tqdm(eval_dataloader): with torch.no_grad(): outputs = model(**batch) start_logits.append(accelerator.gather(outputs.start_logits).cpu().numpy()) end_logits.append(accelerator.gather(outputs.end_logits).cpu().numpy()) start_logits = 
np.concatenate(start_logits) end_logits = np.concatenate(end_logits) start_logits = start_logits[: len(validation_dataset)] end_logits = end_logits[: len(validation_dataset)] metrics = compute_metrics( start_logits, end_logits, validation_dataset, raw_datasets["test"] ) print(f"epoch {epoch}:", metrics) # Sauvegarder et télécharger accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save) if accelerator.is_main_process: tokenizer.save_pretrained(output_dir) repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False ) accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save) from transformers import pipeline # Remplacez par votre propre checkpoint model_checkpoint = "huggingface-course/camembert-finetuned-piaf" question_answerer = pipeline("question-answering", model=model_checkpoint) context = """ 🤗 Transformers est soutenu par les trois bibliothèques d'apprentissage profond les plus populaires - Jax, PyTorch et TensorFlow - avec une intégration transparente entre elles. Il est simple d'entraîner vos modèles avec l'une avant de les charger pour l'inférence avec l'autre. """ question = "Quelles sont les bibliothèques d'apprentissage profond derrière 🤗 Transformers ?" question_answerer(question=question, context=context)<jupyter_output><empty_output>
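An extra sketch (editor's addition, not in the original notebook) that makes the structure of the pipeline output explicit: the question-answering pipeline returns the answer text together with a confidence score and the character span inside the context.

# Assumes `question_answerer`, `question` and `context` from the cell above.
result = question_answerer(question=question, context=context)
print(f"answer: {result['answer']}")
print(f"score:  {result['score']:.3f}")
print(f"span:   characters {result['start']} to {result['end']} of the context")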
notebooks/course/fr/chapter7/section7_pt.ipynb/0
{ "file_path": "notebooks/course/fr/chapter7/section7_pt.ipynb", "repo_id": "notebooks", "token_count": 7562 }
136
<jupyter_start><jupyter_text>LoRAs of the World Unite - Training SOTA DreamBooth LoRA with Pivotal Tuning 🧨In this notebook, we show how to fine-tune [Stable Diffusion XL (SDXL)](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl) with [DreamBooth](https://huggingface.co/docs/diffusers/main/en/training/dreambooth) and [LoRA](https://huggingface.co/docs/diffusers/main/en/training/lora) using some of the most popular SOTA methods.Learn more about the techniques used in this exmaple [here](linke to blogpost)Let's get started 🧪 Setup 🪓<jupyter_code># Install dependencies. !pip install xformers bitsandbytes transformers accelerate wandb dadaptation prodigyopt -q !pip install peft -q<jupyter_output>WARNING: Ignoring invalid distribution -etworkx (/usr/lib/python3/dist-packages) WARNING: Ignoring invalid distribution -etworkx (/usr/lib/python3/dist-packages) DEPRECATION: distro-info 0.23ubuntu1 has a non-standard version number. pip 23.3 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of distro-info or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 DEPRECATION: python-debian 0.1.36ubuntu1 has a non-standard version number. pip 23.3 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of python-debian or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063  [notice] A new [...]<jupyter_text>Make sure to install `diffusers` from `main`.<jupyter_code>!pip install git+https://github.com/huggingface/diffusers.git -q<jupyter_output>WARNING: Ignoring invalid distribution -etworkx (/usr/lib/python3/dist-packages) WARNING: Ignoring invalid distribution -etworkx (/usr/lib/python3/dist-packages) DEPRECATION: distro-info 0.23ubuntu1 has a non-standard version number. pip 23.3 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of distro-info or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 DEPRECATION: python-debian 0.1.36ubuntu1 has a non-standard version number. pip 23.3 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of python-debian or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063  [notice] A new [...]<jupyter_text>Download diffusers SDXL DreamBooth training script.<jupyter_code>!wget https://raw.githubusercontent.com/huggingface/diffusers/main/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py<jupyter_output>--2023-12-04 08:52:41-- https://raw.githubusercontent.com/huggingface/diffusers/main/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.111.133, 185.199.109.133, ... Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected. HTTP request sent, awaiting response... 
200 OK Length: 86471 (84K) [text/plain] Saving to: ‘train_dreambooth_lora_sdxl_advanced.py.1’ train_dreambooth_lo 100%[===================>] 84.44K --.-KB/s in 0.001s 2023-12-04 08:52:41 (111 MB/s) - ‘train_dreambooth_lora_sdxl_advanced.py.1’ saved [86471/86471]<jupyter_text>Dataset 🐶 **Let's get our training data!**For this example, we'll download some images from the hub.If you already have a dataset on the hub you wish to use, you can skip this part and go straight to: "Prep fortraining 💻" section, where you'll simply specify the dataset name.If your images are saved locally, and/or you want to add BLIP generated captions,pick option 1 or 2 below. **Option 1:** upload example images from your local files:<jupyter_code>import os from google.colab import files # pick a name for the image folder local_dir = "./my_folder" #@param os.makedirs(local_dir) os.chdir(local_dir) # choose and upload local images into the newly created directory uploaded_images = files.upload() os.chdir("/content") # back to parent directory<jupyter_output><empty_output><jupyter_text>**Option 2:** download example images from the hub -<jupyter_code>from huggingface_hub import snapshot_download local_dir = "./3d_icon" #@param dataset_to_download = "LinoyTsaban/3d_icon" #@param snapshot_download( dataset_to_download, local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes", )<jupyter_output><empty_output><jupyter_text>Preview the images:<jupyter_code>from PIL import Image def image_grid(imgs, rows, cols, resize=256): assert len(imgs) == rows * cols if resize is not None: imgs = [img.resize((resize, resize)) for img in imgs] w, h = imgs[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) grid_w, grid_h = grid.size for i, img in enumerate(imgs): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid import glob local_dir = "./3d_icon" img_paths = f"{local_dir}/*.jpg" imgs = [Image.open(path) for path in glob.glob(img_paths)] num_imgs_to_preview = 5 image_grid(imgs[:num_imgs_to_preview], 1, num_imgs_to_preview)<jupyter_output>/home/ubuntu/.local/lib/python3.10/site-packages/PIL/Image.py:3182: DecompressionBombWarning: Image size (122880000 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack. warnings.warn( /home/ubuntu/.local/lib/python3.10/site-packages/PIL/Image.py:3182: DecompressionBombWarning: Image size (132710400 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack. 
warnings.warn(<jupyter_text>Generate custom captions with BLIP Load BLIP2 to auto caption your images: **Note:** if you downloaded the `LinoyTsaban/3d_icon dataset` from the hub, you would find it already contains captions (generated with BLIP and prefixed with a token identifier) in the `metadata.jsonl` fileYou can skip this part if you wish to train on that dataset using the existing captions.<jupyter_code>import requests from transformers import Blip2Processor, Blip2ForConditionalGeneration import torch device = "cuda" if torch.cuda.is_available() else "cpu" # load pipelines blip_processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") blip_model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-opt-2.7b",torch_dtype=torch.float16).to(device) ## IMAGE CPATIONING ## def caption_images(input_image): inputs = blip_processor(images=input_image, return_tensors="pt").to(device, torch.float16) pixel_values = inputs.pixel_values generated_ids = blip_model.generate(pixel_values=pixel_values, max_length=50) generated_caption = blip_processor.batch_decode(generated_ids, skip_special_tokens=True)[0] return generated_caption import glob from PIL import Image # create a list of (Pil.Image, path) pairs local_dir = "./3d_icon/" imgs_and_paths = [(path,Image.open(path)) for path in glob.glob(f"{local_dir}*.jpg")]<jupyter_output>/home/ubuntu/.local/lib/python3.10/site-packages/PIL/Image.py:3182: DecompressionBombWarning: Image size (122880000 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack. warnings.warn( /home/ubuntu/.local/lib/python3.10/site-packages/PIL/Image.py:3182: DecompressionBombWarning: Image size (132710400 pixels) exceeds limit of 89478485 pixels, could be decompression bomb DOS attack. warnings.warn(<jupyter_text>Now let's add the concept token identifier (e.g. TOK) to each caption using a caption prefix.*Note:* When training with **pivotal tuning**, this token identifier (e.g. TOK) is only a **place holder**, and will be mapped to new tokens we insert to the tokenizers - so no need to spend too much time choosing the token!Change the prefix according to the concept you're training on:- for this example we can use "In the style of TOK," other options include: - For objects - "photoof a TOK/ a TOK" - For faces - "photo of a TOK person"- You can add additional identifiers to the prefix that can help steer the model in the right direction.-- e.g. 
for this example, instead of "In the style of TOK" we can use "3d icon in the style of TOK"/"a TOK 3d style icon" saves image paths and corresponding prompts to metadata file for training<jupyter_code>import json from IPython.display import display, Markdown caption_prefix = "3d icon in the style of TOK, " #@param # saves each caption and corresponding image to a metadata.jsonl file with open(f'{local_dir}metadata.jsonl', 'w') as outfile: for img in imgs_and_paths: caption = caption_prefix + caption_images(img[1]).split("\n")[0] entry = {"file_name":img[0].split("/")[-1], "prompt": caption} json.dump(entry, outfile) outfile.write('\n') display(Markdown(f"Your image captions are ready here: {local_dir}metadata.jsonl"))<jupyter_output><empty_output><jupyter_text>Free some memory:<jupyter_code>import gc # delete the BLIP2 pipelines and clear up some memory del blip_processor, blip_model gc.collect() torch.cuda.empty_cache()<jupyter_output><empty_output><jupyter_text>Prep for training 💻 Initialize `accelerate`:<jupyter_code>!accelerate config default<jupyter_output>Configuration already exists at /home/ubuntu/.cache/huggingface/accelerate/default_config.yaml, will not override. Run `accelerate config` manually or pass a different `save_location`.<jupyter_text>Log into your Hugging Face accountPass [your **write** access token](https://huggingface.co/settings/tokens) so that we can push the trained checkpoints to the Hugging Face Hub:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Train! 🔬 `Diffusers` 🧨 Training loop hyperparameters 📐1. **How to choose your hyperparams?** Check out this [blog post]() - where we explore and comapre different hyperparmas and configurations for different use cases, depending on your data and subject. 2. **Make sure to add** `push_to_hub` so that the checkpoint is automatically pushed to the Hub and doesn't get lost. The `--push_to_hub` argument ensures that the trained checkpoints are automatically pushed to the Hugging Face Hub.3. Some paramters that can help us with **compute** when doing DreamBooth with LoRA on a heavy pipeline like Stable Diffusion XL: * Gradient checkpointing (`--gradient_accumulation_steps`) * 8-bit Adam (`--use_8bit_adam`) - optional when using `--optimizer='AdamW'`, with `--optimizer='Prodigy'` this will be ignored * Mixed-precision training (`--mixed-precision="bf16"`) Launch training 🚀🚀🚀 **To allow for custom captions** we need to install the `datasets` library:- Use `--caption_column` to specify name of the cpation column in your dataset. - In this example we used `"prompt"` to save our captions in the metadata file, change this according to your needs.**Otherwise:** - you can skip the installation if you want to train soley with `--instance_prompt`. in that case, specify `--instance_data_dir` instead of `--dataset_name`<jupyter_code># makes sure we install datasets from main !pip install git+https://github.com/huggingface/datasets.git -q<jupyter_output>WARNING: Ignoring invalid distribution -etworkx (/usr/lib/python3/dist-packages) WARNING: Ignoring invalid distribution -etworkx (/usr/lib/python3/dist-packages) DEPRECATION: distro-info 0.23ubuntu1 has a non-standard version number. pip 23.3 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of distro-info or contact the author to suggest that they release a version with a conforming version number. 
Discussion can be found at https://github.com/pypa/pip/issues/12063 DEPRECATION: python-debian 0.1.36ubuntu1 has a non-standard version number. pip 23.3 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of python-debian or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063  [notice] A new [...]<jupyter_text>🤗Pick a name for your Dreambooth LoRA fine-tuned model:🤗This name will be used to save your model, so pick an informative name based on your chosen concept💡<jupyter_code>!pip install python-slugify from slugify import slugify model_name = "3d icon SDXL LoRA" # @param output_dir = slugify(model_name)<jupyter_output>Defaulting to user installation because normal site-packages is not writeable WARNING: Ignoring invalid distribution -etworkx (/usr/lib/python3/dist-packages) Looking in indexes: https://pypi.org/simple/ Requirement already satisfied: python-slugify in /home/ubuntu/.local/lib/python3.10/site-packages (8.0.1) Requirement already satisfied: text-unidecode>=1.3 in /home/ubuntu/.local/lib/python3.10/site-packages (from python-slugify) (1.3) WARNING: Ignoring invalid distribution -etworkx (/usr/lib/python3/dist-packages) DEPRECATION: distro-info 0.23ubuntu1 has a non-standard version number. pip 23.3 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of distro-info or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 DEPRECATION: python-debian 0.1.36ubuntu1 has a non-standard version [...]<jupyter_text>**Instance & Validation Prompt*** `instance_prompt` - * when custom captions are enabled this prompt is still used in case there are missing captions, as well as in the model's readme. * If custom captions are not used, this prompt will be used as the caption for all training images. * `validation_prompt` - * this prompt is used to generate images throught the training process, this way you can see the models learning curve during training. * you can also change `num_validation_images` (4 by default) and `validation_epochs` (50 by default) to control the amount images generated with the validation prompt, and the number of ephochs between each dreambooth validation.<jupyter_code>instance_prompt = "3d icon in the style of TOK" # @param validation_prompt = "a TOK icon of an astronaut riding a horse, in the style of TOK" # @param<jupyter_output><empty_output><jupyter_text>**Set your LoRA rank**The rank of your LoRA is linked to its expressiveness.The bigger the rank the closer we are to regular dreambooth, and in theory we have more expressive power (and heavier weights). For a very simple concept that you have a good high quality image set for (e.g. a pet, a generic object), a rank as low as 4 can be enough to get great results. 
We reccomend going between 8 and 64 depending on your concept and how much of a priortiy it is for you to keep the LoRA small or not.<jupyter_code>rank = 8 # @param #!/usr/bin/env bash !accelerate launch train_dreambooth_lora_sdxl_advanced.py \ --pretrained_model_name_or_path="stabilityai/stable-diffusion-xl-base-1.0" \ --pretrained_vae_model_name_or_path="madebyollin/sdxl-vae-fp16-fix" \ --dataset_name="./3d_icon" \ --instance_prompt="$instance_prompt" \ --validation_prompt="$validation_prompt" \ --output_dir="$output_dir" \ --caption_column="prompt" \ --mixed_precision="bf16" \ --resolution=1024 \ --train_batch_size=3 \ --repeats=1 \ --report_to="wandb"\ --gradient_accumulation_steps=1 \ --gradient_checkpointing \ --learning_rate=1.0 \ --text_encoder_lr=1.0 \ --adam_beta2=0.99 \ --optimizer="prodigy"\ --train_text_encoder_ti\ --train_text_encoder_ti_frac=0.5\ --snr_gamma=5.0 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --rank="$rank" \ --max_train_steps=1000 \ --checkpointing_steps=2000 \ --seed="0" \ --push_to_hub<jupyter_output>2023-12-04 08:53:29.479966: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. 2023-12-04 08:53:30.305590: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT 12/04/2023 08:53:32 - INFO - __main__ - Distributed environment: NO Num processes: 1 Process index: 0 Local process index: 0 Device: cuda Mixed precision type: bf16 You are using a model of type clip_text_model to instantiate a model of type . This is not supported for all configurations of models and can yield errors. You are using a model of type clip_text_model to instantiate a model of type . This is not supported for all configurations of models and can yield errors. {'dynamic_thresholding_ratio', 'variance_type', 'thresholding', 'clip_sample_range'} [...]<jupyter_text>Check out your model 🔥<jupyter_code>from huggingface_hub import whoami from pathlib import Path from IPython.display import display, Markdown username = whoami(token=Path("/root/.cache/huggingface/"))["name"] repo_id = f"{username}/{output_dir}" link_to_model = f"https://huggingface.co/{repo_id}" display(Markdown("### Your model has finished training.\nAccess it here: {}".format(link_to_model)))<jupyter_output><empty_output><jupyter_text>Inference 🐕<jupyter_code>import torch from huggingface_hub import hf_hub_download, upload_file from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL from safetensors.torch import load_file pipe = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", ).to("cuda") pipe.load_lora_weights(repo_id, weight_name="pytorch_lora_weights.safetensors")<jupyter_output>2023-12-04 09:37:49.371395: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. 
2023-12-04 09:37:50.221671: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT<jupyter_text>Load Pivotal Tuning Embeddings<jupyter_code>text_encoders = [pipe.text_encoder, pipe.text_encoder_2] tokenizers = [pipe.tokenizer, pipe.tokenizer_2] embedding_path = hf_hub_download(repo_id=repo_id, filename="embeddings.safetensors", repo_type="model") state_dict = load_file(embedding_path) # load embeddings of text_encoder 1 (CLIP ViT-L/14) pipe.load_textual_inversion(state_dict["clip_l"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer) # load embeddings of text_encoder 2 (CLIP ViT-G/14) pipe.load_textual_inversion(state_dict["clip_g"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2) instance_token = "<s0><s1>" prompt = f"a {instance_token} icon of an orange llama eating ramen, in the style of {instance_token}" image = pipe(prompt=prompt, num_inference_steps=25, cross_attention_kwargs={"scale": 1.0}).images[0] image<jupyter_output><empty_output>
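A small extra sketch (editor's addition, not in the original notebook) that generates a few variations of the same prompt and previews them with the `image_grid` helper defined earlier in this notebook; the seed and step count are arbitrary.

import torch

# Assumes `pipe`, `prompt` and `image_grid` from the cells above; seed and steps are arbitrary.
generator = torch.Generator("cuda").manual_seed(0)
images = pipe(
    prompt=prompt,
    num_inference_steps=25,
    num_images_per_prompt=3,
    generator=generator,
    cross_attention_kwargs={"scale": 1.0},
).images
image_grid(images, rows=1, cols=3)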
notebooks/diffusers/SDXL_Dreambooth_LoRA_advanced_example.ipynb/0
{ "file_path": "notebooks/diffusers/SDXL_Dreambooth_LoRA_advanced_example.ipynb", "repo_id": "notebooks", "token_count": 6610 }
137
<jupyter_start><jupyter_text>**Stable Diffusion** 🎨 *...using `🧨diffusers`*Stable Diffusion is a text-to-image latent diffusion model created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/) and [LAION](https://laion.ai/). It's trained on 512x512 images from a subset of the [LAION-5B](https://laion.ai/blog/laion-5b/) database. This model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts. With its 860M UNet and 123M text encoder, the model is relatively lightweight and can run on many consumer GPUs.See the [model card](https://huggingface.co/CompVis/stable-diffusion) for more information.This Colab notebook shows how to use Stable Diffusion with the 🤗 Hugging Face [🧨 Diffusers library](https://github.com/huggingface/diffusers). Let's get started! 1. How to use `StableDiffusionPipeline`Before diving into the theoretical aspects of how Stable Diffusion functions, let's try it out a bit 🤗.In this section, we show how you can run text to image inference in just a few lines of code! SetupFirst, please make sure you are using a GPU runtime to run this notebook, so inference is much faster. If the following command fails, use the `Runtime` menu above and select `Change runtime type`.<jupyter_code>!nvidia-smi<jupyter_output>Fri Dec 9 16:32:59 2022 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 | | N/A 72C P0 30W / 70W | 0MiB / 15109MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-------[...]<jupyter_text>Next, you should install `diffusers` as well `scipy`, `ftfy` and `transformers`. `accelerate` is used to achieve much faster loading.<jupyter_code>!pip install diffusers==0.11.1 !pip install transformers scipy ftfy accelerate<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Collecting diffusers==0.10.0 Downloading diffusers-0.10.0-py3-none-any.whl (502 kB)  |████████████████████████████████| 502 kB 18.5 MB/s [?25hRequirement already satisfied: numpy in /usr/local/lib/python3.8/dist-packages (from diffusers==0.10.0) (1.21.6) Collecting huggingface-hub>=0.10.0 Downloading huggingface_hub-0.11.1-py3-none-any.whl (182 kB)  |████████████████████████████████| 182 kB 54.0 MB/s [?25hRequirement already satisfied: filelock in /usr/local/lib/python3.8/dist-packages (from diffusers==0.10.0) (3.8.0) Requirement already satisfied: Pillow in /usr/local/lib/python3.8/dist-packages (from diffusers==0.10.0) (7.1.2) Requirement already satisfied: requests in /usr/local/lib/python3.8/dist-packages (from diffusers==0.10.0) (2.23.0) Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.8/dist-packages (from diffusers==0.10.0) (2022.6.2)[...]<jupyter_text>Stable Diffusion Pipeline`StableDiffusionPipeline` is an end-to-end inference pipeline that you can use to generate images from text with just a few lines of code.First, we load the pre-trained weights of all components of the model. 
In this notebook we use Stable Diffusion version 1.4 ([CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4)), but there are other variants that you may want to try:* [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)* [stabilityai/stable-diffusion-2-1-base](https://huggingface.co/stabilityai/stable-diffusion-2-1-base)* [stabilityai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1). This version can produce images with a resolution of 768x768, while the others work at 512x512.In addition to the model id [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4), we're also passing a specific `revision` and `torch_dtype` to the `from_pretrained` method.We want to ensure that every free Google Colab can run Stable Diffusion, hence we're loading the weights from the half-precision branch [`fp16`](https://huggingface.co/CompVis/stable-diffusion-v1-4/tree/fp16) and also tell `diffusers` to expect the weights in float16 precision by passing `torch_dtype=torch.float16`.If you want to ensure the highest possible precision, please make sure to remove `torch_dtype=torch.float16` at the cost of a higher memory usage.<jupyter_code>import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)<jupyter_output><empty_output><jupyter_text>Next, let's move the pipeline to GPU to have faster inference.<jupyter_code>pipe = pipe.to("cuda")<jupyter_output><empty_output><jupyter_text>And we are ready to generate images:<jupyter_code>prompt = "a photograph of an astronaut riding a horse" image = pipe(prompt).images[0] # image here is in [PIL format](https://pillow.readthedocs.io/en/stable/) # Now to display an image you can either save it such as: image.save(f"astronaut_rides_horse.png") # or if you're in a google colab you can directly display it with image<jupyter_output><empty_output><jupyter_text>Running the above cell multiple times will give you a different image every time. If you want deterministic output you can pass a random seed to the pipeline. Every time you use the same seed you'll have the same image result.<jupyter_code>import torch generator = torch.Generator("cuda").manual_seed(1024) image = pipe(prompt, generator=generator).images[0] image<jupyter_output><empty_output><jupyter_text>You can change the number of inference steps using the `num_inference_steps` argument. In general, results are better the more steps you use. Stable Diffusion, being one of the latest models, works great with a relatively small number of steps, so we recommend to use the default of `50`. If you want faster results you can use a smaller number.The following cell uses the same seed as before, but with fewer steps. Note how some details, such as the horse's head or the helmet, are less defin realistic and less defined than in the previous image:<jupyter_code>import torch generator = torch.Generator("cuda").manual_seed(1024) image = pipe(prompt, num_inference_steps=15, generator=generator).images[0] image<jupyter_output><empty_output><jupyter_text>The other parameter in the pipeline call is `guidance_scale`. It is a way to increase the adherence to the conditional signal which in this case is text as well as overall sample quality. In simple terms classifier free guidance forces the generation to better match with the prompt. 
Numbers like `7` or `8.5` give good results, if you use a very large number the images might look good, but will be less diverse. You can learn about the technical details of this parameter in [the last section](https://colab.research.google.com/drive/1ALXuCM5iNnJDNW5vqBm5lCtUQtZJHN2f?authuser=1scrollTo=UZp-ynZLrS-S) of this notebook. To generate multiple images for the same prompt, we simply use a list with the same prompt repeated several times. We'll send the list to the pipeline instead of the string we used before. Let's first write a helper function to display a grid of images. Just run the following cell to create the `image_grid` function, or disclose the code if you are interested in how it's done.<jupyter_code>from PIL import Image def image_grid(imgs, rows, cols): assert len(imgs) == rows*cols w, h = imgs[0].size grid = Image.new('RGB', size=(cols*w, rows*h)) grid_w, grid_h = grid.size for i, img in enumerate(imgs): grid.paste(img, box=(i%cols*w, i//cols*h)) return grid<jupyter_output><empty_output><jupyter_text>Now, we can generate a grid image once having run the pipeline with a list of 3 prompts.<jupyter_code>num_images = 3 prompt = ["a photograph of an astronaut riding a horse"] * num_images images = pipe(prompt).images grid = image_grid(images, rows=1, cols=3) grid<jupyter_output><empty_output><jupyter_text>And here's how to generate a grid of `n × m` images.<jupyter_code>num_cols = 3 num_rows = 4 prompt = ["a photograph of an astronaut riding a horse"] * num_cols all_images = [] for i in range(num_rows): images = pipe(prompt).images all_images.extend(images) grid = image_grid(all_images, rows=num_rows, cols=num_cols) grid<jupyter_output><empty_output><jupyter_text>Generate non-square imagesStable Diffusion produces images of `512 × 512` pixels by default. But it's very easy to override the default using the `height` and `width` arguments, so you can create rectangular images in portrait or landscape ratios.These are some recommendations to choose good image sizes:- Make sure `height` and `width` are both multiples of `8`.- Going below 512 might result in lower quality images.- Going over 512 in both directions will repeat image areas (global coherence is lost).- The best way to create non-square images is to use `512` in one dimension, and a value larger than that in the other one.<jupyter_code>prompt = "a photograph of an astronaut riding a horse" image = pipe(prompt, height=512, width=768).images[0] image<jupyter_output><empty_output><jupyter_text>2. What is Stable DiffusionNow, let's go into the theoretical part of Stable Diffusion 👩‍🎓.Stable Diffusion is based on a particular type of diffusion model called **Latent Diffusion**, proposed in [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752). General diffusion models are machine learning systems that are trained to *denoise* random gaussian noise step by step, to get to a sample of interest, such as an *image*. For a more detailed overview of how they work, check [this colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb).Diffusion models have shown to achieve state-of-the-art results for generating image data. But one downside of diffusion models is that the reverse denoising process is slow. In addition, these models consume a lot of memory because they operate in pixel space, which becomes unreasonably expensive when generating high-resolution images. 
Therefore, it is challenging to train these models and also use them for inference. Latent diffusion can reduce the memory and compute complexity by applying the diffusion process over a lower dimensional _latent_ space, instead of using the actual pixel space. This is the key difference between standard diffusion and latent diffusion models: **in latent diffusion the model is trained to generate latent (compressed) representations of the images.** There are three main components in latent diffusion.1. An autoencoder (VAE).2. A [U-Net](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynbscrollTo=wW8o1Wp0zRkq).3. A text-encoder, *e.g.* [CLIP's Text Encoder](https://huggingface.co/docs/transformers/model_doc/cliptransformers.CLIPTextModel). **1. The autoencoder (VAE)**The VAE model has two parts, an encoder and a decoder. The encoder is used to convert the image into a low dimensional latent representation, which will serve as the input to the *U-Net* model.The decoder, conversely, transforms the latent representation back into an image. During latent diffusion _training_, the encoder is used to get the latent representations (_latents_) of the images for the forward diffusion process, which applies more and more noise at each step. During _inference_, the denoised latents generated by the reverse diffusion process are converted back into images using the VAE decoder. As we will see during inference we **only need the VAE decoder**. **2. The U-Net**The U-Net has an encoder part and a decoder part both comprised of ResNet blocks.The encoder compresses an image representation into a lower resolution image representation and the decoder decodes the lower resolution image representation back to the original higher resolution image representation that is supposedly less noisy.More specifically, the U-Net output predicts the noise residual which can be used to compute the predicted denoised image representation.To prevent the U-Net from losing important information while downsampling, short-cut connections are usually added between the downsampling ResNets of the encoder to the upsampling ResNets of the decoder.Additionally, the stable diffusion U-Net is able to condition its output on text-embeddings via cross-attention layers. The cross-attention layers are added to both the encoder and decoder part of the U-Net usually between ResNet blocks. **3. The Text-encoder**The text-encoder is responsible for transforming the input prompt, *e.g.* "An astronout riding a horse" into an embedding space that can be understood by the U-Net. It is usually a simple *transformer-based* encoder that maps a sequence of input tokens to a sequence of latent text-embeddings.Inspired by [Imagen](https://imagen.research.google/), Stable Diffusion does **not** train the text-encoder during training and simply uses an CLIP's already trained text encoder, [CLIPTextModel](https://huggingface.co/docs/transformers/model_doc/cliptransformers.CLIPTextModel). **Why is latent diffusion fast and efficient?**Since the U-Net of latent diffusion models operates on a low dimensional space, it greatly reduces the memory and compute requirements compared to pixel-space diffusion models. For example, the autoencoder used in Stable Diffusion has a reduction factor of 8. 
This means that an image of shape `(3, 512, 512)` becomes `(3, 64, 64)` in latent space, which requires `8 × 8 = 64` times less memory.This is why it's possible to generate `512 × 512` images so quickly, even on 16GB Colab GPUs! **Stable Diffusion during inference**Putting it all together, let's now take a closer look at how the model works in inference by illustrating the logical flow. The stable diffusion model takes both a latent seed and a text prompt as an input. The latent seed is then used to generate random latent image representations of size $64 \times 64$ where as the text prompt is transformed to text embeddings of size $77 \times 768$ via CLIP's text encoder.Next the U-Net iteratively *denoises* the random latent image representations while being conditioned on the text embeddings. The output of the U-Net, being the noise residual, is used to compute a denoised latent image representation via a scheduler algorithm. Many different scheduler algorithms can be used for this computation, each having its pros and cons. For Stable Diffusion, we recommend using one of:- [PNDM scheduler](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_pndm.py) (used by default).- [K-LMS scheduler](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_lms_discrete.py).- [Heun Discrete scheduler](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_heun_discrete.py).- [DPM Solver Multistep scheduler](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py). This scheduler is able to achieve great quality in less steps. You can try with 25 instead of the default 50!Theory on how the scheduler algorithm function is out of scope for this notebook, but in short one should remember that they compute the predicted denoised image representation from the previous noise representation and the predicted noise residual.For more information, we recommend looking into [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364)The *denoising* process is repeated *ca.* 50 times to step-by-step retrieve better latent image representations.Once complete, the latent image representation is decoded by the decoder part of the variational auto encoder. After this brief introduction to Latent and Stable Diffusion, let's see how to make advanced use of 🤗 Hugging Face Diffusers! 3. How to write your own inference pipeline with `diffusers`Finally, we show how you can create custom diffusion pipelines with `diffusers`.This is often very useful to dig a bit deeper into certain functionalities of the system and to potentially switch out certain components. In this section, we will demonstrate how to use Stable Diffusion with a different scheduler, namely [Katherine Crowson's](https://github.com/crowsonkb) K-LMS scheduler that was added in [this PR](https://github.com/huggingface/diffusers/pull/185pullrequestreview-1074247365). Let's go through the `StableDiffusionPipeline` step by step to see how we could have written it ourselves.We will start by loading the individual models involved.<jupyter_code>import torch torch_device = "cuda" if torch.cuda.is_available() else "cpu"<jupyter_output><empty_output><jupyter_text>The [pre-trained model](https://huggingface.co/CompVis/stable-diffusion-v1-3-diffusers/tree/main) includes all the components required to setup a complete diffusion pipeline. 
They are stored in the following folders:- `text_encoder`: Stable Diffusion uses CLIP, but other diffusion models may use other encoders such as `BERT`.- `tokenizer`. It must match the one used by the `text_encoder` model.- `scheduler`: The scheduling algorithm used to progressively add noise to the image during training.- `unet`: The model used to generate the latent representation of the input.- `vae`: Autoencoder module that we'll use to decode latent representations into real images.We can load the components by referring to the folder they were saved, using the `subfolder` argument to `from_pretrained`.<jupyter_code>from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler # 1. Load the autoencoder model which will be used to decode the latents into image space. vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae") # 2. Load the tokenizer and text encoder to tokenize and encode the text. tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14") # 3. The UNet model for generating the latents. unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")<jupyter_output><empty_output><jupyter_text>Now instead of loading the pre-defined scheduler, we'll use the K-LMS scheduler instead.<jupyter_code>from diffusers import LMSDiscreteScheduler scheduler = LMSDiscreteScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")<jupyter_output><empty_output><jupyter_text>Next we move the models to the GPU.<jupyter_code>vae = vae.to(torch_device) text_encoder = text_encoder.to(torch_device) unet = unet.to(torch_device)<jupyter_output><empty_output><jupyter_text>We now define the parameters we'll use to generate images.Note that `guidance_scale` is defined analog to the guidance weight `w` of equation (2) in the [Imagen paper](https://arxiv.org/pdf/2205.11487.pdf). `guidance_scale == 1` corresponds to doing no classifier-free guidance. Here we set it to 7.5 as also done previously.In contrast to the previous examples, we set `num_inference_steps` to 100 to get an even more defined image.<jupyter_code>prompt = ["a photograph of an astronaut riding a horse"] height = 512 # default height of Stable Diffusion width = 512 # default width of Stable Diffusion num_inference_steps = 100 # Number of denoising steps guidance_scale = 7.5 # Scale for classifier-free guidance generator = torch.manual_seed(32) # Seed generator to create the inital latent noise batch_size = 1<jupyter_output><empty_output><jupyter_text>First, we get the text_embeddings for the prompt. These embeddings will be used to condition the UNet model.<jupyter_code>text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") with torch.no_grad(): text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]<jupyter_output><empty_output><jupyter_text>We'll also get the unconditional text embeddings for classifier-free guidance, which are just the embeddings for the padding token (empty text). 
They need to have the same shape as the conditional `text_embeddings` (`batch_size` and `seq_length`)<jupyter_code>max_length = text_input.input_ids.shape[-1] uncond_input = tokenizer( [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt" ) with torch.no_grad(): uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]<jupyter_output><empty_output><jupyter_text>For classifier-free guidance, we need to do two forward passes. One with the conditioned input (`text_embeddings`), and another with the unconditional embeddings (`uncond_embeddings`). In practice, we can concatenate both into a single batch to avoid doing two forward passes.<jupyter_code>text_embeddings = torch.cat([uncond_embeddings, text_embeddings])<jupyter_output><empty_output><jupyter_text>Generate the intial random noise.<jupyter_code>latents = torch.randn( (batch_size, unet.in_channels, height // 8, width // 8), generator=generator, ) latents = latents.to(torch_device) latents.shape<jupyter_output><empty_output><jupyter_text>Cool $64 \times 64$ is expected. The model will transform this latent representation (pure noise) into a `512 × 512` image later on.Next, we initialize the scheduler with our chosen `num_inference_steps`.This will compute the `sigmas` and exact time step values to be used during the denoising process.<jupyter_code>scheduler.set_timesteps(num_inference_steps)<jupyter_output><empty_output><jupyter_text>The K-LMS scheduler needs to multiply the `latents` by its `sigma` values. Let's do this here<jupyter_code>latents = latents * scheduler.init_noise_sigma<jupyter_output><empty_output><jupyter_text>We are ready to write the denoising loop.<jupyter_code>from tqdm.auto import tqdm from torch import autocast for t in tqdm(scheduler.timesteps): # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. latent_model_input = torch.cat([latents] * 2) latent_model_input = scheduler.scale_model_input(latent_model_input, t) # predict the noise residual with torch.no_grad(): noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform guidance noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = scheduler.step(noise_pred, t, latents).prev_sample<jupyter_output><empty_output><jupyter_text>We now use the `vae` to decode the generated `latents` back into the image.<jupyter_code># scale and decode the image latents with vae latents = 1 / 0.18215 * latents with torch.no_grad(): image = vae.decode(latents).sample<jupyter_output><empty_output><jupyter_text>And finally, let's convert the image to PIL so we can display or save it.<jupyter_code>image = (image / 2 + 0.5).clamp(0, 1) image = image.detach().cpu().permute(0, 2, 3, 1).numpy() images = (image * 255).round().astype("uint8") pil_images = [Image.fromarray(image) for image in images] pil_images[0]<jupyter_output><empty_output>
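If you plan to experiment further (other prompts, schedulers or guidance scales), it can be handy to bundle the last two cells into a small helper. The function below is only a convenience sketch and not part of the original pipeline code; it assumes the `vae`, `torch` and PIL `Image` objects from the earlier cells are still in scope, and it expects the raw latents produced by the denoising loop (i.e. before the manual scaling cell above).<jupyter_code>def latents_to_pil(latents):
    # Scale the latents back, decode them with the VAE and convert the result to PIL images.
    latents = 1 / 0.18215 * latents
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    return [Image.fromarray(img) for img in images]<jupyter_output><empty_output>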
notebooks/diffusers/stable_diffusion.ipynb/0
{ "file_path": "notebooks/diffusers/stable_diffusion.ipynb", "repo_id": "notebooks", "token_count": 7373 }
138
<jupyter_start><jupyter_text>Launching Multi-Node Training from a Jupyter Environment> Using the `notebook_launcher` to use Accelerate from inside a Jupyter Notebook General OverviewThis notebook covers how to run the `cv_example.py` script as a Jupyter Notebook and train it on a distributed system. It will also cover the few specific requirements needed to ensure your environment is configured properly, your data has been prepared properly, and finally how to launch training. Configuring the EnvironmentBefore any training can be performed, an accelerate config file must exist in the system. Usually this can be done by running the following in a terminal:```bashaccelerate config```However, if general defaults are fine and you are *not* running on a TPU, accelerate has a utility to quickly write your GPU configuration into a config file via `write_basic_config`.The following cell will restart Jupyter after writing the configuration, as CUDA code was called to perform this. CUDA can't be initialized more than once (once for the single GPU the notebook uses by default, and then again when `notebook_launcher` is called). It's fine to debug in the notebook and have calls to CUDA, but remember that in order to finally train, a full cleanup and restart will need to be performed, such as what is shown below:<jupyter_code>#import os #from accelerate.utils import write_basic_config #write_basic_config() # Write a config file #os._exit(00) # Restart the notebook<jupyter_output><empty_output><jupyter_text>Preparing the Dataset and ModelNext you should prepare your dataset. As mentioned earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU. If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later. A small optional check that CUDA has not been touched yet is sketched in the next cell. 
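Because CUDA can be initialized by accident while debugging (for example by creating or inspecting a tensor on the GPU), the small check below can confirm the kernel is still in a clean state before `notebook_launcher` is called. This cell is an illustrative addition and is not part of the original example.<jupyter_code>import torch

# If this prints True, CUDA has already been initialized in this process and you should
# restart the notebook kernel before launching distributed training.
print("CUDA already initialized:", torch.cuda.is_initialized())<jupyter_output><empty_output>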
Make sure the dataset is downloaded based on the directions [here](https://github.com/huggingface/accelerate/tree/main/examplessimple-vision-example)<jupyter_code>import os, re, torch, PIL import numpy as np from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator from accelerate.utils import set_seed from timm import create_model<jupyter_output><empty_output><jupyter_text>First we'll create a function to extract the class name based on a file:<jupyter_code>import os data_dir = "../../images" fnames = os.listdir(data_dir) fname = fnames[0] print(fname)<jupyter_output>beagle_32.jpg<jupyter_text>In the case here, the label is `beagle`:<jupyter_code>import re def extract_label(fname): stem = fname.split(os.path.sep)[-1] return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] extract_label(fname)<jupyter_output><empty_output><jupyter_text>Next we'll create a `Dataset` class:<jupyter_code>class PetsDataset(Dataset): def __init__(self, file_names, image_transform=None, label_to_id=None): self.file_names = file_names self.image_transform = image_transform self.label_to_id = label_to_id def __len__(self): return len(self.file_names) def __getitem__(self, idx): fname = self.file_names[idx] raw_image = PIL.Image.open(fname) image = raw_image.convert("RGB") if self.image_transform is not None: image = self.image_transform(image) label = extract_label(fname) if self.label_to_id is not None: label = self.label_to_id[label] return {"image": image, "label": label}<jupyter_output><empty_output><jupyter_text>And build our dataset<jupyter_code># Grab all the image filenames fnames = [ os.path.join(data_dir, fname) for fname in fnames if fname.endswith(".jpg") ] # Build the labels all_labels = [ extract_label(fname) for fname in fnames ] id_to_label = list(set(all_labels)) id_to_label.sort() label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}<jupyter_output><empty_output><jupyter_text>> Note: This will be stored inside of a function as we'll be setting our seed during training.<jupyter_code>def get_dataloaders(batch_size:int=64): "Builds a set of dataloaders with a batch_size" random_perm = np.random.permutation(len(fnames)) cut = int(0.8 * len(fnames)) train_split = random_perm[:cut] eval_split = random_perm[:cut] # For training we use a simple RandomResizedCrop train_tfm = Compose([ RandomResizedCrop((224, 224), scale=(0.5, 1.0)), ToTensor() ]) train_dataset = PetsDataset( [fnames[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id ) # For evaluation we use a deterministic Resize eval_tfm = Compose([ Resize((224, 224)), ToTensor() ]) eval_dataset = PetsDataset( [fnames[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id ) # Instantiate dataloaders train_dataloader = DataLoader( train_dataset, shuffle=True, batch_size=batch_size, num_workers=4 ) eval_dataloader = DataLoader( eval_dataset, shuffle=False, batch_size=batch_size*2, num_workers=4 ) return train_dataloader, eval_dataloader<jupyter_output><empty_output><jupyter_text>Writing the Training FunctionNow we can build our training loop. 
`notebook_launcher` works by passing in a function to call that will be ran across the distributed system.Here is a basic training loop for our animal classification problem:<jupyter_code>from torch.optim.lr_scheduler import CosineAnnealingLR def training_loop(mixed_precision="fp16", seed:int=42, batch_size:int=64): set_seed(seed) # Initialize accelerator accelerator = Accelerator(mixed_precision=mixed_precision) # Build dataloaders train_dataloader, eval_dataloader = get_dataloaders(batch_size) # instantiate the model (we build the model here so that the seed also controls new weight initaliziations) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # Freeze the base model for param in model.parameters(): param.requires_grad=False for param in model.get_classifier().parameters(): param.requires_grad=True # We normalize the batches of images to be a bit faster mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None] std = torch.tensor(model.default_cfg["std"])[None, :, None, None] # To make this constant available on the active device, we set it to the accelerator device mean = mean.to(accelerator.device) std = std.to(accelerator.device) # Intantiate the optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr = 3e-2/25) # Instantiate the learning rate scheduler lr_scheduler = OneCycleLR( optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(5): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() accurate = 0 num_elems = 0 for _, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"]) num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")<jupyter_output><empty_output><jupyter_text>All that's left is to use the `notebook_launcher`.We pass in the function, the arguments (as a tuple), and the number of processes to train on. (See the [documentation](https://huggingface.co/docs/accelerate/package_reference/launchersaccelerate.notebook_launcher) for more information)<jupyter_code>from accelerate import notebook_launcher args = ("fp16", 42, 64) notebook_launcher(training_loop, args, num_processes=2)<jupyter_output>Launching training on 2 GPUs. epoch 0: 88.12 epoch 1: 91.73 epoch 2: 92.58 epoch 3: 93.90 epoch 4: 94.71
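One thing `training_loop` does not do is persist the fine-tuned weights. A minimal way to add that, shown here only as a commented sketch because the variables it refers to exist inside `training_loop` (and the file name is just an example), is to unwrap and save the model on the main process at the end of the function.<jupyter_code># Illustrative addition for the end of training_loop (not part of the original example):
#
# accelerator.wait_for_everyone()
# unwrapped_model = accelerator.unwrap_model(model)
# accelerator.save(unwrapped_model.state_dict(), "resnet50d_pets.pth")<jupyter_output><empty_output>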
notebooks/examples/accelerate_examples/simple_cv_example.ipynb/0
{ "file_path": "notebooks/examples/accelerate_examples/simple_cv_example.ipynb", "repo_id": "notebooks", "token_count": 3573 }
139
<jupyter_start><jupyter_text>Fine-tune BLIP using Hugging Face `transformers` and `datasets` 🤗This tutorial is largely based on the [GiT tutorial](https://colab.research.google.com/drive/1HLxgrG7xZJ9FvXckNG61J72FkyrbqKAA?usp=sharing) on how to fine-tune GiT on a custom image captioning dataset. Here we will use a dummy dataset of [football players](https://huggingface.co/datasets/ybelkada/football-dataset) ⚽ that is uploaded to the Hub. The images have been manually selected together with the captions. Check the 🤗 [documentation](https://huggingface.co/docs/datasets/image_dataset) on how to create and upload your own image-text dataset. Set-up environment<jupyter_code>!pip install git+https://github.com/huggingface/transformers.git@main !pip install -q datasets<jupyter_output><empty_output><jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("image_captioning_blip_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Load the image captioning datasetLet's load the image captioning dataset; you just need a few lines of code for that.<jupyter_code>from datasets import load_dataset dataset = load_dataset("ybelkada/football-dataset", split="train")<jupyter_output>WARNING:datasets.builder:Using custom data configuration ybelkada--football-dataset-1ad065f8e9005a29 WARNING:datasets.builder:Found cached dataset parquet (/root/.cache/huggingface/datasets/ybelkada___parquet/ybelkada--football-dataset-1ad065f8e9005a29/0.0.0/2a3b91fbd88a2c90d1dbbb32b460cf621d31bd5b05b934492fdef7d8d6f236ec)<jupyter_text>Let's retrieve the caption of the first example:<jupyter_code>dataset[0]["text"]<jupyter_output><empty_output><jupyter_text>And the corresponding image:<jupyter_code>dataset[0]["image"]<jupyter_output><empty_output><jupyter_text>Create PyTorch Dataset The lines below are entirely copied from the original notebook!<jupyter_code>from torch.utils.data import Dataset, DataLoader class ImageCaptioningDataset(Dataset): def __init__(self, dataset, processor): self.dataset = dataset self.processor = processor def __len__(self): return len(self.dataset) def __getitem__(self, idx): item = self.dataset[idx] encoding = self.processor(images=item["image"], text=item["text"], padding="max_length", return_tensors="pt") # remove batch dimension encoding = {k:v.squeeze() for k,v in encoding.items()} return encoding<jupyter_output><empty_output><jupyter_text>Load model and processor<jupyter_code>from transformers import AutoProcessor, BlipForConditionalGeneration processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")<jupyter_output><empty_output><jupyter_text>Now that we have loaded the processor, let's create the PyTorch dataset and the dataloader:<jupyter_code>train_dataset = ImageCaptioningDataset(dataset, processor) train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=2)<jupyter_output><empty_output><jupyter_text>Train the model Let's train the model! 
Run the simply the cell below for training the model<jupyter_code>import torch optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5) device = "cuda" if torch.cuda.is_available() else "cpu" model.to(device) model.train() for epoch in range(50): print("Epoch:", epoch) for idx, batch in enumerate(train_dataloader): input_ids = batch.pop("input_ids").to(device) pixel_values = batch.pop("pixel_values").to(device) outputs = model(input_ids=input_ids, pixel_values=pixel_values, labels=input_ids) loss = outputs.loss print("Loss:", loss.item()) loss.backward() optimizer.step() optimizer.zero_grad()<jupyter_output>Epoch: 0 Loss: 13.106168746948242 Loss: 10.644421577453613 Loss: 9.593768119812012 Epoch: 1 Loss: 9.306917190551758 Loss: 9.081585884094238 Loss: 8.899713516235352 Epoch: 2 Loss: 8.757176399230957 Loss: 8.57335090637207 Loss: 8.46764087677002 Epoch: 3 Loss: 8.328741073608398 Loss: 8.201028823852539 Loss: 8.095505714416504 Epoch: 4 Loss: 7.967352867126465 Loss: 7.861735820770264 Loss: 7.732804298400879 Epoch: 5 Loss: 7.630571365356445 Loss: 7.519181251525879 Loss: 7.405021667480469 Epoch: 6 Loss: 7.284258842468262 Loss: 7.187586784362793 Loss: 7.060364723205566 Epoch: 7 Loss: 6.954672813415527 Loss: 6.846510410308838 Loss: 6.6976189613342285 Epoch: 8 Loss: 6.587822437286377 Loss: 6.486807346343994 Loss: 6.362427711486816 Epoch: 9 Loss: 6.233264923095703 Loss: 6.120891571044922 Loss: 5.994716644287109 Epoch: 10 Loss: 5.855278968811035 Loss: 5.752918243408203 Loss: 5.645371437072754 Epoch: 11 Loss: 5.505440711975098 Loss: 5.391564846038818 Loss: 5.268132209777832 Epoch: 12 Loss: 5.1524944[...]<jupyter_text>Inference Let's check the results on our train dataset<jupyter_code># load image example = dataset[0] image = example["image"] image # prepare image for the model inputs = processor(images=image, return_tensors="pt").to(device) pixel_values = inputs.pixel_values generated_ids = model.generate(pixel_values=pixel_values, max_length=50) generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] print(generated_caption)<jupyter_output>benzema after real mardid's win against psg<jupyter_text>Load from the Hub Once trained you can push the model and processor on the Hub to use them later. Meanwhile you can play with the model that we have fine-tuned!<jupyter_code>from transformers import BlipForConditionalGeneration, AutoProcessor model = BlipForConditionalGeneration.from_pretrained("ybelkada/blip-image-captioning-base-football-finetuned").to(device) processor = AutoProcessor.from_pretrained("ybelkada/blip-image-captioning-base-football-finetuned")<jupyter_output><empty_output><jupyter_text>Let's check the results on our train dataset!<jupyter_code>from matplotlib import pyplot as plt fig = plt.figure(figsize=(18, 14)) # prepare image for the model for i, example in enumerate(dataset): image = example["image"] inputs = processor(images=image, return_tensors="pt").to(device) pixel_values = inputs.pixel_values generated_ids = model.generate(pixel_values=pixel_values, max_length=50) generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] fig.add_subplot(2, 3, i+1) plt.imshow(image) plt.axis("off") plt.title(f"Generated caption: {generated_caption}")<jupyter_output><empty_output>
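If you want to share your own fine-tuned checkpoint in the same way as the one loaded above, you can push both the model and the processor to the Hub. The repository name below is a placeholder, and the calls assume you are already logged in (for example via `notebook_login()` from `huggingface_hub`).<jupyter_code># The repository id is a placeholder: replace it with "<your-username>/<repo-name>".
model.push_to_hub("your-username/blip-image-captioning-base-football-finetuned")
processor.push_to_hub("your-username/blip-image-captioning-base-football-finetuned")<jupyter_output><empty_output>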
notebooks/examples/image_captioning_blip.ipynb/0
{ "file_path": "notebooks/examples/image_captioning_blip.ipynb", "repo_id": "notebooks", "token_count": 2569 }
140
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it.<jupyter_code>#! pip install datasets transformers<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your username and password:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS. Uncomment the following instructions:<jupyter_code># !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.11.0 since the functionality was introduced in that version:<jupyter_code>import transformers print(transformers.__version__)<jupyter_output><empty_output><jupyter_text>You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/language-modeling). We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("language_modeling_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Fine-tuning a language model In this notebook, we'll see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model on a language modeling tasks. We will cover two types of language modeling tasks which are:- Causal language modeling: the model has to predict the next token in the sentence (so the labels are the same as the inputs shifted to the right). To make sure the model does not cheat, it gets an attention mask that will prevent it to access the tokens after token i when trying to predict the token i+1 in the sentence.- Masked language modeling: the model has to predict some tokens that are masked in the input. It still has access to the whole sentence, so it can use the tokens before and after the tokens masked to predict their value.We will see how to easily load and preprocess the dataset for each one of those tasks, and how to use the `Trainer` API to fine-tune a model on it.A script version of this notebook you can directly run on a distributed environment or on TPU is available in our [examples folder](https://github.com/huggingface/transformers/tree/master/examples). Preparing the dataset For each of those tasks, we will use the [Wikitext 2]() dataset as an example. 
You can load it very easily with the 🤗 Datasets library.<jupyter_code>from datasets import load_dataset datasets = load_dataset('wikitext', 'wikitext-2-raw-v1')<jupyter_output>Reusing dataset wikitext (/home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91)<jupyter_text>You can replace the dataset above with any dataset hosted on [the hub](https://huggingface.co/datasets) or use your own files. Just uncomment the following cell and replace the paths with values that will lead to your files:<jupyter_code># datasets = load_dataset("text", data_files={"train": path_to_train.txt, "validation": path_to_validation.txt}<jupyter_output><empty_output><jupyter_text>You can also load datasets from a csv or a JSON file, see the [full documentation](https://huggingface.co/docs/datasets/loading_datasets.htmlfrom-local-files) for more information. To access an actual element, you need to select a split first, then give an index:<jupyter_code>datasets["train"][10]<jupyter_output><empty_output><jupyter_text>To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.<jupyter_code>from datasets import ClassLabel import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) display(HTML(df.to_html())) show_random_elements(datasets["train"])<jupyter_output><empty_output><jupyter_text>As we can see, some of the texts are a full paragraph of a Wikipedia article while others are just titles or empty lines. Causal Language modeling For causal language modeling (CLM) we are going to take all the texts in our dataset and concatenate them after they are tokenized. Then we will split them in examples of a certain sequence length. This way the model will receive chunks of contiguous text that may look like:```part of text 1```or ```end of text 1 [BOS_TOKEN] beginning of text 2```depending on whether they span over several of the original texts in the dataset or not. The labels will be the same as the inputs, shifted to the left.We will use the [`distilgpt2`](https://huggingface.co/distilgpt2) model for this example. You can pick any of the checkpoints listed [here](https://huggingface.co/models?filter=causal-lm) instead:<jupyter_code>model_checkpoint = "distilgpt2"<jupyter_output><empty_output><jupyter_text>To tokenize all our texts with the same vocabulary that was used when training the model, we have to download a pretrained tokenizer. This is all done by the `AutoTokenizer` class:<jupyter_code>from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)<jupyter_output><empty_output><jupyter_text>We can now call the tokenizer on all our texts. This is very simple, using the [`map`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasets.Dataset.map) method from the Datasets library. 
First we define a function that call the tokenizer on our texts:<jupyter_code>def tokenize_function(examples): return tokenizer(examples["text"])<jupyter_output><empty_output><jupyter_text>Then we apply it to all the splits in our `datasets` object, using `batched=True` and 4 processes to speed up the preprocessing. We won't need the `text` column afterward, so we discard it.<jupyter_code>tokenized_datasets = datasets.map(tokenize_function, batched=True, num_proc=4, remove_columns=["text"])<jupyter_output>Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-0a686d6f64cb210f.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-659bcb80cad0097c.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-7f22912475d34c88.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-b3566e2fe9c5c036.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cach[...]<jupyter_text>If we now look at an element of our datasets, we will see the text have been replaced by the `input_ids` the model will need:<jupyter_code>tokenized_datasets["train"][1]<jupyter_output><empty_output><jupyter_text>Now for the harder part: we need to concatenate all our texts together then split the result in small chunks of a certain `block_size`. To do this, we will use the `map` method again, with the option `batched=True`. This option actually lets us change the number of examples in the datasets by returning a different number of examples than we got. This way, we can create our new samples from a batch of examples.First, we grab the maximum length our model was pretrained with. This might be a big too big to fit in your GPU RAM, so here we take a bit less at just 128.<jupyter_code># block_size = tokenizer.model_max_length block_size = 128<jupyter_output><empty_output><jupyter_text>Then we write the preprocessing function that will group our texts:<jupyter_code>def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result<jupyter_output><empty_output><jupyter_text>First note that we duplicate the inputs for our labels. This is because the model of the 🤗 Transformers library apply the shifting to the right, so we don't need to do it manually.Also note that by default, the `map` method will send a batch of 1,000 examples to be treated by the preprocessing function. 
So here, we will drop the remainder to make the concatenated tokenized texts a multiple of `block_size` every 1,000 examples. You can adjust this behavior by passing a higher batch size (which will also be processed slower). You can also speed-up the preprocessing by using multiprocessing:<jupyter_code>lm_datasets = tokenized_datasets.map( group_texts, batched=True, batch_size=1000, num_proc=4, )<jupyter_output>Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-da77bf362d4c6fa4.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-7d08a6d62516c9ff.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-a985b575c96ddae3.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-47fffef35acafddb.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cach[...]<jupyter_text>And we can check our datasets have changed: now the samples contain chunks of `block_size` contiguous tokens, potentially spanning over several of our original texts.<jupyter_code>tokenizer.decode(lm_datasets["train"][1]["input_ids"])<jupyter_output><empty_output><jupyter_text>Now that the data has been cleaned, we're ready to instantiate our `Trainer`. We will a model:<jupyter_code>from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>And some `TrainingArguments`:<jupyter_code>from transformers import Trainer, TrainingArguments model_name = model_checkpoint.split("/")[-1] training_args = TrainingArguments( f"{model_name}-finetuned-wikitext2", evaluation_strategy = "epoch", learning_rate=2e-5, weight_decay=0.01, push_to_hub=True, )<jupyter_output><empty_output><jupyter_text>The last argument to setup everything so we can push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally in a name that is different than the name of the repository it will be pushed, or if you want to push your model under an organization and not your name space, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"sgugger/gpt-finetuned-wikitext2"` or `"huggingface/gpt-finetuned-wikitext2"`). 
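For instance, a hypothetical set of arguments using `hub_model_id` could look like this (kept commented out so it does not overwrite the `training_args` defined above):<jupyter_code># training_args = TrainingArguments(
#     f"{model_name}-finetuned-wikitext2",
#     evaluation_strategy="epoch",
#     learning_rate=2e-5,
#     weight_decay=0.01,
#     push_to_hub=True,
#     hub_model_id="huggingface/gpt-finetuned-wikitext2",  # full repo name, including the namespace
# )<jupyter_output><empty_output>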
We pass along all of those to the `Trainer` class:<jupyter_code>trainer = Trainer( model=model, args=training_args, train_dataset=lm_datasets["train"], eval_dataset=lm_datasets["validation"], )<jupyter_output><empty_output><jupyter_text>And we can train our model:<jupyter_code>trainer.train()<jupyter_output><empty_output><jupyter_text>Once the training is completed, we can evaluate our model and get its perplexity on the validation set like this:<jupyter_code>import math eval_results = trainer.evaluate() print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")<jupyter_output>Perplexity: 38.17<jupyter_text>You can now upload the result of the training to the Hub, just execute this instruction:<jupyter_code>trainer.push_to_hub()<jupyter_output><empty_output><jupyter_text>You can now share this model with all your friends, family, favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"` so for instance:```pythonfrom transformers import AutoModelForCausalLMmodel = AutoModelForCausalLM.from_pretrained("sgugger/my-awesome-model")``` Masked language modeling For masked language modeling (MLM) we are going to use the same preprocessing as before for our dataset with one additional step: we will randomly mask some tokens (by replacing them by `[MASK]`) and the labels will be adjusted to only include the masked tokens (we don't have to predict the non-masked tokens).We will use the [`distilroberta-base`](https://huggingface.co/distilroberta-base) model for this example. You can pick any of the checkpoints listed [here](https://huggingface.co/models?filter=masked-lm) instead:<jupyter_code>model_checkpoint = "distilroberta-base"<jupyter_output><empty_output><jupyter_text>We can apply the same tokenization function as before, we just need to update our tokenizer to use the checkpoint we just picked:<jupyter_code>tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True) tokenized_datasets = datasets.map(tokenize_function, batched=True, num_proc=4, remove_columns=["text"])<jupyter_output>Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-333e4baa6f280a66.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-23acd0930cc16da7.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-56ae8ad41a9fdf19.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-599a47a0e666ad65.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cach[...]<jupyter_text>And like before, we group texts together and chunk them in samples of length `block_size`. 
You can skip that step if your dataset is composed of individual sentences.<jupyter_code>lm_datasets = tokenized_datasets.map( group_texts, batched=True, batch_size=1000, num_proc=4, )<jupyter_output>Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-661796332aa2b576.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-e019d91824c225fd.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-b5875c725d0e5cb7.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cache-a8e3eeaa703ca023.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/47c57a6745aa5ce8e16a5355aaa4039e3aa90d1adad87cef1ad4e0f29e74ac91/cach[...]<jupyter_text>The rest is very similar to what we had, with two exceptions. First we use a model suitable for masked LM:<jupyter_code>from transformers import AutoModelForMaskedLM model = AutoModelForMaskedLM.from_pretrained(model_checkpoint)<jupyter_output>Some weights of RobertaForMaskedLM were not initialized from the model checkpoint at distilroberta-base and are newly initialized: ['lm_head.decoder.bias'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.<jupyter_text>We redefine our `TrainingArguments`:<jupyter_code>model_name = model_checkpoint.split("/")[-1] training_args = TrainingArguments( f"{model_name}-finetuned-wikitext2", evaluation_strategy = "epoch", learning_rate=2e-5, weight_decay=0.01, push_to_hub=True, )<jupyter_output><empty_output><jupyter_text>Like before, the last argument to setup everything so we can push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally in a name that is different than the name of the repository it will be pushed, or if you want to push your model under an organization and not your name space, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"sgugger/bert-finetuned-wikitext2"` or `"huggingface/bert-finetuned-wikitext2"`). Finally, we use a special `data_collator`. The `data_collator` is a function that is responsible of taking the samples and batching them in tensors. In the previous example, we had nothing special to do, so we just used the default for this argument. Here we want to do the random-masking. We could do it as a pre-processing step (like the tokenization) but then the tokens would always be masked the same way at each epoch. By doing this step inside the `data_collator`, we ensure this random masking is done in a new way each time we go over the data.To do this masking for us, the library provides a `DataCollatorForLanguageModeling`. 
We can adjust the probability of the masking:<jupyter_code>from transformers import DataCollatorForLanguageModeling data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)<jupyter_output><empty_output><jupyter_text>Then we just have to pass everything to `Trainer` and begin training:<jupyter_code>trainer = Trainer( model=model, args=training_args, train_dataset=lm_datasets["train"], eval_dataset=lm_datasets["validation"], data_collator=data_collator, ) trainer.train()<jupyter_output><empty_output><jupyter_text>Like before, we can evaluate our model on the validation set. The perplexity is much lower than for the CLM objective because for the MLM objective, we only have to make predictions for the masked tokens (which represent 15% of the total here) while having access to the rest of the tokens. It's thus an easier task for the model.<jupyter_code>eval_results = trainer.evaluate() print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")<jupyter_output><empty_output><jupyter_text>You can now upload the result of the training to the Hub, just execute this instruction:<jupyter_code>trainer.push_to_hub()<jupyter_output><empty_output>
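Once pushed, the fine-tuned masked language model can be loaded by anyone, just like the causal one. As a quick illustration (the model identifier below is a placeholder for the repository created by `trainer.push_to_hub()`), you can try it with the `fill-mask` pipeline; `<mask>` is the mask token used by RoBERTa-style tokenizers.<jupyter_code>from transformers import pipeline

# Replace the identifier with your own "<username>/<repo-name>".
fill_mask = pipeline("fill-mask", model="your-username/distilroberta-base-finetuned-wikitext2")
fill_mask("The capital of France is <mask>.")<jupyter_output><empty_output>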
notebooks/examples/language_modeling.ipynb/0
{ "file_path": "notebooks/examples/language_modeling.ipynb", "repo_id": "notebooks", "token_count": 7093 }
141
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it.<jupyter_code>#! pip install transformers datasets huggingface_hub<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then uncomment the following cell and input your token.<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS and setup Git if you haven't already. Uncomment the following instructions and adapt with your name and email:<jupyter_code># !apt install git-lfs # !git config --global user.email "[email protected]" # !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.16.0 since the functionality was introduced in that version:<jupyter_code>import transformers print(transformers.__version__)<jupyter_output>4.21.0.dev0<jupyter_text>You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering). We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("question_answering_notebook", framework="tensorflow")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on a question-answering task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model to a question answering task, which is the task of extracting the answer to a question from a given context. We will see how to easily load a dataset for these kinds of tasks and use Keras to fine-tune a model on it. Note that this model **does not generate new text!** Instead, it selects a span of the input passage as the answer. This notebook is built to run on any question answering task with the same format as SQUAD (version 1 or 2), with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a version with a token classification head and a fast tokenizer (check on [this table](https://huggingface.co/transformers/index.htmlbigtable) if this is the case). It might, however, need some small adjustments if you decide to use a different dataset than the one used here. Depending on your model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. 
Set those three parameters, then the rest of the notebook should run smoothly:<jupyter_code># This flag is the difference between SQUAD v1 or 2 (if you're using another dataset, it indicates if impossible # answers are allowed or not). squad_v2 = False model_checkpoint = "distilbert-base-uncased" batch_size = 16<jupyter_output><empty_output><jupyter_text>Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`.<jupyter_code>from datasets import load_dataset, load_metric<jupyter_output><empty_output><jupyter_text>For our example here, we'll use the [SQUAD dataset](https://rajpurkar.github.io/SQuAD-explorer/). The notebook should work with any question answering dataset in the 🤗 Datasets library. If you're using your own dataset in a JSON or CSV file (see the [Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets.htmlfrom-local-files) on how to load them), it might need some adjustments to the column names.<jupyter_code>datasets = load_dataset("squad_v2" if squad_v2 else "squad")<jupyter_output>Reusing dataset squad (/home/matt/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453)<jupyter_text>The `datasets` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set.<jupyter_code>datasets<jupyter_output><empty_output><jupyter_text>We can see the training, validation and test sets all have a column for the context, the question and the answers to those questions. To access an actual element, you need to select a split first, then give an index:<jupyter_code>datasets["train"][0]<jupyter_output><empty_output><jupyter_text>We can see the answers are indicated by their start position in the text (here at character 515) and their full text, which is a substring of the context as we mentioned above. To get a sense of what the data looks like, the following function will show some examples picked randomly from the dataset and decoded back to strings.<jupyter_code>from datasets import ClassLabel, Sequence import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len( dataset ), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset) - 1) while pick in picks: pick = random.randint(0, len(dataset) - 1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) elif isinstance(typ, Sequence) and isinstance(typ.feature, ClassLabel): df[column] = df[column].transform( lambda x: [typ.feature.names[i] for i in x] ) display(HTML(df.to_html())) show_random_elements(datasets["train"])<jupyter_output><empty_output><jupyter_text>Preprocessing the training data Before we can feed those texts to our model, we need to preprocess them. 
This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:- we get a tokenizer that corresponds to the model architecture we want to use,- we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>The following assertion ensures that our tokenizer is a fast tokenizer (backed by Rust) from the 🤗 Tokenizers library. Those fast tokenizers are available for almost all models, and we will need some of the special features they have for our preprocessing.<jupyter_code>import transformers assert isinstance(tokenizer, transformers.PreTrainedTokenizerFast)<jupyter_output><empty_output><jupyter_text>You can check which type of models have a fast tokenizer available and which don't in the [big table of models](https://huggingface.co/transformers/index.htmlbigtable). You can directly call this tokenizer on two sentences (one for the answer, one for the context):<jupyter_code>tokenizer("What is your name?", "My name is Sylvain.")<jupyter_output><empty_output><jupyter_text>Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested.Now one specific thing for the preprocessing in question answering is how to deal with very long documents. We usually truncate them in other tasks, when they are longer than the model maximum sentence length, but here, removing part of the the context might result in losing the answer we are looking for. To deal with this, we will allow one (long) example in our dataset to give several input features, each of length shorter than the maximum length of the model (or the one we set as a hyper-parameter). 
Also, just in case the answer lies at the point we split a long context, we allow some overlap between the features we generate controlled by the hyper-parameter `doc_stride`:<jupyter_code>max_length = 384 # The maximum length of a feature (question and context) doc_stride = 128 # The allowed overlap between two part of the context when splitting is performed.<jupyter_output><empty_output><jupyter_text>Let's find one long example in our dataset:<jupyter_code>for i, example in enumerate(datasets["train"]): if len(tokenizer(example["question"], example["context"])["input_ids"]) > 384: break example = datasets["train"][i]<jupyter_output><empty_output><jupyter_text>Without any truncation, we get the following length for the input IDs:<jupyter_code>len(tokenizer(example["question"], example["context"])["input_ids"])<jupyter_output><empty_output><jupyter_text>Now, if we just truncate, we will lose information (and possibly the answer to our question):<jupyter_code>len( tokenizer( example["question"], example["context"], max_length=max_length, truncation="only_second", )["input_ids"] )<jupyter_output><empty_output><jupyter_text>Note that we never want to truncate the question, only the context, and so we use the `only_second` truncation method. Our tokenizer can automatically return a list of features capped by a certain maximum length, with the overlap we talked about above, we just have to tell it to do so with `return_overflowing_tokens=True` and by passing the stride:<jupyter_code>tokenized_example = tokenizer( example["question"], example["context"], max_length=max_length, truncation="only_second", return_overflowing_tokens=True, stride=doc_stride, )<jupyter_output><empty_output><jupyter_text>Now we don't have one list of `input_ids`, but several:<jupyter_code>[len(x) for x in tokenized_example["input_ids"]]<jupyter_output><empty_output><jupyter_text>And if we decode them, we can see the overlap:<jupyter_code>for x in tokenized_example["input_ids"][:2]: print(tokenizer.decode(x))<jupyter_output>[CLS] how many wins does the notre dame men's basketball team have? [SEP] the men's basketball team has over 1, 600 wins, one of only 12 schools who have reached that mark, and have appeared in 28 ncaa tournaments. former player austin carr holds the record for most points scored in a single game of the tournament with 61. although the team has never won the ncaa tournament, they were named by the helms athletic foundation as national champions twice. the team has orchestrated a number of upsets of number one ranked teams, the most notable of which was ending ucla's record 88 - game winning streak in 1974. the team has beaten an additional eight number - one teams, and those nine wins rank second, to ucla's 10, all - time in wins against the top team. the team plays in newly renovated purcell pavilion ( within the edmund p. joyce center ), which reopened for the beginning of the 2009 – 2010 season. the team is coached by mike brey, who, as of the 2014 – 15 season, his fifteenth at notr[...]<jupyter_text>It's going to take some work to properly label the answers here: we need to find in which of those features the answer actually is, and where exactly in that feature. The models we will use require the start and end positions of these answers in the tokens, so we will also need to to map parts of the original context to some tokens. 
Thankfully, the tokenizer we're using can help us with that by returning an `offset_mapping`:<jupyter_code>tokenized_example = tokenizer( example["question"], example["context"], max_length=max_length, truncation="only_second", return_overflowing_tokens=True, return_offsets_mapping=True, stride=doc_stride, ) print(tokenized_example["offset_mapping"][0][:100])<jupyter_output>[(0, 0), (0, 3), (4, 8), (9, 13), (14, 18), (19, 22), (23, 28), (29, 33), (34, 37), (37, 38), (38, 39), (40, 50), (51, 55), (56, 60), (60, 61), (0, 0), (0, 3), (4, 7), (7, 8), (8, 9), (10, 20), (21, 25), (26, 29), (30, 34), (35, 36), (36, 37), (37, 40), (41, 45), (45, 46), (47, 50), (51, 53), (54, 58), (59, 61), (62, 69), (70, 73), (74, 78), (79, 86), (87, 91), (92, 96), (96, 97), (98, 101), (102, 106), (107, 115), (116, 118), (119, 121), (122, 126), (127, 138), (138, 139), (140, 146), (147, 153), (154, 160), (161, 165), (166, 171), (172, 175), (176, 182), (183, 186), (187, 191), (192, 198), (199, 205), (206, 208), (209, 210), (211, 217), (218, 222), (223, 225), (226, 229), (230, 240), (241, 245), (246, 248), (248, 249), (250, 258), (259, 262), (263, 267), (268, 271), (272, 277), (278, 281), (282, 285), (286, 290), (291, 301), (301, 302), (303, 307), (308, 312), (313, 318), (319, 321), (322, 325), (326, 330), (330, 331), (332, 340), (341, 351), (352, 354), (355, 363), (364, 373), (374,[...]<jupyter_text>This gives the corresponding start and end character in the original text for each token in our input IDs. The very first token (`[CLS]`) has (0, 0) because it doesn't correspond to any part of the question/answer, then the second token is the same as the characters 0 to 3 of the question:<jupyter_code>first_token_id = tokenized_example["input_ids"][0][1] offsets = tokenized_example["offset_mapping"][0][1] print( tokenizer.convert_ids_to_tokens([first_token_id])[0], example["question"][offsets[0] : offsets[1]], )<jupyter_output>how How<jupyter_text>So we can use this mapping to find the position of the start and end tokens of our answer in a given feature. 
We just have to distinguish which parts of the offsets correspond to the question and which part correspond to the context, this is where the `sequence_ids` method of our `tokenized_example` can be useful:<jupyter_code>sequence_ids = tokenized_example.sequence_ids() print(sequence_ids)<jupyter_output>[None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, None, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, [...]<jupyter_text>It returns `None` for the special tokens, then 0 or 1 depending on whether the corresponding token comes from the first sentence past (the question) or the second (the context). Now with all of this, we can find the first and last token of the answer in one of our input feature (or if the answer is not in this feature):<jupyter_code>answers = example["answers"] start_char = answers["answer_start"][0] end_char = start_char + len(answers["text"][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != 1: token_start_index += 1 # End token index of the current span in the text. token_end_index = len(tokenized_example["input_ids"][0]) - 1 while sequence_ids[token_end_index] != 1: token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). offsets = tokenized_example["offset_mapping"][0] if ( offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char ): # Move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). while ( token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char ): token_start_index += 1 start_position = token_start_index - 1 while offsets[token_end_index][1] >= end_char: token_end_index -= 1 end_position = token_end_index + 1 print(start_position, end_position) else: print("The answer is not in this feature.")<jupyter_output>23 26<jupyter_text>And we can double check that it is indeed the correct answer:<jupyter_code>print( tokenizer.decode( tokenized_example["input_ids"][0][start_position : end_position + 1] ) ) print(answers["text"][0])<jupyter_output>over 1, 600 over 1,600<jupyter_text>For this notebook to work with any kind of model, we need to account for the special case where the model expects padding on the left (in which case we switch the order of the question and the context):<jupyter_code>pad_on_right = tokenizer.padding_side == "right"<jupyter_output><empty_output><jupyter_text>Now let's put everything together in one function we will apply to our training set. 
In the case of impossible answers (the answer is in another feature given by an example with a long context), we set the CLS index for both the start and end position. We could also simply discard those examples from the training set if the flag `allow_impossible_answers` is `False`. Since the preprocessing is already complex enough as it is, we've kept it simple for this part.<jupyter_code>def prepare_train_features(examples): # Tokenize our examples with truncation and padding, but keep the overflows using a stride. This results # in one example possibly giving several features when a context is long, each of those features having a # context that overlaps a bit with the context of the previous feature. tokenized_examples = tokenizer( examples["question" if pad_on_right else "context"], examples["context" if pad_on_right else "question"], truncation="only_second" if pad_on_right else "only_first", max_length=max_length, stride=doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop("offset_mapping") # Let's label those examples! tokenized_examples["start_positions"] = [] tokenized_examples["end_positions"] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples["input_ids"][i] cls_index = input_ids.index(tokenizer.cls_token_id) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples["answers"][sample_index] # If no answers are given, set the cls_index as answer. if len(answers["answer_start"]) == 0: tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Start/end character index of the answer in the text. start_char = answers["answer_start"][0] end_char = start_char + len(answers["text"][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != (1 if pad_on_right else 0): token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != (1 if pad_on_right else 0): token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not ( offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char ): tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case).
while ( token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char ): token_start_index += 1 tokenized_examples["start_positions"].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples["end_positions"].append(token_end_index + 1) return tokenized_examples<jupyter_output><empty_output><jupyter_text>This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:<jupyter_code>features = prepare_train_features(datasets["train"][:5])<jupyter_output><empty_output><jupyter_text>To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of the `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command. Since our preprocessing changes the number of samples, we need to remove the old columns when applying it.<jupyter_code>tokenized_datasets = datasets.map( prepare_train_features, batched=True, remove_columns=datasets["train"].column_names )<jupyter_output>Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-ad89cfc588b4b5ad.arrow Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-123d7bb970edffa2.arrow<jupyter_text>Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready for training, we can download the pretrained model and fine-tune it. Since our task is question answering, we use the `TFAutoModelForQuestionAnswering` class. 
Like with the tokenizer, the `from_pretrained` method will download and cache the model for us:<jupyter_code>from transformers import TFAutoModelForQuestionAnswering model = TFAutoModelForQuestionAnswering.from_pretrained(model_checkpoint)<jupyter_output>2022-07-21 15:10:11.409257: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:975] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero 2022-07-21 15:10:11.415291: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:975] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero 2022-07-21 15:10:11.415996: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:975] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero 2022-07-21 15:10:11.417100: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags[...]<jupyter_text>The warning is telling us we are throwing away some weights (the `vocab_transform` and `vocab_layer_norm` layers) and randomly initializing some other (the `pre_classifier` and `classifier` layers). This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. To train our model, we will need to define a few more things. The first two arguments are to setup everything so we can push the model to the [Hub](https://huggingface.co/models) at the end of training. Remove the two of them if you didn't follow the installation steps at the top of the notebook, otherwise you can change the value of `push_to_hub_model_id` to something you would prefer.We also tweak the learning rate, use the `batch_size` defined at the top of the notebook and customize the number of epochs for training, as well as the weight decay.<jupyter_code>model_name = model_checkpoint.split("/")[-1] push_to_hub_model_id = f"{model_name}-finetuned-squad" learning_rate = 2e-5 num_train_epochs = 2 weight_decay = 0.01<jupyter_output><empty_output><jupyter_text>Next, we convert our datasets to `tf.data.Dataset`, which Keras understands natively. There are two ways to do this - we can use the slightly more low-level [`Dataset.to_tf_dataset()`](https://huggingface.co/docs/datasets/package_reference/main_classesdatasets.Dataset.to_tf_dataset) method, or we can use [`Model.prepare_tf_dataset()`](https://huggingface.co/docs/transformers/main_classes/modeltransformers.TFPreTrainedModel.prepare_tf_dataset). The main difference between these two is that the `Model` method can inspect the model to determine which column names it can use as input, which means you don't need to specify them yourself. 
It also supplies a default data collator that will work fine for us, as our samples are already padded to the same length and ready to go.<jupyter_code>train_set = model.prepare_tf_dataset( tokenized_datasets["train"], shuffle=True, batch_size=batch_size, ) validation_set = model.prepare_tf_dataset( tokenized_datasets["validation"], shuffle=False, batch_size=batch_size, )<jupyter_output><empty_output><jupyter_text>Next, we can create an optimizer and specify a loss function. The `create_optimizer` function gives us a very solid `AdamW` optimizer with weight decay and a learning rate schedule, but it needs us to compute the number of training steps to build that schedule.<jupyter_code>from transformers import create_optimizer total_train_steps = len(train_set) * num_train_epochs optimizer, schedule = create_optimizer( init_lr=learning_rate, num_warmup_steps=0, num_train_steps=total_train_steps )<jupyter_output><empty_output><jupyter_text>Note that most Transformers models compute loss internally, so we actually don't have to specify anything there! You can of course set your own loss function if you want, but by default our models will choose the 'obvious' loss that matches their task, such as cross-entropy in the case of language modelling. The built-in loss will also correctly handle things like masking the loss on padding tokens, or unlabelled tokens in the case of masked language modelling, so we recommend using it unless you're an advanced user!In addition, because the outputs and loss for this model class are quite straightforward, we can use built-in Keras metrics - these are liable to misbehave in other contexts (for example, they don't know about the masking in masked language modelling) but work well here.We can also use `jit_compile` to compile the model with [XLA](https://www.tensorflow.org/xla). In other cases, we should be careful about that - if our inputs might have variable sequence lengths, we may end up having to do a new XLA compilation for each possible length, because XLA compilation expects a static input shape! In this notebook, however, we have padded all examples to exactly the same length. This makes it perfect for XLA, which will give us a nice performance boost.<jupyter_code>import tensorflow as tf model.compile(optimizer=optimizer, jit_compile=True, metrics=["accuracy"])<jupyter_output>No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! To disable this behaviour please pass a loss argument, or explicitly pass `loss=None` if you do not want your model to compute a loss.<jupyter_text>We will evaluate our model and compute metrics in the next section (this is a very long operation, so we will only compute the evaluation loss during training). For now, let's just train our model. We can also add a callback to sync up our model with the Hub - this allows us to resume training from other machines and even test the model's inference quality midway through training! 
If you don't want to do this, simply remove the callbacks argument in the call to `fit()`.<jupyter_code>from transformers.keras_callbacks import PushToHubCallback from tensorflow.keras.callbacks import TensorBoard push_to_hub_callback = PushToHubCallback( output_dir="./qa_model_save", tokenizer=tokenizer, hub_model_id=push_to_hub_model_id, ) tensorboard_callback = TensorBoard(log_dir="./qa_model_save/logs") callbacks = [tensorboard_callback, push_to_hub_callback] model.fit( train_set, validation_data=validation_set, epochs=num_train_epochs, callbacks=callbacks, )<jupyter_output>/home/matt/PycharmProjects/notebooks/examples/qa_model_save is already a clone of https://huggingface.co/Rocketknight1/distilbert-base-uncased-finetuned-squad. Make sure you pull the latest changes with `repo.git_pull()`.<jupyter_text>Evaluation Evaluating our model will require a bit more work, as we will need to map the predictions of our model back to parts of the context. The model itself predicts logits for the start and end position of our answers: if we take a batch from our validation dataset, here is the output our model gives us:<jupyter_code>batch = next(iter(validation_set)) output = model.predict_on_batch(batch) output.keys()<jupyter_output><empty_output><jupyter_text>The output of the model is a dict-like object that contains the loss (since we provided labels) and the start and end logits. We won't need the loss for our predictions; let's have a look at the logits:<jupyter_code>output.start_logits.shape, output.end_logits.shape<jupyter_output><empty_output><jupyter_text>We have one logit for each feature and each token. The most obvious way to predict an answer for each feature is to take the index of the maximum of the start logits as a start position and the index of the maximum of the end logits as an end position.<jupyter_code>import numpy as np np.argmax(output.start_logits, -1), np.argmax(output.end_logits, -1)<jupyter_output><empty_output><jupyter_text>This will work great in a lot of cases, but what if this prediction gives us something impossible: the start position could be greater than the end position, or point to a span of text in the question instead of the answer. In that case, we might want to look at the second best prediction to see if it gives a possible answer and select that instead. However, picking the second best answer is not as easy as picking the best one: is it the second best index in the start logits with the best index in the end logits? Or the best index in the start logits with the second best index in the end logits? And if that second best answer is not possible either, it gets even trickier for the third best answer. To rank our answers, we will use the score obtained by adding the start and end logits. We won't try to order all the possible answers; instead, we limit ourselves to a number of candidates given by a hyper-parameter we call `n_best_size`. We'll pick the best indices in the start and end logits and gather all the answers this predicts. After checking if each one is valid, we will sort them by their score and keep the best one.
Here is how we would do this on the first feature in the batch:<jupyter_code>n_best_size = 20 import numpy as np start_logits = output.start_logits[0] end_logits = output.end_logits[0] # Gather the indices the best start/end logits: start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() valid_answers = [] for start_index in start_indexes: for end_index in end_indexes: if ( start_index <= end_index ): # We need to refine that test to check the answer is inside the context valid_answers.append( { "score": start_logits[start_index] + end_logits[end_index], "text": "", # We need to find a way to get back the original substring corresponding to the answer in the context } )<jupyter_output><empty_output><jupyter_text>And then we can sort the `valid_answers` according to their `score` and only keep the best one. The only point left is how to check a given span is inside the context (and not the question) and how to get back the text inside. To do this, we need to add two things to our validation features:- the ID of the example that generated the feature (since each example can generate several features, as seen before);- the offset mapping that will give us a map from token indices to character positions in the context.That's why we will re-process the validation set with the following function, slightly different from `prepare_train_features`:<jupyter_code>def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples["question" if pad_on_right else "context"], examples["context" if pad_on_right else "question"], truncation="only_second" if pad_on_right else "only_first", max_length=max_length, stride=doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # We keep the example_id that gave us this feature and we will store the offset mappings. tokenized_examples["example_id"] = [] for i in range(len(tokenized_examples["input_ids"])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
tokenized_examples["offset_mapping"][i] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples<jupyter_output><empty_output><jupyter_text>And like before, we can apply that function to our validation set easily:<jupyter_code>validation_features = datasets["validation"].map( prepare_validation_features, batched=True, remove_columns=datasets["validation"].column_names, )<jupyter_output>Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-fb6eddd5466a5d8b.arrow<jupyter_text>And turn the dataset into a `tf.data.Dataset` as before.<jupyter_code>validation_dataset = model.prepare_tf_dataset( validation_features, shuffle=False, batch_size=batch_size, )<jupyter_output><empty_output><jupyter_text>Now we can grab the predictions for all features by using the `model.predict` method:<jupyter_code>raw_predictions = model.predict(validation_dataset) raw_predictions<jupyter_output><empty_output><jupyter_text>We can now refine the test we had before: since we set `None` in the offset mappings when it corresponds to a part of the question, it's easy to check if an answer is fully inside the context. We also eliminate very long answers from our considerations (with an hyper-parameter we can tune)<jupyter_code>max_answer_length = 30 start_logits = output.start_logits[0] end_logits = output.end_logits[0] offset_mapping = validation_features[0]["offset_mapping"] # The first feature comes from the first example. For the more general case, we will need to be match the example_id to # an example index context = datasets["validation"][0]["context"] # Gather the indices the best start/end logits: start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() valid_answers = [] for start_index in start_indexes: for end_index in end_indexes: # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond # to part of the input_ids that are not in the context. if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or offset_mapping[end_index] is None ): continue # Don't consider answers with a length that is either < 0 or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue if ( start_index <= end_index ): # We need to refine that test to check the answer is inside the context start_char = offset_mapping[start_index][0] end_char = offset_mapping[end_index][1] valid_answers.append( { "score": start_logits[start_index] + end_logits[end_index], "text": context[start_char:end_char], } ) valid_answers = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[ :n_best_size ] valid_answers<jupyter_output><empty_output><jupyter_text>We can compare to the actual ground-truth answer:<jupyter_code>datasets["validation"][0]["answers"]<jupyter_output><empty_output><jupyter_text>Our model's most likely answer is correct!As we mentioned in the code above, this was easy on the first feature because we knew it comes from the first example. For the other features, we will need a map between examples and their corresponding features. 
Also, since one example can give several features, we will need to gather together all the answers in all the features generated by a given example, then pick the best one. The following code builds a map from example index to its corresponding features indices:<jupyter_code>import collections examples = datasets["validation"] features = validation_features example_id_to_index = {k: i for i, k in enumerate(examples["id"])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature["example_id"]]].append(i)<jupyter_output><empty_output><jupyter_text>We're almost ready for our post-processing function. The last bit to deal with is the impossible answer (when `squad_v2 = True`). The code above only keeps answers that are inside the context, we need to also grab the score for the impossible answer (which has start and end indices corresponding to the index of the CLS token). When one example gives several features, we have to predict the impossible answer when all the features give a high score to the impossible answer (since one feature could predict the impossible answer just because the answer isn't in the part of the context it has access too), which is why the score of the impossible answer for one example is the *minimum* of the scores for the impossible answer in each feature generated by the example.We then predict the impossible answer when that score is greater than the score of the best non-impossible answer. All combined together, this gives us this post-processing function:<jupyter_code>from tqdm.auto import tqdm def postprocess_qa_predictions( examples, features, all_start_logits, all_end_logits, n_best_size=20, max_answer_length=30, ): # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples["id"])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature["example_id"]]].append(i) # The dictionaries we have to fill. predictions = collections.OrderedDict() # Logging. print( f"Post-processing {len(examples)} example predictions split into {len(features)} features." ) # Let's loop over all the examples! for example_index, example in enumerate(tqdm(examples)): # Those are the indices of the features associated to the current example. feature_indices = features_per_example[example_index] min_null_score = None # Only used if squad_v2 is True. valid_answers = [] context = example["context"] # Looping through all the features associated to the current example. for feature_index in feature_indices: # We grab the predictions of the model for this feature. start_logits = all_start_logits[feature_index] end_logits = all_end_logits[feature_index] # This is what will allow us to map some the positions in our logits to span of texts in the original # context. offset_mapping = features[feature_index]["offset_mapping"] # Update minimum null prediction. cls_index = features[feature_index]["input_ids"].index( tokenizer.cls_token_id ) feature_null_score = start_logits[cls_index] + end_logits[cls_index] if min_null_score is None or min_null_score < feature_null_score: min_null_score = feature_null_score # Go through all possibilities for the `n_best_size` greater start and end logits. 
start_indexes = np.argsort(start_logits)[ -1 : -n_best_size - 1 : -1 ].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond # to part of the input_ids that are not in the context. if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or not offset_mapping[start_index] or not offset_mapping[end_index] ): continue # Don't consider answers with a length that is either < 0 or > max_answer_length. if ( end_index < start_index or end_index - start_index + 1 > max_answer_length ): continue start_char = offset_mapping[start_index][0] end_char = offset_mapping[end_index][1] valid_answers.append( { "score": start_logits[start_index] + end_logits[end_index], "text": context[start_char:end_char], } ) if len(valid_answers) > 0: best_answer = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[ 0 ] else: # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid # failure. best_answer = {"text": "", "score": 0.0} # Let's pick our final answer: the best one or the null answer (only for squad_v2) if not squad_v2: predictions[example["id"]] = best_answer["text"] else: answer = ( best_answer["text"] if best_answer["score"] > min_null_score else "" ) predictions[example["id"]] = answer return predictions<jupyter_output><empty_output><jupyter_text>And we can apply our post-processing function to our raw predictions:<jupyter_code>final_predictions = postprocess_qa_predictions( datasets["validation"], validation_features, raw_predictions["start_logits"], raw_predictions["end_logits"], )<jupyter_output>Post-processing 10570 example predictions split into 10784 features.<jupyter_text>Then we can load the metric from the datasets library.<jupyter_code>metric = load_metric("squad_v2" if squad_v2 else "squad")<jupyter_output><empty_output><jupyter_text>Then we can call compute on it. We just need to format predictions and labels a bit as it expects a list of dictionaries and not one big dictionary. In the case of squad_v2, we also have to set a `no_answer_probability` argument (which we set to 0.0 here as we have already set the answer to empty if we picked it).<jupyter_code>if squad_v2: formatted_predictions = [ {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in final_predictions.items() ] else: formatted_predictions = [ {"id": k, "prediction_text": v} for k, v in final_predictions.items() ] references = [ {"id": ex["id"], "answers": ex["answers"]} for ex in datasets["validation"] ] metric.compute(predictions=formatted_predictions, references=references)<jupyter_output><empty_output><jupyter_text>If you ran the callback above, you can now share this model with all your friends, family or favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"` so for instance:```pythonfrom transformers import TFAutoModelForQuestionAnsweringmodel = TFAutoModelForQuestionAnswering.from_pretrained("your-username/my-awesome-model")``` Inference Now we've trained our model, let's see how we could load it and use it to answer questions in future! First, let's load it from the hub. 
This means we can resume the code from here without needing to rerun everything above every time.<jupyter_code>from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering # You can, of course, use your own username and model name here # once you've pushed your model using the code above! checkpoint = "Rocketknight1/distilbert-base-uncased-finetuned-squad" model = TFAutoModelForQuestionAnswering.from_pretrained(checkpoint) tokenizer = AutoTokenizer.from_pretrained(checkpoint)<jupyter_output><empty_output><jupyter_text>Now, let's get some sample text and ask a question. Feel free to substitute your own text and question!<jupyter_code>context = """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train.""" question = "What kind of mechanisms is Transformer based on?" inputs = tokenizer([context], [question], return_tensors="np") outputs = model(inputs)<jupyter_output><empty_output><jupyter_text>The outputs are logits, so let's use argmax to find the largest logit, which represents the model's best guess for the right answer.<jupyter_code>start_position = np.argmax(outputs.start_logits[0]) end_position = np.argmax(outputs.end_logits[0]) print(start_position) print(end_position) # Extract this substring from the inputs answer = inputs["input_ids"][0, start_position: end_position + 1] print(answer)<jupyter_output>64 65 [ 3086 10595]<jupyter_text>Well, these are definitely tokens. Let's decode them back to text:<jupyter_code>tokenizer.decode(answer)<jupyter_output><empty_output><jupyter_text>Pipeline API An alternative way to quickly perform inference with any model on the hub is to use the [Pipeline API](https://huggingface.co/docs/transformers/main_classes/pipelines), which abstracts away all the steps we did manually above. It will perform the preprocessing, forward pass and postprocessing all in a single object.Let's showcase this for our trained model:<jupyter_code>from transformers import pipeline question_answerer = pipeline("question-answering", "Rocketknight1/distilbert-base-uncased-finetuned-squad", framework="tf") question_answerer(context=context, question=question)<jupyter_output><empty_output>
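The pipeline returns the answer text together with its score and its character start/end positions in the context. If you want to inspect several candidate answers rather than just the best one, the question-answering pipeline also accepts a `top_k` argument (called `topk` in older versions of 🤗 Transformers); a quick sketch:

```python
# Ask the pipeline for the 3 highest-scoring answer spans instead of only the best one.
question_answerer(context=context, question=question, top_k=3)
```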
<jupyter_start><jupyter_text>Probabilistic Time Series Forecasting with 🤗 Transformers IntroductionTime series forecasting is an essential scientific and business problem and as such has also seen a lot of innovation recently with the use of [deep learning based](https://dl.acm.org/doi/abs/10.1145/3533382) models in addition to the [classical methods](https://otexts.com/fpp3/). An important difference between classical methods like ARIMA and novel deep learning methods is the following. Probabilistic ForecastingTypically, classical methods are fitted on each time series in a dataset individually. These are often referred to as "single" or "local" methods. However, when dealing with a large amount of time series for some applications, it is beneficial to train a "global" model on all available time series, which enables the model to learn latent representations from many different sources.Some classical methods are point-valued (meaning, they just output a single value per time step) and models are trained by minimizing an L2 or L1 type of loss with respect to the ground truth data. However, since forecasts are often used in some real-world decision making pipeline, even with humans in the loop, it is much more beneficial to provide the uncertainties of predictions. This is also called "probabilistic forecasting", as opposed to "point forecasting". This entails modeling a probabilistic distribution, from which one can sample.So in short, rather than training local point forecasting models, we hope to train **global probabilistic** models. Deep learning is a great fit for this, as neural networks can learn representations from several related time series as well as model the uncertainty of the data.It is common in the probabilistic setting to learn the future parameters of some chosen parametric distribution, like Gaussian or Student-T; or learn the conditional quantile function; or use the framework of Conformal Prediction adapted to the time series setting. The choice of method does not affect the modeling aspect and thus can be typically thought of as yet another hyperparameter. One can always turn a probabilistic model into a point-forecasting model, by taking empirical means or medians. The Time Series TransformerIn terms of modeling time series data which are sequential in nature, as one can imagine, researchers have come up with models which use Recurrent Neural Networks (RNN) like LSTM or GRU, or Convolutional Networks (CNN), and more recently Transformer based methods which fit naturally to the time series forecasting setting.In this blog post, we're going to leverage the vanilla Transformer [(Vaswani et al., 2017)](https://arxiv.org/abs/1706.03762) for the **univariate** probabilistic forecasting task (i.e. predicting each time series' 1-d distribution individually). The Encoder-Decoder Transformer is a natural choice for forecasting as it encapsulates several inductive biases nicely.To begin with, the use of an Encoder-Decoder architecture is helpful at inference time where typically for some logged data we wish to forecast some prediction steps into the future. This can be thought of as analogous to the text generation task where given some context, we sample the next token and pass it back into the decoder (also called "autoregressive generation"). Similarly here we can also, given some distribution type, sample from it to provide forecasts up until our desired prediction horizon. 
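To make the sampling idea a bit more tangible, here is a small, purely hypothetical sketch of such an autoregressive loop, with a dummy `decoder_step` standing in for the trained network (the model we use later wraps this kind of loop in its `generate()` method):

```python
import torch

# Hypothetical stand-in for the trained decoder: given the values seen so far,
# it returns a predictive distribution for the next time step. Here we simply
# centre a Student-T on the last value so the sketch runs on its own.
def decoder_step(history):
    return torch.distributions.StudentT(
        df=torch.tensor(3.0), loc=torch.tensor(history[-1]), scale=torch.tensor(0.1)
    )

history = [1.0, 1.2, 0.9, 1.1]  # observed context window
prediction_length = 5

for _ in range(prediction_length):
    dist = decoder_step(history)
    next_value = dist.sample().item()  # draw from the predictive distribution...
    history.append(next_value)         # ...and feed the draw back in as decoder input

forecast = history[-prediction_length:]
print(forecast)
```

Repeating such draws many times in parallel yields a whole set of trajectories, from which means, medians or quantiles can then be read off.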
This is known as Greedy Sampling/Search and there is a great blog post about it [here](https://huggingface.co/blog/how-to-generate) for the NLP setting.Secondly, a Transformer helps us to train on time series data which might contain thousands of time points. It might not be feasible to input *all* the history of a time series at once to the model, due to the time- and memory constraints of the attention mechanism. Thus, one can consider some appropriate context window and sample this window and the subsequent prediction length sized window from the training data when constructing batches for stochastic gradient descent (SGD). The context sized window can be passed to the encoder and the prediction window to a *causal-masked* decoder. This means that the decoder can only look at previous time steps when learning the next value. This is equivalent to how one would train a vanilla Transformer for machine translation, referred to as "teacher forcing".Another benefit of Transformers over the other architectures is that we can incorporate missing values (which are common in the time series setting) as an additional mask to the encoder or decoder and still train without resorting to in-filling or imputation. This is equivalent to the `attention_mask` of models like BERT and GPT-2 in the Transformers library, to not include padding tokens in the computation of the attention matrix.A drawback of the Transformer architecture is the limit to the sizes of the context and prediction windows because of the quadratic compute and memory requirements of the vanilla Transformer, see [Tay et al., 2020](https://arxiv.org/abs/2009.06732). Additionally, since the Transformer is a powerful architecture, it might overfit or learn spurious correlations much more easily compared to other [methods](https://openreview.net/pdf?id=D7YBmfX_VQy).The 🤗 Transformers library comes with a vanilla probabilistic time series Transformer model, simply called the [Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer). In the sections below, we'll show how to train such a model on a custom dataset. Set-up EnvironmentFirst, let's install the necessary libraries: 🤗 Transformers, 🤗 Datasets, 🤗 Evaluate, 🤗 Accelerate and [GluonTS](https://github.com/awslabs/gluonts).As we will show, GluonTS will be used for transforming the data to create features as well as for creating appropriate training, validation and test batches.<jupyter_code>!pip install -q transformers !pip install -q datasets !pip install -q evaluate !pip install -q accelerate !pip install -q gluonts ujson<jupyter_output> |████████████████████████████████| 1.0 MB 29.8 MB/s  |████████████████████████████████| 52 kB 1.5 MB/s [?25h<jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("time_series_transformers_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Load DatasetIn this blog post, we'll use the `tourism_monthly` dataset, which is available on the [Hugging Face Hub](https://huggingface.co/datasets/monash_tsf). 
This dataset contains monthly tourism volumes for 366 regions in Australia.This dataset is part of the [Monash Time Series Forecasting](https://forecastingdata.org/) repository, a collection of time series datasets from a number of domains. It can be viewed as the GLUE benchmark of time series forecasting.<jupyter_code>from datasets import load_dataset dataset = load_dataset("monash_tsf", "tourism_monthly")<jupyter_output><empty_output><jupyter_text>As can be seen, the dataset contains 3 splits: train, validation and test.<jupyter_code>dataset<jupyter_output><empty_output><jupyter_text>Each example contains a few keys, of which `start` and `target` are the most important ones. Let us have a look at the first time series in the dataset:<jupyter_code>train_example = dataset["train"][0] train_example.keys()<jupyter_output><empty_output><jupyter_text>The `start` simply indicates the start of the time series (as a datetime), and the `target` contains the actual values of the time series.The `start` will be useful to add time related features to the time series values, as extra input to the model (such as "month of year"). Since we know the frequency of the data is `monthly`, we know for instance that the second value has the timestamp `1979-02-01`, etc.<jupyter_code>print(train_example["start"]) print(train_example["target"])<jupyter_output>1979-01-01 00:00:00 [1149.8699951171875, 1053.8001708984375, 1388.8797607421875, 1783.3702392578125, 1921.025146484375, 2704.94482421875, 4184.41357421875, 4148.35400390625, 2620.72509765625, 1650.300048828125, 1115.9200439453125, 1370.6251220703125, 1096.31494140625, 978.4600219726562, 1294.68505859375, 1480.465087890625, 1748.865234375, 2216.920166015625, 4690.5185546875, 4682.8642578125, 2459.579833984375, 1484.4901123046875, 1028.985107421875, 1109.3648681640625, 960.8751220703125, 896.35009765625, 1118.6551513671875, 1619.9949951171875, 1847.994873046875, 2367.044921875, 4991.16015625, 4772.9443359375, 2894.678466796875, 1860.4801025390625, 1185.150146484375, 1313.659912109375, 1160.9150390625, 1061.5048828125, 1301.77001953125, 1794.3797607421875, 2106.455078125, 2789.034912109375, 4917.8466796875, 4994.4833984375, 3016.754150390625, 1941.505126953125, 1234.135009765625, 1378.72021484375, 1182.9749755859375, 1081.6600341796875, 1424.110107421875, 1774.5350341796875, 2115.42016601[...]<jupyter_text>The validation set contains the same data as the training set, just for a `prediction_length` longer amount of time. 
This allows us to validate the model's predictions against the ground truth.The test set is again one `prediction_length` longer data compared to the validation set (or some multiple of `prediction_length` longer data compared to the training set for testing on multiple rolling windows).<jupyter_code>validation_example = dataset["validation"][0] validation_example.keys()<jupyter_output><empty_output><jupyter_text>The initial values are exactly the same as the corresponding training example:<jupyter_code>print(validation_example["start"]) print(validation_example["target"])<jupyter_output>1979-01-01 00:00:00 [1149.8699951171875, 1053.8001708984375, 1388.8797607421875, 1783.3702392578125, 1921.025146484375, 2704.94482421875, 4184.41357421875, 4148.35400390625, 2620.72509765625, 1650.300048828125, 1115.9200439453125, 1370.6251220703125, 1096.31494140625, 978.4600219726562, 1294.68505859375, 1480.465087890625, 1748.865234375, 2216.920166015625, 4690.5185546875, 4682.8642578125, 2459.579833984375, 1484.4901123046875, 1028.985107421875, 1109.3648681640625, 960.8751220703125, 896.35009765625, 1118.6551513671875, 1619.9949951171875, 1847.994873046875, 2367.044921875, 4991.16015625, 4772.9443359375, 2894.678466796875, 1860.4801025390625, 1185.150146484375, 1313.659912109375, 1160.9150390625, 1061.5048828125, 1301.77001953125, 1794.3797607421875, 2106.455078125, 2789.034912109375, 4917.8466796875, 4994.4833984375, 3016.754150390625, 1941.505126953125, 1234.135009765625, 1378.72021484375, 1182.9749755859375, 1081.6600341796875, 1424.110107421875, 1774.5350341796875, 2115.42016601[...]<jupyter_text>However, this example has `prediction_length=24` additional values compared to the training example. Let us verify it.<jupyter_code>freq = "1M" prediction_length = 24 assert len(train_example["target"]) + prediction_length == len( validation_example["target"] )<jupyter_output><empty_output><jupyter_text>Let's visualize this:<jupyter_code>import matplotlib.pyplot as plt figure, axes = plt.subplots() axes.plot(train_example["target"], color="blue") axes.plot(validation_example["target"], color="red", alpha=0.5) plt.show()<jupyter_output><empty_output><jupyter_text>Let's split up the data:<jupyter_code>train_dataset = dataset["train"] test_dataset = dataset["test"]<jupyter_output><empty_output><jupyter_text>Update `start` to `pd.Period`The first thing we'll do is convert the `start` feature of each time series to a pandas `Period` index using the data's `freq`:<jupyter_code>from functools import lru_cache import pandas as pd import numpy as np @lru_cache(10_000) def convert_to_pandas_period(date, freq): return pd.Period(date, freq) def transform_start_field(batch, freq): batch["start"] = [convert_to_pandas_period(date, freq) for date in batch["start"]] return batch<jupyter_output><empty_output><jupyter_text>We now use `datasets`' [`set_transform`](https://huggingface.co/docs/datasets/v2.7.0/en/package_reference/main_classesdatasets.Dataset.set_transform) functionality to do this on-the-fly in place:<jupyter_code>from functools import partial train_dataset.set_transform(partial(transform_start_field, freq=freq)) test_dataset.set_transform(partial(transform_start_field, freq=freq))<jupyter_output><empty_output><jupyter_text>Define the modelNext, let's instantiate a model. 
The model will be trained from scratch, hence we won't use the `from_pretrained` method here, but rather randomly initialize the model from a [`config`](https://huggingface.co/docs/transformers/model_doc/time_series_transformertransformers.TimeSeriesTransformerConfig).We specify a couple of additional parameters to the model:- `prediction_length` (in our case, `24` months): this is the horizon that the decoder of the Transformer will learn to predict for;- `context_length`: the model will set the `context_length` (input of the encoder) equal to the `prediction_length`, if no `context_length` is specified;- `lags` for a given frequency: these specify how much we "look back", to be added as additional features. e.g. for a `Daily` frequency we might consider a look back of `[1, 2, 7, 30, ...]` or in other words look back 1, 2, ... days while for `Minute` data we might consider `[1, 30, 60, 60*24, ...]` etc.;- the number of time features: in our case, this will be `2` as we'll add `MonthOfYear` and `Age` features;- the number of static categorical features: in our case, this will be just `1` as we'll add a single "time series ID" feature;- the cardinality: the number of values of each static categorical feature, as a list which for our case will be `[366]` as we have 366 different time series- the embedding dimension: the embedding dimension for each static categorical feature, as a list, for example `[3]` meaning the model will learn an embedding vector of size `3` for each of the `366` time series (regions). Let's use the default lags provided by GluonTS for the given frequency ("monthly"):<jupyter_code>from gluonts.time_feature import get_lags_for_frequency lags_sequence = get_lags_for_frequency(freq) print(lags_sequence)<jupyter_output>[1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 23, 24, 25, 35, 36, 37]<jupyter_text>This means that we'll look back up to 37 months for each time step, as additional features.Let's also check the default time features which GluonTS provides us:<jupyter_code>from gluonts.time_feature import time_features_from_frequency_str time_features = time_features_from_frequency_str(freq) print(time_features)<jupyter_output>[<function month_of_year at 0x7f84840216c0>]<jupyter_text>In this case, there's only a single feature, namely "month of year". This means that for each time step, we'll add the month as a scalar value (e.g. 
`1` in case the timestamp is "january", `2` in case the timestamp is "february", etc.).We now have everything to define the model:<jupyter_code>from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerForPrediction config = TimeSeriesTransformerConfig( prediction_length=prediction_length, # context length: context_length=prediction_length * 2, # lags coming from helper given the freq: lags_sequence=lags_sequence, # we'll add 2 time features ("month of year" and "age", see further): num_time_features=len(time_features) + 1, # we have a single static categorical feature, namely time series ID: num_static_categorical_features=1, # it has 366 possible values: cardinality=[len(train_dataset)], # the model will learn an embedding of size 2 for each of the 366 possible values: embedding_dimension=[2], # transformer params: encoder_layers=4, decoder_layers=4, d_model=32, ) model = TimeSeriesTransformerForPrediction(config)<jupyter_output><empty_output><jupyter_text>Note that, similar to other models in the 🤗 Transformers library, [`TimeSeriesTransformerModel`](https://huggingface.co/docs/transformers/model_doc/time_series_transformertransformers.TimeSeriesTransformerModel) corresponds to the encoder-decoder Transformer without any head on top, and [`TimeSeriesTransformerForPrediction`](https://huggingface.co/docs/transformers/model_doc/time_series_transformertransformers.TimeSeriesTransformerForPrediction) corresponds to `TimeSeriesTransformerModel` with a **distribution head** on top. By default, the model uses a Student-t distribution (but this is configurable):<jupyter_code>model.config.distribution_output<jupyter_output><empty_output><jupyter_text>This is an important difference with Transformers for NLP, where the head typically consists of a fixed categorical distribution implemented as an `nn.Linear` layer. Define TransformationsNext, we define the transformations for the data, in particular for the creation of the time features (based on the dataset or universal ones).Again, we'll use the GluonTS library for this. We define a `Chain` of transformations (which is a bit comparable to `torchvision.transforms.Compose` for images). It allows us to combine several transformations into a single pipeline.<jupyter_code>from gluonts.time_feature import ( time_features_from_frequency_str, TimeFeature, get_lags_for_frequency, ) from gluonts.dataset.field_names import FieldName from gluonts.transform import ( AddAgeFeature, AddObservedValuesIndicator, AddTimeFeatures, AsNumpyArray, Chain, ExpectedNumInstanceSampler, InstanceSplitter, RemoveFields, SelectFields, SetField, TestSplitSampler, Transformation, ValidationSplitSampler, VstackFeatures, RenameFields, )<jupyter_output><empty_output><jupyter_text>The transformations below are annotated with comments, to explain what they do. 
At a high level, we will iterate over the individual time series of our dataset and add/remove fields or features:<jupyter_code>from transformers import PretrainedConfig def create_transformation(freq: str, config: PretrainedConfig) -> Transformation: remove_field_names = [] if config.num_static_real_features == 0: remove_field_names.append(FieldName.FEAT_STATIC_REAL) if config.num_dynamic_real_features == 0: remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL) if config.num_static_categorical_features == 0: remove_field_names.append(FieldName.FEAT_STATIC_CAT) # a bit like torchvision.transforms.Compose return Chain( # step 1: remove static/dynamic fields if not specified [RemoveFields(field_names=remove_field_names)] # step 2: convert the data to NumPy (potentially not needed) + ( [ AsNumpyArray( field=FieldName.FEAT_STATIC_CAT, expected_ndim=1, dtype=int, ) ] if config.num_static_categorical_features > 0 else [] ) + ( [ AsNumpyArray( field=FieldName.FEAT_STATIC_REAL, expected_ndim=1, ) ] if config.num_static_real_features > 0 else [] ) + [ AsNumpyArray( field=FieldName.TARGET, # we expect an extra dim for the multivariate case: expected_ndim=1 if config.input_size == 1 else 2, ), # step 3: handle the NaN's by filling in the target with zero # and return the mask (which is in the observed values) # true for observed values, false for nan's # the decoder uses this mask (no loss is incurred for unobserved values) # see loss_weights inside the xxxForPrediction model AddObservedValuesIndicator( target_field=FieldName.TARGET, output_field=FieldName.OBSERVED_VALUES, ), # step 4: add temporal features based on freq of the dataset # month of year in the case when freq="M" # these serve as positional encodings AddTimeFeatures( start_field=FieldName.START, target_field=FieldName.TARGET, output_field=FieldName.FEAT_TIME, time_features=time_features_from_frequency_str(freq), pred_length=config.prediction_length, ), # step 5: add another temporal feature (just a single number) # tells the model where in the life the value of the time series is # sort of running counter AddAgeFeature( target_field=FieldName.TARGET, output_field=FieldName.FEAT_AGE, pred_length=config.prediction_length, log_scale=True, ), # step 6: vertically stack all the temporal features into the key FEAT_TIME VstackFeatures( output_field=FieldName.FEAT_TIME, input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE] + ( [FieldName.FEAT_DYNAMIC_REAL] if config.num_dynamic_real_features > 0 else [] ), ), # step 7: rename to match HuggingFace names RenameFields( mapping={ FieldName.FEAT_STATIC_CAT: "static_categorical_features", FieldName.FEAT_STATIC_REAL: "static_real_features", FieldName.FEAT_TIME: "time_features", FieldName.TARGET: "values", FieldName.OBSERVED_VALUES: "observed_mask", } ), ] )<jupyter_output><empty_output><jupyter_text>Define `InstanceSplitter`For training/validation/testing we next create an `InstanceSplitter` which is used to sample windows from the dataset (as, remember, we can't pass the entire history of values to the Transformer due to time- and memory constraints).The instance splitter samples random `context_length` sized and subsequent `prediction_length` sized windows from the data, and appends a `past_` or `future_` key to any temporal keys in `time_series_fields` for the respective windows. The instance splitter can be configured into three different modes:1. `mode="train"`: Here we sample the context and prediction length windows randomly from the dataset given to it (the training dataset)2. 
`mode="validation"`: Here we sample the very last context length window and prediction window from the dataset given to it (for the back-testing or validation likelihood calculations)3. `mode="test"`: Here we sample the very last context length window only (for the prediction use case)<jupyter_code>from gluonts.transform.sampler import InstanceSampler from typing import Optional def create_instance_splitter( config: PretrainedConfig, mode: str, train_sampler: Optional[InstanceSampler] = None, validation_sampler: Optional[InstanceSampler] = None, ) -> Transformation: assert mode in ["train", "validation", "test"] instance_sampler = { "train": train_sampler or ExpectedNumInstanceSampler( num_instances=1.0, min_future=config.prediction_length ), "validation": validation_sampler or ValidationSplitSampler(min_future=config.prediction_length), "test": TestSplitSampler(), }[mode] return InstanceSplitter( target_field="values", is_pad_field=FieldName.IS_PAD, start_field=FieldName.START, forecast_start_field=FieldName.FORECAST_START, instance_sampler=instance_sampler, past_length=config.context_length + max(config.lags_sequence), future_length=config.prediction_length, time_series_fields=["time_features", "observed_mask"], )<jupyter_output><empty_output><jupyter_text>Create DataLoadersNext, it's time to create the DataLoaders, which allow us to have batches of (input, output pairs) - or in other words (`past_values`, `future_values`).<jupyter_code>from typing import Iterable import torch from gluonts.itertools import Cyclic, Cached from gluonts.dataset.loader import as_stacked_batches def create_train_dataloader( config: PretrainedConfig, freq, data, batch_size: int, num_batches_per_epoch: int, shuffle_buffer_length: Optional[int] = None, cache_data: bool = True, **kwargs, ) -> Iterable: PREDICTION_INPUT_NAMES = [ "past_time_features", "past_values", "past_observed_mask", "future_time_features", ] if config.num_static_categorical_features > 0: PREDICTION_INPUT_NAMES.append("static_categorical_features") if config.num_static_real_features > 0: PREDICTION_INPUT_NAMES.append("static_real_features") TRAINING_INPUT_NAMES = PREDICTION_INPUT_NAMES + [ "future_values", "future_observed_mask", ] transformation = create_transformation(freq, config) transformed_data = transformation.apply(data, is_train=True) if cache_data: transformed_data = Cached(transformed_data) # we initialize a Training instance instance_splitter = create_instance_splitter(config, "train") # the instance splitter will sample a window of # context length + lags + prediction length (from the 366 possible transformed time series) # randomly from within the target time series and return an iterator. 
stream = Cyclic(transformed_data).stream() training_instances = instance_splitter.apply(stream) return as_stacked_batches( training_instances, batch_size=batch_size, shuffle_buffer_length=shuffle_buffer_length, field_names=TRAINING_INPUT_NAMES, output_type=torch.tensor, num_batches_per_epoch=num_batches_per_epoch, ) def create_backtest_dataloader( config: PretrainedConfig, freq, data, batch_size: int, **kwargs, ): PREDICTION_INPUT_NAMES = [ "past_time_features", "past_values", "past_observed_mask", "future_time_features", ] if config.num_static_categorical_features > 0: PREDICTION_INPUT_NAMES.append("static_categorical_features") if config.num_static_real_features > 0: PREDICTION_INPUT_NAMES.append("static_real_features") transformation = create_transformation(freq, config) transformed_data = transformation.apply(data) # We create a Validation Instance splitter which will sample the very last # context window seen during training only for the encoder. instance_sampler = create_instance_splitter(config, "validation") # we apply the transformations in train mode testing_instances = instance_sampler.apply(transformed_data, is_train=True) return as_stacked_batches( testing_instances, batch_size=batch_size, output_type=torch.tensor, field_names=PREDICTION_INPUT_NAMES, )<jupyter_output><empty_output><jupyter_text>We have a test dataloader helper for completion, even though we will not use it here. This is useful in a production setting where we want to start forecasting from the end of a given time series. Thus, the test dataloader will sample the very last context window from the dataset provided and pass it to the model.<jupyter_code>def create_test_dataloader( config: PretrainedConfig, freq, data, batch_size: int, **kwargs, ): PREDICTION_INPUT_NAMES = [ "past_time_features", "past_values", "past_observed_mask", "future_time_features", ] if config.num_static_categorical_features > 0: PREDICTION_INPUT_NAMES.append("static_categorical_features") if config.num_static_real_features > 0: PREDICTION_INPUT_NAMES.append("static_real_features") transformation = create_transformation(freq, config) transformed_data = transformation.apply(data, is_train=False) # We create a test Instance splitter to sample the very last # context window from the dataset provided. 
instance_sampler = create_instance_splitter(config, "test") # We apply the transformations in test mode testing_instances = instance_sampler.apply(transformed_data, is_train=False) return as_stacked_batches( testing_instances, batch_size=batch_size, output_type=torch.tensor, field_names=PREDICTION_INPUT_NAMES, ) train_dataloader = create_train_dataloader( config=config, freq=freq, data=train_dataset, batch_size=256, num_batches_per_epoch=100, ) test_dataloader = create_backtest_dataloader( config=config, freq=freq, data=test_dataset, batch_size=64, )<jupyter_output><empty_output><jupyter_text>Let's check the first batch:<jupyter_code>batch = next(iter(train_dataloader)) for k, v in batch.items(): print(k, v.shape, v.type())<jupyter_output>past_time_features torch.Size([256, 85, 2]) torch.FloatTensor past_values torch.Size([256, 85]) torch.FloatTensor past_observed_mask torch.Size([256, 85]) torch.FloatTensor future_time_features torch.Size([256, 24, 2]) torch.FloatTensor static_categorical_features torch.Size([256, 1]) torch.LongTensor future_values torch.Size([256, 24]) torch.FloatTensor future_observed_mask torch.Size([256, 24]) torch.FloatTensor<jupyter_text>As can be seen, we don't feed `input_ids` and `attention_mask` to the encoder (as would be the case for NLP models), but rather `past_values`, along with `past_observed_mask`, `past_time_features`, and `static_categorical_features`.The decoder inputs consist of `future_values`, `future_observed_mask` and `future_time_features`. The `future_values` can be seen as the equivalent of `decoder_input_ids` in NLP.We refer to the [docs](https://huggingface.co/docs/transformers/model_doc/time_series_transformertransformers.TimeSeriesTransformerForPrediction.forward.past_values) for a detailed explanation for each of them. Forward passLet's perform a single forward pass with the batch we just created:<jupyter_code># perform forward pass outputs = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"] if config.num_static_categorical_features > 0 else None, static_real_features=batch["static_real_features"] if config.num_static_real_features > 0 else None, future_values=batch["future_values"], future_time_features=batch["future_time_features"], future_observed_mask=batch["future_observed_mask"], output_hidden_states=True, ) print("Loss:", outputs.loss.item())<jupyter_output>Loss: 9.069628715515137<jupyter_text>Note that the model is returning a loss. This is possible as the decoder automatically shifts the `future_values` one position to the right in order to have the labels. This allows computing a loss between the predicted values and the labels.Also note that the decoder uses a causal mask to not look into the future as the values it needs to predict are in the `future_values` tensor. Train the ModelIt's time to train the model! 
We'll use a standard PyTorch training loop.We will use the 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) library here, which automatically places the model, optimizer and dataloader on the appropriate `device`.<jupyter_code>from accelerate import Accelerator from torch.optim import AdamW accelerator = Accelerator() device = accelerator.device model.to(device) optimizer = AdamW(model.parameters(), lr=6e-4, betas=(0.9, 0.95), weight_decay=1e-1) model, optimizer, train_dataloader = accelerator.prepare( model, optimizer, train_dataloader, ) model.train() for epoch in range(40): for idx, batch in enumerate(train_dataloader): optimizer.zero_grad() outputs = model( static_categorical_features=batch["static_categorical_features"].to(device) if config.num_static_categorical_features > 0 else None, static_real_features=batch["static_real_features"].to(device) if config.num_static_real_features > 0 else None, past_time_features=batch["past_time_features"].to(device), past_values=batch["past_values"].to(device), future_time_features=batch["future_time_features"].to(device), future_values=batch["future_values"].to(device), past_observed_mask=batch["past_observed_mask"].to(device), future_observed_mask=batch["future_observed_mask"].to(device), ) loss = outputs.loss # Backpropagation accelerator.backward(loss) optimizer.step() if idx % 100 == 0: print(loss.item())<jupyter_output>9.312426567077637 7.79284143447876 7.852108001708984 7.6523308753967285 7.4140448570251465 7.391452789306641 7.355312824249268 7.018772125244141 6.6947102546691895 6.884510040283203 6.586727142333984 6.800746917724609 6.795780181884766 7.579933166503906 7.15477180480957 6.703517436981201 7.250757694244385 7.39132833480835 7.598387241363525 7.2024149894714355 7.323209285736084 6.823130130767822 6.757688045501709 7.494504451751709 7.513833522796631 7.290976047515869 6.932094097137451 7.130832672119141 7.020802974700928 6.652693271636963 6.758007049560547 7.680879592895508 7.614417552947998 6.844751834869385 6.809683322906494 6.6291022300720215 7.306612491607666 6.697507381439209 7.026710510253906 6.921131134033203<jupyter_text>InferenceAt inference time, it's recommended to use the `generate()` method for autoregressive generation, similar to NLP models.Forecasting involves getting data from the test instance sampler, which will sample the very last `context_length` sized window of values from each time series in the dataset, and pass it to the model. Note that we pass `future_time_features`, which are known ahead of time, to the decoder.The model will autoregressively sample a certain number of values from the predicted distribution and pass them back to the decoder to return the prediction outputs:<jupyter_code>model.eval() forecasts = [] for batch in test_dataloader: outputs = model.generate( static_categorical_features=batch["static_categorical_features"].to(device) if config.num_static_categorical_features > 0 else None, static_real_features=batch["static_real_features"].to(device) if config.num_static_real_features > 0 else None, past_time_features=batch["past_time_features"].to(device), past_values=batch["past_values"].to(device), future_time_features=batch["future_time_features"].to(device), past_observed_mask=batch["past_observed_mask"].to(device), ) forecasts.append(outputs.sequences.cpu().numpy())<jupyter_output><empty_output><jupyter_text>The model outputs a tensor of shape (`batch_size`, `number of samples`, `prediction length`). 
In this case, we get `100` possible values for the next `24` months (for each example in the batch which is of size `64`):<jupyter_code>forecasts[0].shape<jupyter_output><empty_output><jupyter_text>We'll stack them vertically, to get forecasts for all time-series in the test dataset:<jupyter_code>forecasts = np.vstack(forecasts) print(forecasts.shape)<jupyter_output>(366, 100, 24)<jupyter_text>We can evaluate the resulting forecast with respect to the ground truth out of sample values present in the test set. For that, we'll use the 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) library, which includes the [MASE](https://huggingface.co/spaces/evaluate-metric/mase) and [sMAPE](https://huggingface.co/spaces/evaluate-metric/smape) metrics.We calculate both metrics for each time series in the dataset:<jupyter_code>from evaluate import load from gluonts.time_feature import get_seasonality mase_metric = load("evaluate-metric/mase") smape_metric = load("evaluate-metric/smape") forecast_median = np.median(forecasts, 1) mase_metrics = [] smape_metrics = [] for item_id, ts in enumerate(test_dataset): training_data = ts["target"][:-prediction_length] ground_truth = ts["target"][-prediction_length:] mase = mase_metric.compute( predictions=forecast_median[item_id], references=np.array(ground_truth), training=np.array(training_data), periodicity=get_seasonality(freq), ) mase_metrics.append(mase["mase"]) smape = smape_metric.compute( predictions=forecast_median[item_id], references=np.array(ground_truth), ) smape_metrics.append(smape["smape"]) print(f"MASE: {np.mean(mase_metrics)}") print(f"sMAPE: {np.mean(smape_metrics)}")<jupyter_output>sMAPE: 0.1609541520852549<jupyter_text>We can also plot the individual metrics of each time series in the dataset and observe that a handful of time series contribute a lot to the final test metric:<jupyter_code>plt.scatter(mase_metrics, smape_metrics, alpha=0.3) plt.xlabel("MASE") plt.ylabel("sMAPE") plt.show()<jupyter_output><empty_output><jupyter_text>To plot the prediction for any time series with respect the ground truth test data we define the following helper:<jupyter_code>import matplotlib.dates as mdates def plot(ts_index): fig, ax = plt.subplots() index = pd.period_range( start=test_dataset[ts_index][FieldName.START], periods=len(test_dataset[ts_index][FieldName.TARGET]), freq=freq, ).to_timestamp() # Major ticks every half year, minor ticks every month, ax.xaxis.set_major_locator(mdates.MonthLocator(bymonth=(1, 7))) ax.xaxis.set_minor_locator(mdates.MonthLocator()) ax.plot( index[-2 * prediction_length :], test_dataset[ts_index]["target"][-2 * prediction_length :], label="actual", ) plt.plot( index[-prediction_length:], np.median(forecasts[ts_index], axis=0), label="median", ) plt.fill_between( index[-prediction_length:], forecasts[ts_index].mean(0) - forecasts[ts_index].std(axis=0), forecasts[ts_index].mean(0) + forecasts[ts_index].std(axis=0), alpha=0.3, interpolate=True, label="+/- 1-std", ) plt.legend() plt.show()<jupyter_output><empty_output><jupyter_text>For example:<jupyter_code>plot(334)<jupyter_output><empty_output>
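<jupyter_text>As an optional last step (not part of the original walkthrough — a minimal sketch), you could persist the fine-tuned model for later reuse. `TimeSeriesTransformerForPrediction` inherits the standard `save_pretrained`/`from_pretrained` API; since the model was prepared with 🤗 Accelerate, we unwrap it first. The directory name below is arbitrary.<jupyter_code># unwrap the accelerate-prepared model and save it (directory name is just an example)
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained("ts-transformer-tourism-monthly")

# later, reload it with the usual from_pretrained API
from transformers import TimeSeriesTransformerForPrediction

reloaded_model = TimeSeriesTransformerForPrediction.from_pretrained("ts-transformer-tourism-monthly")<jupyter_output><empty_output>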
notebooks/examples/time-series-transformers.ipynb/0
{ "file_path": "notebooks/examples/time-series-transformers.ipynb", "repo_id": "notebooks", "token_count": 13676 }
143
<jupyter_start><jupyter_text>Huggingface Sagemaker-sdk - Run a batch transform inference job with 🤗 Transformers 1. [Introduction](Introduction) 2. [Run Batch Transform after training a model](Run-Batch-Transform-after-training-a-model) 3. [Run Batch Transform Inference Job with a fine-tuned model using `jsonl`](Run-Batch-Transform-Inference-Job-with-a-fine-tuned-model-using-jsonl) Welcome to this getting started guide, we will use the new Hugging Face Inference DLCs and Amazon SageMaker Python SDK to deploy two transformer model for inference. In the first example we deploy a trained Hugging Face Transformer model on to SageMaker for inference.In the second example we directly deploy one of the 10 000+ Hugging Face Transformers from the [Hub](https://huggingface.co/models) to Amazon SageMaker for Inference.< Run Batch Transform after training a model _not included in the notebook_After you train a model, you can use [Amazon SageMaker Batch Transform](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html) to perform inferences with the model. In Batch Transform you provide your inference data as a S3 uri and SageMaker will care of downloading it, running the prediction and uploading the results afterwards to S3 again. You can find more documentation for Batch Transform [here](https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html)If you trained the model using the **HuggingFace estimator**, you can invoke `transformer()` method to create a transform job for a model based on the training job.```pythonbatch_job = huggingface_estimator.transformer( instance_count=1, instance_type='ml.c5.2xlarge', strategy='SingleRecord')batch_job.transform( data='s3://s3-uri-to-batch-data', content_type='application/json', split_type='Line')```For more details about what can be specified here, see [API docs](https://sagemaker.readthedocs.io/en/stable/overview.htmlsagemaker-batch-transform).<jupyter_code>!pip install "sagemaker>=2.48.0" "datasets==1.11" --upgrade<jupyter_output><empty_output><jupyter_text>Run Batch Transform Inference Job with a fine-tuned model using `jsonl` Data Pre-ProcessingIn this example we are using the provided `tweet_data.csv` as dataset. The `csv` contains ~1800 tweets about different airlines. The `csv` contains 1 column `"inputs"` with the tweets. To use this `csv` we need to convert it into a `jsonl` file and upload it to s3. Due to the complex structure of text are only `jsonl` file supported for batch transform. 
As pre-processing we are removing the `@` in the beginning of the tweet to get the names/identities correct._**NOTE**: While preprocessing you need to make sure that your `inputs` fit the `max_length`.<jupyter_code>import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}") import csv import json from sagemaker.s3 import S3Uploader,s3_path_join # datset files dataset_csv_file="tweet_data.csv" dataset_jsonl_file="tweet_data.jsonl" with open(dataset_csv_file, "r+") as infile, open(dataset_jsonl_file, "w+") as outfile: reader = csv.DictReader(infile) for row in reader: # remove @ row["inputs"] = row["inputs"].replace("@","") json.dump(row, outfile) outfile.write('\n') # uploads a given file to S3. input_s3_path = s3_path_join("s3://",sagemaker_session_bucket,"batch_transform/input") output_s3_path = s3_path_join("s3://",sagemaker_session_bucket,"batch_transform/output") s3_file_uri = S3Uploader.upload(dataset_jsonl_file,input_s3_path) print(f"{dataset_jsonl_file} uploaded to {s3_file_uri}")<jupyter_output>tweet_data.jsonl uploaded to s3://sagemaker-us-east-1-558105141721/batch_transform/input/tweet_data.jsonl<jupyter_text>The created file looks like this```json{"inputs": "VirginAmerica What dhepburn said."}{"inputs": "VirginAmerica plus you've added commercials to the experience... tacky."}{"inputs": "VirginAmerica I didn't today... Must mean I need to take another trip!"}{"inputs": "VirginAmerica it's really aggressive to blast obnoxious \"entertainment\"...."}{"inputs": "VirginAmerica and it's a really big bad thing about it"}{"inputs": "VirginAmerica seriously would pay $30 a flight for seats that didn't h...."}{"inputs": "VirginAmerica yes, nearly every time I fly VX this \u201cear worm\u201d won\u2019t go away :)"}{"inputs": "VirginAmerica Really missed a prime opportunity for Men Without ..."}{"inputs": "virginamerica Well, I didn't\u2026but NOW I DO! :-D"}{"inputs": "VirginAmerica it was amazing, and arrived an hour early. You're too good to me."}{"inputs": "VirginAmerica did you know that suicide is the second leading cause of death among teens 10-24"}{"inputs": "VirginAmerica I &lt;3 pretty graphics. so much better than minimal iconography. :D"}{"inputs": "VirginAmerica This is such a great deal! Already thinking about my 2nd trip ..."}....``` Create Inference Transformer to run the batch jobWe use the [twitter-roberta-base-sentiment](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment) model running our batch transform job. This is a RoBERTa-base model trained on ~58M tweets and finetuned for sentiment analysis with the TweetEval benchmark.<jupyter_code>from sagemaker.huggingface.model import HuggingFaceModel # Hub Model configuration. 
<https://huggingface.co/models> hub = { 'HF_MODEL_ID':'cardiffnlp/twitter-roberta-base-sentiment', 'HF_TASK':'text-classification' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( env=hub, # configuration for loading model from Hub role=role, # iam role with permissions to create an Endpoint transformers_version="4.26", # transformers version used pytorch_version="1.13", # pytorch version used py_version='py39', # python version used ) # create Transformer to run our batch job batch_job = huggingface_model.transformer( instance_count=1, instance_type='ml.p3.2xlarge', output_path=output_s3_path, # we are using the same s3 path to save the output with the input strategy='SingleRecord') # starts batch transform job and uses s3 data as input batch_job.transform( data=s3_file_uri, content_type='application/json', split_type='Line') import json from sagemaker.s3 import S3Downloader from ast import literal_eval # creating s3 uri for result file -> input file + .out output_file = f"{dataset_jsonl_file}.out" output_path = s3_path_join(output_s3_path,output_file) # download file S3Downloader.download(output_path,'.') batch_transform_result = [] with open(output_file) as f: for line in f: # converts jsonline array to normal array line = "[" + line.replace("[","").replace("]",",") + "]" batch_transform_result = literal_eval(line) # print results print(batch_transform_result[:3])<jupyter_output>INFO:botocore.credentials:Found credentials from IAM Role: BaseNotebookInstanceEc2InstanceRole
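<jupyter_text>Optionally — and purely as an illustration that is not part of the original example — you can map the raw class names to human-readable sentiments. According to the model card of `cardiffnlp/twitter-roberta-base-sentiment`, `LABEL_0`, `LABEL_1` and `LABEL_2` correspond to negative, neutral and positive respectively:<jupyter_code># hypothetical post-processing helper: translate raw labels into readable sentiments
label2sentiment = {"LABEL_0": "negative", "LABEL_1": "neutral", "LABEL_2": "positive"}

readable_results = [
    {"sentiment": label2sentiment.get(pred["label"], pred["label"]), "score": pred["score"]}
    for pred in batch_transform_result
]
print(readable_results[:3])<jupyter_output><empty_output>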
notebooks/sagemaker/12_batch_transform_inference/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/12_batch_transform_inference/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 2457 }
144
# accelerate-aws-sagemaker Examples showcasing AWS SageMaker integration of 🤗 Accelerate. Just give the `accelerate config` and do `accelerate launch` 🚀. It's as simple as that! 1. Set up the accelerate config by running `accelerate config --config_file accelerate_config.yaml` and answer the SageMaker questions. 2. Below is a sample config which is using aws `profile` to launch training job using 🤗 SageMaker estimator. It also has the `iam_role_name` which has the needed SageMaker permissions specified. In this config it is replaced `xxxxx` as user needs to specify it based on their corresponding AWS setup. ```yaml base_job_name: accelerate-sagemaker-1 compute_environment: AMAZON_SAGEMAKER distributed_type: DATA_PARALLEL ec2_instance_type: ml.p3.16xlarge iam_role_name: xxxxx image_uri: null mixed_precision: fp16 num_machines: 1 profile: xxxxx py_version: py38 pytorch_version: 1.10.2 region: us-east-1 transformers_version: 4.17.0 use_cpu: false ``` 3. One can specify a custom docker image instead of Official 🤗 DLCs through the accelerate config questionnaire. When this isn't provided, the latest Official 🤗 DLC will be used. 4. Support for input channels pointing to S3 data locations via TSV file, e.g., below are the contents of sagemaker_inputs.tsv whose location is given as part of accelerate config setup. ```tsv channel_name data_location train s3://sagemaker-sample/samples/datasets/imdb/train test s3://sagemaker-sample/samples/datasets/imdb/test ``` 5. Support for SageMaker metrics logging via TSV file, e.g., below are the contents of the sagemaker_metrics_definition.tsv whose location is given as part of accelerate config setup. ```tsv metric_name metric_regex accuracy 'accuracy': ([0-9.]+) f1 'f1': ([0-9.]+) ``` 6. Example of accelerate config with above features setup [XXXXX values are AWS account specific]: ```yaml base_job_name: accelerate-sagemaker-1 compute_environment: AMAZON_SAGEMAKER distributed_type: DATA_PARALLEL ec2_instance_type: ml.p3.16xlarge iam_role_name: XXXXX image_uri: 763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.8.1-transformers4.10.2-gpu-py36-cu111-ubuntu18.04 mixed_precision: fp16 num_machines: 1 profile: XXXXX py_version: py38 pytorch_version: 1.10.2 region: us-east-1 sagemaker_inputs_file: sagemaker_inputs.tsv sagemaker_metrics_file: sagemaker_metrics_definition.tsv transformers_version: 4.17.0 use_cpu: false ``` 7. Put `requirements.txt` with all the needed libraries for running the training script. 8. Running `text-classification` example using s3 datasets (from the root directory): ```bash cd src/text-classification bash launch.sh ``` The contents of launch.sh ```bash accelerate launch --config_file accelerate_config.yaml train_using_s3_data.py \ --mixed_precision "fp16" ``` Output logs: ```bash ... [1,mpirank:0,algo-1]<stdout>:algo-1:79:1300 [0] NCCL INFO Launch mode Parallel [1,mpirank:0,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:3,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:1,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:2,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:6,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:5,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:7,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. 
[1,mpirank:4,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:0,algo-1]<stdout>:epoch 0: {'accuracy': 0.6838235294117647, 'f1': 0.8122270742358079} [1,mpirank:0,algo-1]<stdout>:epoch 1: {'accuracy': 0.7205882352941176, 'f1': 0.8256880733944955} [1,mpirank:0,algo-1]<stdout>:epoch 2: {'accuracy': 0.75, 'f1': 0.838095238095238} 2022-09-21 13:21:05,187 sagemaker-training-toolkit INFO Waiting for the process to finish and give a return code. 2022-09-21 13:21:05,188 sagemaker-training-toolkit INFO Done waiting for a return code. Received 0 from exiting process. 2022-09-21 13:21:05,188 sagemaker-training-toolkit INFO Reporting training SUCCESS ``` 9. Running `seq2seq` example: ```bash cd src/seq2seq bash launch.sh ``` The contents of launch.sh ```bash accelerate launch --config_file accelerate_config.yaml run_seq2seq_no_trainer.py \ --dataset_name "smangrul/MuDoConv" \ --max_source_length 128 \ --source_prefix "chatbot: " \ --max_target_length 64 \ --val_max_target_length 64 \ --val_min_target_length 20 \ --n_val_batch_generations 5 \ --n_train 10000 \ --n_val 1000 \ --pad_to_max_length True\ --num_beams 10 \ --model_name_or_path "facebook/blenderbot-400M-distill" \ --per_device_train_batch_size 16 \ --per_device_eval_batch_size 16 \ --learning_rate 1e-6 \ --weight_decay 0.0 \ --num_train_epochs 1 \ --gradient_accumulation_steps 1 \ --num_warmup_steps 100 \ --output_dir "/opt/ml/model" \ --seed 25 \ --logging_steps 100 \ --report_name "blenderbot_400M_finetuning" ``` Output logs: ``` [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:37:39 - INFO - __main__ - Distributed environment: MULTI_GPU Backend: smddp [1,mpirank:0,algo-1]<stderr>:Num processes: 8 [1,mpirank:0,algo-1]<stderr>:Process index: 0 [1,mpirank:0,algo-1]<stderr>:Local process index: 0 [1,mpirank:0,algo-1]<stderr>:Device: cuda:0 [1,mpirank:0,algo-1]<stderr>: ... [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - ***** Running training ***** [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Num examples = 10000 [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Num Epochs = 1 [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Instantaneous batch size per device = 16 [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Total train batch size (w. parallel, distributed & accumulation) = 128 [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Gradient Accumulation steps = 1 [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Total optimization steps = 79 ... [1,mpirank:0,algo-1]<stderr>:#015100%|██████████| 79/79 [00:19<00:00, 4.79it/s] [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:50 - INFO - __main__ - Epoch 0 training took 19.50162172317505 seconds [1,mpirank:0,algo-1]<stdout>:starting evaluation [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:55 - INFO - __main__ - printing few sample generations and corresponding labels from eval set [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:55 - INFO - __main__ - prompt | generated | label [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:55 - INFO - __main__ - chatbot:your persona: i work as an electrician. i always sleep 8 hours a day. </s> <s> Which level are you at?</s> | I'm at the top of the ladder. I work for an electrical company. | I received on-the-job training when i first started [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:55 - INFO - __main__ - chatbot:your persona: i work as an electrician. i always sleep 8 hours a day. 
</s> <s> Which level are you at? </s> <s> I received on-the-job training when i first started </s> <s> Thats great! How long have you been doing this work? </s> | I've been working as an Electrician for about 5 years now. It's a great job. | For a good number of years now. ... [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:39:02 - INFO - __main__ - chatbot:your persona: i'm a painter and love to create art. i'm a talented singer and have won several competitions. </s> <s> I love the Doors! They have such a unique sound. Do you have a favorite Doors song?</s> | My favorite song of theirs is "When I Was Your Man". What's yours? | It's a tie between People are strange and Love me two times. What's your favorite? [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:39:02 - INFO - __main__ - chatbot:your persona: i'm a painter and love to create art. i'm a talented singer and have won several competitions. </s> <s> I think my favorite is Love Street. It has such a haunting melody. Have you heard that one?</s> | No, I haven't. I'll have to check it out. What genre is it? | Yeah, it's a pretty great song, Jim Morrison was just an amazing songwriter, I aspire to make songs as good as his one day. ... [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:39:17 - INFO - __main__ - {'bleu': 1.7067114414104911} [1,mpirank:0,algo-1]<stdout>:evaluation completed [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:39:17 - INFO - __main__ - Epoch 0 evaluation took 24.294514417648315 seconds [1,mpirank:0,algo-1]<stderr>:Configuration saved in /opt/ml/model/config.json [1,mpirank:0,algo-1]<stderr>:Model weights saved in /opt/ml/model/pytorch_model.bin [1,mpirank:0,algo-1]<stderr>:tokenizer config file saved in /opt/ml/model/tokenizer_config.json [1,mpirank:0,algo-1]<stderr>:Special tokens file saved in /opt/ml/model/special_tokens_map.json [1,mpirank:0,algo-1]<stderr>:#015100%|██████████| 79/79 [00:47<00:00, 1.65it/s] 2022-09-21 13:39:27,753 sagemaker-training-toolkit INFO Waiting for the process to finish and give a return code. 2022-09-21 13:39:27,753 sagemaker-training-toolkit INFO Done waiting for a return code. Received 0 from exiting process. 2022-09-21 13:39:27,754 sagemaker-training-toolkit INFO Reporting training SUCCESS ```
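10. Retrieving the trained model artifacts (optional). SageMaker packages whatever the training script writes to `/opt/ml/model` into a `model.tar.gz` and uploads it to S3 when the job finishes. The exact URI depends on your bucket and training job name — the values below are placeholders:
```bash
# substitute your own bucket and the full training job name
aws s3 cp s3://<your-sagemaker-bucket>/<training-job-name>/output/model.tar.gz .
tar -xzf model.tar.gz
```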
notebooks/sagemaker/22_accelerate_sagemaker_examples/README.md/0
{ "file_path": "notebooks/sagemaker/22_accelerate_sagemaker_examples/README.md", "repo_id": "notebooks", "token_count": 3628 }
145
<jupyter_start><jupyter_text>Stable Diffusion on Amazon SageMakerWelcome to this Amazon SageMaker guide on how to use the [Stable Diffusion](https://huggingface.co/blog/stable_diffusion) to generate image for a given input prompt. We will deploy [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) to Amazon SageMake for real-time inference using Hugging Faces [🧨 Diffusers library](https://huggingface.co/docs/diffusers/index).What we are going to do 1. Create Stable Diffusion inference script 2. Create SageMaker `model.tar.gz` artifact3. Deploy the model to Amazon SageMaker4. Generate images using the deployed model What is Stable Diffusion?Stable Diffusion is a text-to-image latent diffusion model created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/) and [LAION](https://laion.ai/). It is trained on 512x512 images from a subset of the [LAION-5B](https://laion.ai/blog/laion-5b/) database. LAION-5B is the largest, freely accessible multi-modal dataset that currently exists.This guide will not explain how the model works. If you are interested you should checkout the [Stable Diffusion with 🧨 Diffusers](https://huggingface.co/blog/stable_diffusion) blog post or [The Annotated Diffusion Model](https://huggingface.co/blog/annotated-diffusion)--- Before we can get started, make sure you have [Hugging Face user account](https://huggingface.co/join). The account is needed to load the [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) from the [Hugging Face Hub](https://huggingface.co/).Create account: https://huggingface.co/joinBefore we can get started we have to install the missing dependencies to be able to create our `model.tar.gz` artifact and create our Amazon SageMaker endpoint. We also have to make sure we have the permission to create our SageMaker Endpoint.<jupyter_code>!pip install "sagemaker==2.116.0" "huggingface_hub==0.10.1" --upgrade --quiet<jupyter_output><empty_output><jupyter_text>_If you are going to use Sagemaker in a local environment (not SageMaker Studio or Notebook Instances). You need access to an IAM Role with the required permissions for Sagemaker. You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it._<jupyter_code>import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output>Couldn't call 'get_role' to get Role ARN from role name philippschmid to get Role path.<jupyter_text>Create Stable Diffusion inference script Amazon SageMaker allows us to customize the inference script by providing a `inference.py` file. The `inference.py` file is the entry point to our model. 
It is responsible for loading the model and handling the inference request. If you are used to deploying Hugging Face Transformers that might be knew to you. Usually, we just provide the `HF_MODEL_ID` and `HF_TASK` and the Hugging Face DLC takes care of the rest. For `diffusers` thats not yet possible. We have to provide the `inference.py` file and implement the `model_fn` and `predict_fn` functions. If you want to learn more about creating a custom inference script you can check out [Creating document embeddings with Hugging Face's Transformers & Amazon SageMaker](https://www.philschmid.de/custom-inference-huggingface-sagemaker)In addition to the `inference.py` file we also have to provide a `requirements.txt` file. The `requirements.txt` file is used to install the dependencies for our `inference.py` file.The first step is to create a `code/` directory.<jupyter_code>!mkdir code<jupyter_output><empty_output><jupyter_text>As next we create a `requirements.txt` file and add the `diffusers` library to it.<jupyter_code>%%writefile code/requirements.txt diffusers==0.6.0 transformers==4.23.1<jupyter_output><empty_output><jupyter_text>The last step for our inference handler is to create the `inference.py` file. The `inference.py` file is responsible for loading the model and handling the inference request. The `model_fn` function is called when the model is loaded. The `predict_fn` function is called when we want to do inference. We are using the `diffusers` library to load the model in the `model_fn` and generate 4 image for an input prompt with the `predict_fn`. The `predict_fn` function returns the `4` image as a `base64` encoded string.<jupyter_code>%%writefile code/inference.py import base64 import torch from io import BytesIO from diffusers import StableDiffusionPipeline def model_fn(model_dir): # Load stable diffusion and move it to the GPU pipe = StableDiffusionPipeline.from_pretrained(model_dir, torch_dtype=torch.float16) pipe = pipe.to("cuda") return pipe def predict_fn(data, pipe): # get prompt & parameters prompt = data.pop("inputs", data) # set valid HP for stable diffusion num_inference_steps = data.pop("num_inference_steps", 50) guidance_scale = data.pop("guidance_scale", 7.5) num_images_per_prompt = data.pop("num_images_per_prompt", 4) # run generation with parameters generated_images = pipe( prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, num_images_per_prompt=num_images_per_prompt, )["images"] # create response encoded_images = [] for image in generated_images: buffered = BytesIO() image.save(buffered, format="JPEG") encoded_images.append(base64.b64encode(buffered.getvalue()).decode()) # create response return {"generated_images": encoded_images}<jupyter_output><empty_output><jupyter_text>Create SageMaker `model.tar.gz` artifactTo use our `inference.py` we need to bundle it together with our model weights into a `model.tar.gz`. The archive includes all our model-artifcats to run inference. The `inference.py` script will be placed into a `code/` folder. 
We will use the `huggingface_hub` SDK to easily download [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) from [Hugging Face](https://huggingface.co/CompVis/stable-diffusion-v1-4) and then upload it to Amazon S3 with the `sagemaker` SDK.Before we can load our model from the Hugging Face Hub we have to make sure that we accepted the license of [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) to be able to use it. [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) is published under the [CreativeML OpenRAIL-M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license). You can accept the license by clicking on the `Agree and access repository` button on the model page at: https://huggingface.co/CompVis/stable-diffusion-v1-4. _Note: This will give access to the repository for the logged in user. This user can then be used to generate [HF Tokens](https://huggingface.co/settings/tokens) to load the model programmatically._Before we can load the model make sure you have a valid [HF Token](https://huggingface.co/settings/token). You can create a token by going to your [Hugging Face Settings](https://huggingface.co/settings/token) and clicking on the `New token` button. Make sure the enviornment has enough diskspace to store the model, ~30GB should be enough.<jupyter_code>from distutils.dir_util import copy_tree from pathlib import Path from huggingface_hub import snapshot_download import random HF_MODEL_ID="CompVis/stable-diffusion-v1-4" HF_TOKEN="" # your hf token: https://huggingface.co/settings/tokens assert len(HF_TOKEN) > 0, "Please set HF_TOKEN to your huggingface token. You can find it here: https://huggingface.co/settings/tokens" # download snapshot snapshot_dir = snapshot_download(repo_id=HF_MODEL_ID,revision="fp16",use_auth_token=HF_TOKEN) # create model dir model_tar = Path(f"model-{random.getrandbits(16)}") model_tar.mkdir(exist_ok=True) # copy snapshot to model dir copy_tree(snapshot_dir, str(model_tar))<jupyter_output><empty_output><jupyter_text>The next step is to copy the `code/` directory into the `model/` directory.<jupyter_code># copy code/ to model dir copy_tree("code/", str(model_tar.joinpath("code")))<jupyter_output><empty_output><jupyter_text>Before we can upload the model to Amazon S3 we have to create a `model.tar.gz` archive. Important is that the archive should directly contain all files and not a folder with the files. For example, your file should look like this:```model.tar.gz/|- model_index.json|- unet/|- code/```<jupyter_code>import tarfile import os # helper to create the model.tar.gz def compress(tar_dir=None,output_file="model.tar.gz"): parent_dir=os.getcwd() os.chdir(tar_dir) with tarfile.open(os.path.join(parent_dir, output_file), "w:gz") as tar: for item in os.listdir('.'): print(item) tar.add(item, arcname=item) os.chdir(parent_dir) compress(str(model_tar))<jupyter_output><empty_output><jupyter_text>After we created the `model.tar.gz` archive we can upload it to Amazon S3. 
We will use the `sagemaker` SDK to upload the model to our sagemaker session bucket.<jupyter_code>from sagemaker.s3 import S3Uploader # upload model.tar.gz to s3 s3_model_uri=S3Uploader.upload(local_path="model.tar.gz", desired_s3_uri=f"s3://{sess.default_bucket()}/stable-diffusion-v1-4") print(f"model uploaded to: {s3_model_uri}")<jupyter_output><empty_output><jupyter_text>Deploy the model to Amazon SageMakerAfter we have uploaded our model archive we can deploy our model to Amazon SageMaker. We will use `HuggingfaceModel` to create our real-time inference endpoint.We are going to deploy the model to an `g4dn.xlarge` instance. The `g4dn.xlarge` instance is a GPU instance with 1 NVIDIA Tesla T4 GPUs. If you are interested in how you could add autoscaling to your endpoint you can check out [Going Production: Auto-scaling Hugging Face Transformers with Amazon SageMaker](https://www.philschmid.de/auto-scaling-sagemaker-huggingface).<jupyter_code>from sagemaker.huggingface.model import HuggingFaceModel # create Hugging Face Model Class huggingface_model = HuggingFaceModel( model_data=s3_model_uri, # path to your model and script role=role, # iam role with permissions to create an Endpoint transformers_version="4.17", # transformers version used pytorch_version="1.10", # pytorch version used py_version='py38', # python version used ) # deploy the endpoint endpoint predictor = huggingface_model.deploy( initial_instance_count=1, instance_type="ml.g4dn.xlarge" )<jupyter_output>--------------!<jupyter_text>Generate images using the deployed modelThe `.deploy()` returns an `HuggingFacePredictor` object which can be used to request inference. Our endpoint expects a `json` with at least `inputs` key. The `inputs` key is the input prompt for the model, which will be used to generate the image. Additionally, we can provide `num_inference_steps`, `guidance_scale` & `num_images_per_prompt` to controll the generation.The `predictor.predict()` function returns a `json` with the `generated_images` key. The `generated_images` key contains the `4` generated images as a `base64` encoded string. To decode our response we added a small helper function `decode_base64_to_image` which takes the `base64` encoded string and returns a `PIL.Image` object and `display_images`, which takes a list of `PIL.Image` objects and displays them.<jupyter_code>from PIL import Image from io import BytesIO from IPython.display import display import base64 import matplotlib.pyplot as plt # helper decoder def decode_base64_image(image_string): base64_image = base64.b64decode(image_string) buffer = BytesIO(base64_image) return Image.open(buffer) # display PIL images as grid def display_images(images=None,columns=3, width=100, height=100): plt.figure(figsize=(width, height)) for i, image in enumerate(images): plt.subplot(int(len(images) / columns + 1), columns, i + 1) plt.axis('off') plt.imshow(image)<jupyter_output><empty_output><jupyter_text>Now, lets generate some images. As example lets generate `3` images for the prompt `A dog trying catch a flying pizza art drawn by disney concept artists`. 
Generating `3` images takes around `30` seconds.<jupyter_code>num_images_per_prompt = 3 prompt = "A dog trying catch a flying pizza art drawn by disney concept artists, golden colour, high quality, highly detailed, elegant, sharp focus" # run prediction response = predictor.predict(data={ "inputs": prompt, "num_images_per_prompt" : num_images_per_prompt } ) # decode images decoded_images = [decode_base64_image(image) for image in response["generated_images"]] # visualize generation display_images(decoded_images)<jupyter_output>/tmp/ipykernel_5489/921716793.py:8: MatplotlibDeprecationWarning: Auto-removal of overlapping axes is deprecated since 3.6 and will be removed two minor releases later; explicitly call ax.remove() as needed. plt.subplot(int(len(images) / columns + 1), columns, i + 1)<jupyter_text>Delete model and endpointTo clean up, we can delete the model and endpoint.<jupyter_code>predictor.delete_model() predictor.delete_endpoint()<jupyter_output><empty_output>
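<jupyter_text>If you want to keep the generated images, you can also write them to disk — a trivial, optional sketch that only uses the locally decoded `decoded_images`; the file names are arbitrary:<jupyter_code># save the decoded PIL images locally
for idx, image in enumerate(decoded_images):
    image.save(f"generated-{idx}.png")<jupyter_output><empty_output>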
notebooks/sagemaker/23_stable_diffusion_inference/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/23_stable_diffusion_inference/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 4469 }
146
import os import argparse from transformers import ( AutoModelForCausalLM, AutoTokenizer, set_seed, default_data_collator, BitsAndBytesConfig, Trainer, TrainingArguments, ) from datasets import load_from_disk import torch from peft import PeftConfig, PeftModel def parse_arge(): """Parse the arguments.""" parser = argparse.ArgumentParser() # add model id and dataset path argument parser.add_argument( "--model_id", type=str, default="google/flan-t5-xl", help="Model id to use for training.", ) parser.add_argument("--dataset_path", type=str, default="lm_dataset", help="Path to dataset.") # add training hyperparameters for epochs, batch size, learning rate, and seed parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train for.") parser.add_argument( "--per_device_train_batch_size", type=int, default=1, help="Batch size to use for training.", ) parser.add_argument("--lr", type=float, default=5e-5, help="Learning rate to use for training.") parser.add_argument("--seed", type=int, default=42, help="Seed to use for training.") parser.add_argument( "--gradient_checkpointing", type=bool, default=True, help="Path to deepspeed config file.", ) parser.add_argument( "--bf16", type=bool, default=True if torch.cuda.get_device_capability()[0] == 8 else False, help="Whether to use bf16.", ) parser.add_argument( "--merge_weights", type=bool, default=True, help="Whether to merge LoRA weights with base model.", ) args = parser.parse_known_args() return args def create_peft_config(model, gradient_checkpointing=True): from peft import ( get_peft_model, LoraConfig, TaskType, prepare_model_for_kbit_training, ) peft_config = LoraConfig( r=64, lora_alpha=16, target_modules=[ "query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h", ], lora_dropout=0.1, bias="none", task_type=TaskType.CAUSAL_LM, ) # prepare int-4 model for training model = prepare_model_for_kbit_training(model) if gradient_checkpointing: model.gradient_checkpointing_enable() model = get_peft_model(model, peft_config) model.print_trainable_parameters() return model def training_function(args): # set seed set_seed(args.seed) dataset = load_from_disk(args.dataset_path) # load model from the hub with a bnb config bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16, ) model = AutoModelForCausalLM.from_pretrained( args.model_id, use_cache=False if args.gradient_checkpointing else True, # this is needed for gradient checkpointing trust_remote_code=True, # ATTENTION: This allows remote code execution device_map="auto", quantization_config=bnb_config, ) # create peft config model = create_peft_config(model, args.gradient_checkpointing) # Define training args output_dir = "/tmp" training_args = TrainingArguments( output_dir=output_dir, overwrite_output_dir=True, per_device_train_batch_size=args.per_device_train_batch_size, bf16=args.bf16, # Use BF16 if available learning_rate=args.lr, num_train_epochs=args.epochs, gradient_checkpointing=args.gradient_checkpointing, # logging strategies logging_dir=f"{output_dir}/logs", logging_strategy="steps", logging_steps=10, save_strategy="no", ) # Create Trainer instance trainer = Trainer( model=model, args=training_args, train_dataset=dataset, data_collator=default_data_collator, ) # pre-process the model by upcasting the layer norms in float 32 for for name, module in trainer.model.named_modules(): if "norm" in name: module = module.to(torch.float32) # Start training trainer.train() if 
args.merge_weights: # merge adapter weights with base model and save # save int 4 model trainer.model.save_pretrained(output_dir, safe_serialization=False) # clear memory del model del trainer torch.cuda.empty_cache() from peft import AutoPeftModelForCausalLM # load PEFT model in fp16 offload_folder = "/tmp/offload" model = AutoPeftModelForCausalLM.from_pretrained( output_dir, torch_dtype=torch.float16, low_cpu_mem_usage=True, trust_remote_code=True, # ATTENTION: This allows remote code execution ) # Merge LoRA and base model and save merged_model = model.merge_and_unload() merged_model.save_pretrained("/opt/ml/model/",safe_serialization=True) else: trainer.model.save_pretrained("/opt/ml/model/", safe_serialization=True) # save tokenizer for easy inference tokenizer = AutoTokenizer.from_pretrained(args.model_id, trust_remote_code=True) tokenizer.save_pretrained("/opt/ml/model/") def main(): args, _ = parse_arge() training_function(args) if __name__ == "__main__": main()
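# ---------------------------------------------------------------------------
# Illustrative note (not part of the original script): on SageMaker these CLI
# arguments are normally injected through the HuggingFace estimator's
# `hyperparameters` dict. For a local smoke test you could call the script
# directly, e.g. (the model id and values below are placeholders; the LoRA
# `target_modules` defined in `create_peft_config` match Falcon/GPT-NeoX-style
# architectures):
#
#   python run_clm.py --model_id tiiuae/falcon-7b --dataset_path lm_dataset \
#       --epochs 3 --per_device_train_batch_size 1 --lr 2e-4
# ---------------------------------------------------------------------------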
notebooks/sagemaker/28_train_llms_with_qlora/scripts/run_clm.py/0
{ "file_path": "notebooks/sagemaker/28_train_llms_with_qlora/scripts/run_clm.py", "repo_id": "notebooks", "token_count": 2378 }
147
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # PEFT as a utility library Let's cover in this section how you can leverage PEFT's low level API to inject trainable adapters into any `torch` module. The development of this API has been motivated by the need for super users to not rely on modeling classes that are exposed in PEFT library and still be able to use adapter methods such as LoRA, IA3 and AdaLoRA. ## Supported tuner types Currently the supported adapter types are the 'injectable' adapters, meaning adapters where an inplace modification of the model is sufficient to correctly perform the fine tuning. As such, only [LoRA](../conceptual_guides/lora), AdaLoRA and [IA3](../conceptual_guides/ia3) are currently supported in this API. ## `inject_adapter_in_model` method To perform the adapter injection, simply use `inject_adapter_in_model` method that takes 3 arguments, the PEFT config and the model itself and an optional adapter name. You can also attach multiple adapters in the model if you call multiple times `inject_adapter_in_model` with different adapter names. Below is a basic example usage of how to inject LoRA adapters into the submodule `linear` of the module `DummyModel`. ```python import torch from peft import inject_adapter_in_model, LoraConfig class DummyModel(torch.nn.Module): def __init__(self): super().__init__() self.embedding = torch.nn.Embedding(10, 10) self.linear = torch.nn.Linear(10, 10) self.lm_head = torch.nn.Linear(10, 10) def forward(self, input_ids): x = self.embedding(input_ids) x = self.linear(x) x = self.lm_head(x) return x lora_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=64, bias="none", target_modules=["linear"], ) model = DummyModel() model = inject_adapter_in_model(lora_config, model) dummy_inputs = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]) dummy_outputs = model(dummy_inputs) ``` If you print the model, you will notice that the adapters have been correctly injected into the model ```bash DummyModel( (embedding): Embedding(10, 10) (linear): Linear( in_features=10, out_features=10, bias=True (lora_dropout): ModuleDict( (default): Dropout(p=0.1, inplace=False) ) (lora_A): ModuleDict( (default): Linear(in_features=10, out_features=64, bias=False) ) (lora_B): ModuleDict( (default): Linear(in_features=64, out_features=10, bias=False) ) (lora_embedding_A): ParameterDict() (lora_embedding_B): ParameterDict() ) (lm_head): Linear(in_features=10, out_features=10, bias=True) ) ``` Note that it should be up to users to properly take care of saving the adapters (in case they want to save adapters only), as `model.state_dict()` will return the full state dict of the model. 
If you want to extract only the adapter state dict, you can use the `get_peft_model_state_dict` method:

```python
from peft import get_peft_model_state_dict

peft_state_dict = get_peft_model_state_dict(model)
print(peft_state_dict)
```

## Pros and cons

When should you use this API, and when should you not? Let's go over the pros and cons.

Pros:
- The model is modified in-place, so it keeps all of its original attributes and methods.
- Works for any torch module and any modality (vision, text, multi-modal).

Cons:
- You need to manually write the Hugging Face `from_pretrained` and `save_pretrained` utility methods if you want to easily save / load adapters from the Hugging Face Hub.
- You cannot use any of the utility methods provided by `PeftModel`, such as disabling or merging adapters.
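For completeness, here is one way — a minimal sketch, not an official recipe — to persist and later restore only the adapter weights using plain `torch.save` together with PEFT's state-dict helpers (reusing the `lora_config` and `DummyModel` defined above):

```python
import torch
from peft import get_peft_model_state_dict, set_peft_model_state_dict

# save only the adapter parameters (tiny compared to the full model state dict)
torch.save(get_peft_model_state_dict(model), "adapter_state_dict.pt")

# later: re-create the base model, inject the same adapter config, then restore the weights
fresh_model = inject_adapter_in_model(lora_config, DummyModel())
set_peft_model_state_dict(fresh_model, torch.load("adapter_state_dict.pt"))
```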
peft/docs/source/developer_guides/low_level_api.md/0
{ "file_path": "peft/docs/source/developer_guides/low_level_api.md", "repo_id": "peft", "token_count": 1389 }
148
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # PEFT integrations PEFT's practical benefits extends to other Hugging Face libraries like [Diffusers](https://hf.co/docs/diffusers) and [Transformers](https://hf.co/docs/transformers). One of the main benefits of PEFT is that an adapter file generated by a PEFT method is a lot smaller than the original model, which makes it super easy to manage and use multiple adapters. You can use one pretrained base model for multiple tasks by simply loading a new adapter finetuned for the task you're solving. Or you can combine multiple adapters with a text-to-image diffusion model to create new effects. This tutorial will show you how PEFT can help you manage adapters in Diffusers and Transformers. ## Diffusers Diffusers is a generative AI library for creating images and videos from text or images with diffusion models. LoRA is an especially popular training method for diffusion models because you can very quickly train and share diffusion models to generate images in new styles. To make it easier to use and try multiple LoRA models, Diffusers uses the PEFT library to help manage different adapters for inference. For example, load a base model and then load the [artificialguybr/3DRedmond-V1](https://huggingface.co/artificialguybr/3DRedmond-V1) adapter for inference with the [`load_lora_weights`](https://huggingface.co/docs/diffusers/v0.24.0/en/api/loaders/lora#diffusers.loaders.LoraLoaderMixin.load_lora_weights) method. The `adapter_name` argument in the loading method is enabled by PEFT and allows you to set a name for the adapter so it is easier to reference. ```py import torch from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights( "peft-internal-testing/artificialguybr__3DRedmond-V1", weight_name="3DRedmond-3DRenderStyle-3DRenderAF.safetensors", adapter_name="3d" ) image = pipeline("sushi rolls shaped like kawaii cat faces").images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/test-lora-diffusers.png"/> </div> Now let's try another cool LoRA model, [ostris/super-cereal-sdxl-lora](https://huggingface.co/ostris/super-cereal-sdxl-lora). All you need to do is load and name this new adapter with `adapter_name`, and use the [`set_adapters`](https://huggingface.co/docs/diffusers/api/loaders/unet#diffusers.loaders.UNet2DConditionLoadersMixin.set_adapters) method to set it as the currently active adapter. 
```py pipeline.load_lora_weights( "ostris/super-cereal-sdxl-lora", weight_name="cereal_box_sdxl_v1.safetensors", adapter_name="cereal" ) pipeline.set_adapters("cereal") image = pipeline("sushi rolls shaped like kawaii cat faces").images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/test-lora-diffusers-2.png"/> </div> Finally, you can call the [`disable_lora`](https://huggingface.co/docs/diffusers/api/loaders/unet#diffusers.loaders.UNet2DConditionLoadersMixin.disable_lora) method to restore the base model. ```py pipeline.disable_lora() ``` Learn more about how PEFT supports Diffusers in the [Inference with PEFT](https://huggingface.co/docs/diffusers/tutorials/using_peft_for_inference) tutorial. ## Transformers Transformers is a collection of pretrained models for all types of tasks in all modalities. You can load these models for training or inference. Many of the models are large language models (LLMs), so it makes sense to integrate PEFT with Transformers to manage and train adapters. Load a base pretrained model to train. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") ``` Next, add an adapter configuration to specify how to adapt the model parameters. Call the [`~PeftModel.add_adapter`] method to add the configuration to the base model. ```py from peft import LoraConfig peft_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=64, bias="none", task_type="CAUSAL_LM" ) model.add_adapter(peft_config) ``` Now you can train the model with Transformers' [`~transformers.Trainer`] class or whichever training framework you prefer. To use the newly trained model for inference, the [`~transformers.AutoModel`] class uses PEFT on the backend to load the adapter weights and configuration file into a base pretrained model. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora") ``` If you're interested in comparing or using more than one adapter, you can also call the [`~PeftModel.add_adapter`] method to add the adapter configuration to the base model. The only requirement is the adapter type must be the same (you can't mix a LoRA and LoHa adapter). ```py from transformers import AutoModelForCausalLM from peft import LoraConfig model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") model.add_adapter(lora_config_1, adapter_name="adapter_1") ``` Call [`~PeftModel.add_adapter`] again to attach a new adapter to the base model. ```py model.add_adapter(lora_config_2, adapter_name="adapter_2") ``` Then you can use [`~PeftModel.set_adapter`] to set the currently active adapter. ```py model.set_adapter("adapter_1") output = model.generate(**inputs) print(tokenizer.decode(output[0], skip_special_tokens=True)) ``` To disable the adapters, call the `disable_adapters()` method. ```py model.disable_adapters() ``` If you're curious, check out the [Load and train adapters with PEFT](https://huggingface.co/docs/transformers/main/peft) tutorial to learn more.
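As a quick recap of the Transformers workflow above, here is a compact, hedged sketch that puts the pieces together. The two `LoraConfig` objects, the prompt, and the generation settings are placeholders, and it assumes a recent version of Transformers with the PEFT integration enabled:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig

model_id = "facebook/opt-350m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# two LoRA adapters of the same type attached to one base model
lora_config_1 = LoraConfig(r=8, lora_alpha=16, task_type="CAUSAL_LM")
lora_config_2 = LoraConfig(r=16, lora_alpha=32, task_type="CAUSAL_LM")
model.add_adapter(lora_config_1, adapter_name="adapter_1")
model.add_adapter(lora_config_2, adapter_name="adapter_2")

inputs = tokenizer("Hello, my name is", return_tensors="pt")

# switch between adapters, then turn them off to fall back to the base model
model.set_adapter("adapter_1")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0], skip_special_tokens=True))

model.set_adapter("adapter_2")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0], skip_special_tokens=True))

model.disable_adapters()
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0], skip_special_tokens=True))
```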
peft/docs/source/tutorial/peft_integrations.md/0
{ "file_path": "peft/docs/source/tutorial/peft_integrations.md", "repo_id": "peft", "token_count": 2014 }
149
import os import torch from accelerate import Accelerator from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup from peft import LoraConfig, TaskType, get_peft_model from peft.utils.other import fsdp_auto_wrap_policy def main(): accelerator = Accelerator() model_name_or_path = "t5-base" batch_size = 8 text_column = "sentence" label_column = "label" max_length = 64 lr = 1e-3 num_epochs = 1 base_path = "temp/data/FinancialPhraseBank-v1.0" peft_config = LoraConfig( task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) accelerator.print(model.print_trainable_parameters()) dataset = load_dataset( "json", data_files={ "train": os.path.join(base_path, "financial_phrase_bank_train.jsonl"), "validation": os.path.join(base_path, "financial_phrase_bank_val.jsonl"), }, ) tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer( inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt" ) labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs with accelerator.main_process_first(): processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) if getattr(accelerator.state, "fsdp_plugin", None) is not None: accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(model) model, train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare( model, train_dataloader, eval_dataloader, optimizer, lr_scheduler ) accelerator.print(model) for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() eval_loss = 0 eval_preds = [] for step, batch in enumerate(tqdm(eval_dataloader)): with torch.no_grad(): outputs = model(**batch) loss = outputs.loss eval_loss += loss.detach().float() preds = accelerator.gather_for_metrics(torch.argmax(outputs.logits, -1)).detach().cpu().numpy() eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) eval_epoch_loss = eval_loss / len(eval_dataloader) eval_ppl = torch.exp(eval_epoch_loss) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) accelerator.print(f"{epoch=}: 
{train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}") correct = 0 total = 0 for pred, true in zip(eval_preds, dataset["validation"][label_column]): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 accelerator.print(f"{accuracy=}") accelerator.print(f"{eval_preds[:10]=}") accelerator.print(f"{dataset['validation'][label_column][:10]=}") accelerator.wait_for_everyone() # Option1: Pushing the model to Hugging Face Hub # model.push_to_hub( # f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"), # token = "hf_..." # ) # token (`bool` or `str`, *optional*): # `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated # when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` # is not specified. # Or you can get your token from https://huggingface.co/settings/token # Option2: Saving the model locally peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_") model.save_pretrained(peft_model_id) accelerator.wait_for_everyone() if __name__ == "__main__": main()
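# ---------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original training flow and never called in this script):
# one way the adapter saved above via `model.save_pretrained(peft_model_id)` could be loaded
# back for inference. `adapter_dir` is the directory produced by that call; the base checkpoint
# matches the `model_name_or_path` used above.
def load_saved_adapter_for_inference(adapter_dir: str):
    from peft import PeftModel

    base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    peft_model = PeftModel.from_pretrained(base_model, adapter_dir)
    peft_model.eval()
    return peft_model, tokenizer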
peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py/0
{ "file_path": "peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py", "repo_id": "peft", "token_count": 2543 }
150
<jupyter_start><jupyter_text>Finetuning Whisper-large-V2 on Colab using PEFT-Lora + BNB INT8 training In this Colab, we present a step-by-step guide on how to fine-tune Whisper for any multilingual ASR dataset using Hugging Face 🤗 Transformers and 🤗 PEFT. Using 🤗 PEFT and `bitsandbytes`, you can train the `whisper-large-v2` seamlessly on a Colab with a T4 GPU (16 GB VRAM). Most parts of this notebook are adapted from [fine_tune_whisper.ipynb](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/fine_tune_whisper.ipynbscrollTo=BRdrdFIeU78w) to train using PEFT LoRA + BNB INT8. For more details on the model, datasets and metrics, refer to the blog [Fine-Tune Whisper For Multilingual ASR with 🤗 Transformers](https://huggingface.co/blog/fine-tune-whisper) Initial Setup<jupyter_code>!add-apt-repository -y ppa:jonathonf/ffmpeg-4 !apt update !apt install -y ffmpeg !pip install datasets>=2.6.1 !pip install git+https://github.com/huggingface/transformers !pip install librosa !pip install evaluate>=0.30 !pip install jiwer !pip install gradio !pip install -q bitsandbytes datasets accelerate !pip install -q git+https://github.com/huggingface/transformers.git@main git+https://github.com/huggingface/peft.git@main<jupyter_output><empty_output><jupyter_text>Linking the notebook to the Hub is straightforward - it simply requires entering your Hub authentication token when prompted. Find your Hub authentication token [here](https://huggingface.co/settings/tokens):<jupyter_code>from huggingface_hub import notebook_login notebook_login() # Select CUDA device index import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" model_name_or_path = "openai/whisper-large-v2" language = "Marathi" language_abbr = "mr" task = "transcribe" dataset_name = "mozilla-foundation/common_voice_11_0"<jupyter_output><empty_output><jupyter_text>Load Dataset<jupyter_code>from datasets import load_dataset, DatasetDict common_voice = DatasetDict() common_voice["train"] = load_dataset(dataset_name, language_abbr, split="train+validation", use_auth_token=True) common_voice["test"] = load_dataset(dataset_name, language_abbr, split="test", use_auth_token=True) print(common_voice) common_voice = common_voice.remove_columns( ["accent", "age", "client_id", "down_votes", "gender", "locale", "path", "segment", "up_votes"] ) print(common_voice)<jupyter_output>DatasetDict({ train: Dataset({ features: ['audio', 'sentence'], num_rows: 3927 }) test: Dataset({ features: ['audio', 'sentence'], num_rows: 1816 }) })<jupyter_text>Prepare Feature Extractor, Tokenizer and Data<jupyter_code>from transformers import WhisperFeatureExtractor feature_extractor = WhisperFeatureExtractor.from_pretrained(model_name_or_path) from transformers import WhisperTokenizer tokenizer = WhisperTokenizer.from_pretrained(model_name_or_path, language=language, task=task) from transformers import WhisperProcessor processor = WhisperProcessor.from_pretrained(model_name_or_path, language=language, task=task)<jupyter_output><empty_output><jupyter_text>Prepare Data<jupyter_code>print(common_voice["train"][0])<jupyter_output>{'audio': {'path': '/root/.cache/huggingface/datasets/downloads/extracted/f7e1ef6a2d14f20194999aad5040c5d4bb3ead1377de3e1bbc6e9dba34d18a8a/common_voice_mr_30585613.mp3', 'array': array([-1.3727526e-15, -1.2400461e-13, -1.5159097e-13, ..., 4.7928120e-06, 3.5631349e-06, 1.6352631e-06], dtype=float32), 'sampling_rate': 48000}, 'sentence': 'आईचे आजारपण वाढत चालले, तसतशी मथीही नीट खातपीतनाशी झाली.'}<jupyter_text>Since our input audio is
sampled at 48kHz, we need to _downsample_ it to 16kHz prior to passing it to the Whisper feature extractor, 16kHz being the sampling rate expected by the Whisper model. We'll set the audio inputs to the correct sampling rate using dataset's [`cast_column`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=cast_columndatasets.DatasetDict.cast_column)method. This operation does not change the audio in-place, but rather signals to `datasets` to resample audio samples _on the fly_ the first time that they are loaded:<jupyter_code>from datasets import Audio common_voice = common_voice.cast_column("audio", Audio(sampling_rate=16000))<jupyter_output><empty_output><jupyter_text>Re-loading the first audio sample in the Common Voice dataset will resample it to the desired sampling rate:<jupyter_code>print(common_voice["train"][0])<jupyter_output>{'audio': {'path': '/root/.cache/huggingface/datasets/downloads/extracted/f7e1ef6a2d14f20194999aad5040c5d4bb3ead1377de3e1bbc6e9dba34d18a8a/common_voice_mr_30585613.mp3', 'array': array([-4.4097186e-14, -9.4153831e-14, 3.4645775e-13, ..., -7.6018655e-06, -1.8617659e-06, 4.4520480e-06], dtype=float32), 'sampling_rate': 16000}, 'sentence': 'आईचे आजारपण वाढत चालले, तसतशी मथीही नीट खातपीतनाशी झाली.'}<jupyter_text>Now we can write a function to prepare our data ready for the model:1. We load and resample the audio data by calling `batch["audio"]`. As explained above, 🤗 Datasets performs any necessary resampling operations on the fly.2. We use the feature extractor to compute the log-Mel spectrogram input features from our 1-dimensional audio array.3. We encode the transcriptions to label ids through the use of the tokenizer.<jupyter_code>def prepare_dataset(batch): # load and resample audio data from 48 to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = feature_extractor(audio["array"], sampling_rate=audio["sampling_rate"]).input_features[0] # encode target text to label ids batch["labels"] = tokenizer(batch["sentence"]).input_ids return batch<jupyter_output><empty_output><jupyter_text>We can apply the data preparation function to all of our training examples using dataset's `.map` method. The argument `num_proc` specifies how many CPU cores to use. Setting `num_proc` > 1 will enable multiprocessing. 
If the `.map` method hangs with multiprocessing, set `num_proc=1` and process the dataset sequentially.<jupyter_code>common_voice = common_voice.map(prepare_dataset, remove_columns=common_voice.column_names["train"], num_proc=2) common_voice["train"]<jupyter_output><empty_output><jupyter_text>Training and Evaluation Define a Data Collator<jupyter_code>import torch from dataclasses import dataclass from typing import Any, Dict, List, Union @dataclass class DataCollatorSpeechSeq2SeqWithPadding: processor: Any def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need different padding methods # first treat the audio inputs by simply returning torch tensors input_features = [{"input_features": feature["input_features"]} for feature in features] batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt") # get the tokenized label sequences label_features = [{"input_ids": feature["labels"]} for feature in features] # pad the labels to max length labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's appended later anyways if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item(): labels = labels[:, 1:] batch["labels"] = labels return batch<jupyter_output><empty_output><jupyter_text>Let's initialise the data collator we've just defined:<jupyter_code>data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)<jupyter_output><empty_output><jupyter_text>Evaluation Metrics We'll use the word error rate (WER) metric, the 'de-facto' metric for assessing ASR systems. For more information, refer to the WER [docs](https://huggingface.co/metrics/wer). We'll load the WER metric from 🤗 Evaluate:<jupyter_code>import evaluate metric = evaluate.load("wer")<jupyter_output><empty_output><jupyter_text>We then simply have to define a function that takes our model predictions and returns the WER metric. This function, called `compute_metrics`, first replaces `-100` with the `pad_token_id` in the `label_ids` (undoing the step we applied in the data collator to ignore padded tokens correctly in the loss). It then decodes the predicted and label ids to strings. Finally, it computes the WER between the predictions and reference labels:<jupyter_code>def compute_metrics(pred): pred_ids = pred.predictions label_ids = pred.label_ids # replace -100 with the pad_token_id label_ids[label_ids == -100] = tokenizer.pad_token_id # we do not want to group tokens when computing the metrics pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) label_str = tokenizer.batch_decode(label_ids, skip_special_tokens=True) wer = 100 * metric.compute(predictions=pred_str, references=label_str) return {"wer": wer}<jupyter_output><empty_output><jupyter_text>Load a Pre-Trained Checkpoint Now let's load the pre-trained Whisper `large-v2` checkpoint.
Again, this is trivial through use of 🤗 Transformers!<jupyter_code>from transformers import WhisperForConditionalGeneration model = WhisperForConditionalGeneration.from_pretrained(model_name_or_path, load_in_8bit=True) # model.hf_device_map - this should be {" ": 0}<jupyter_output><empty_output><jupyter_text>Override generation arguments - no tokens are forced as decoder outputs (see [`forced_decoder_ids`](https://huggingface.co/docs/transformers/main_classes/text_generationtransformers.generation_utils.GenerationMixin.generate.forced_decoder_ids)), no tokens are suppressed during generation (see [`suppress_tokens`](https://huggingface.co/docs/transformers/main_classes/text_generationtransformers.generation_utils.GenerationMixin.generate.suppress_tokens)):<jupyter_code>model.config.forced_decoder_ids = None model.config.suppress_tokens = []<jupyter_output><empty_output><jupyter_text>Post-processing on the modelFinally, we need to apply some post-processing on the 8-bit model to enable training, let's freeze all our layers, and cast all non `int8` layers in `float32` for stability.<jupyter_code>from peft import prepare_model_for_int8_training model = prepare_model_for_int8_training(model)<jupyter_output><empty_output><jupyter_text>Apply LoRAHere comes the magic with `peft`! Let's load a `PeftModel` and specify that we are going to use low-rank adapters (LoRA) using `get_peft_model` utility function from `peft`.<jupyter_code>from peft import LoraConfig, PeftModel, LoraModel, LoraConfig, get_peft_model config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none") model = get_peft_model(model, config) model.print_trainable_parameters()<jupyter_output>trainable params: 15728640 || all params: 1559033600 || trainable%: 1.0088711365810203<jupyter_text>We are ONLY using **1%** of the total trainable parameters, thereby performing **Parameter-Efficient Fine-Tuning** Define the Training Configuration In the final step, we define all the parameters related to training. For more detail on the training arguments, refer to the Seq2SeqTrainingArguments [docs](https://huggingface.co/docs/transformers/main_classes/trainertransformers.Seq2SeqTrainingArguments).<jupyter_code>from transformers import Seq2SeqTrainingArguments training_args = Seq2SeqTrainingArguments( output_dir="temp", # change to a repo name of your choice per_device_train_batch_size=8, gradient_accumulation_steps=1, # increase by 2x for every 2x decrease in batch size learning_rate=1e-3, warmup_steps=50, num_train_epochs=3, evaluation_strategy="epoch", fp16=True, per_device_eval_batch_size=8, generation_max_length=128, logging_steps=25, remove_unused_columns=False, # required as the PeftModel forward doesn't have the signature of the wrapped model's forward label_names=["labels"], # same reason as above )<jupyter_output><empty_output><jupyter_text>**Few Important Notes:**1. `remove_unused_columns=False` and `label_names=["labels"]` are required as the PeftModel's forward doesn't have the signature of the base model's forward.2. INT8 training required autocasting. `predict_with_generate` can't be passed to Trainer because it internally calls transformer's `generate` without autocasting leading to errors. 3. Because of point 2, `compute_metrics` shouldn't be passed to `Seq2SeqTrainer` as seen below. 
(commented out)<jupyter_code>from transformers import Seq2SeqTrainer, TrainerCallback, TrainingArguments, TrainerState, TrainerControl from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR class SavePeftModelCallback(TrainerCallback): def on_save( self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs, ): checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") peft_model_path = os.path.join(checkpoint_folder, "adapter_model") kwargs["model"].save_pretrained(peft_model_path) pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") if os.path.exists(pytorch_model_path): os.remove(pytorch_model_path) return control trainer = Seq2SeqTrainer( args=training_args, model=model, train_dataset=common_voice["train"], eval_dataset=common_voice["test"], data_collator=data_collator, # compute_metrics=compute_metrics, tokenizer=processor.feature_extractor, callbacks=[SavePeftModelCallback], ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! trainer.train() model_name_or_path = "openai/whisper-large-v2" peft_model_id = "smangrul/" + f"{model_name_or_path}-{model.peft_config.peft_type}-colab".replace("/", "-") model.push_to_hub(peft_model_id) print(peft_model_id)<jupyter_output>Uploading the following files to smangrul/openai-whisper-large-v2-LORA-colab: adapter_model.bin,adapter_config.json<jupyter_text>Evaluation and Inference **Important points to note while inferencing**:1. As `predict_with_generate` can't be used, we will write the eval loop with `torch.cuda.amp.autocast()` as shown below. 2. As the base model is frozen, the PEFT model sometimes fails to recognise the language while decoding. Hence, we force the starting tokens to mention the language we are transcribing. This is done via `forced_decoder_ids = processor.get_decoder_prompt_ids(language="Marathi", task="transcribe")` and passing that to the `model.generate` call. 3. Please note that [AutoEvaluate Leaderboard](https://huggingface.co/spaces/autoevaluate/leaderboards?dataset=mozilla-foundation%2Fcommon_voice_11_0&only_verified=0&task=automatic-speech-recognition&config=mr&split=test&metric=wer) for `mr` language on `common_voice_11_0` has a bug wherein OpenAI's `BasicTextNormalizer` is used during evaluation, leading to degenerate output text; an example is shown below:```without normalizer: 'स्विच्चान नरुवित्तीची पद्दत मोठ्या प्रमाणात आमलात आणल्या बसोन या दुपन्याने अनेक राथ प्रवेश केला आहे.'with normalizer: 'स व च च न नर व त त च पद दत म ठ य प रम ण त आमल त आणल य बस न य द पन य न अन क र थ प रव श क ल आह'```Post fixing this bug, we report the 2 metrics for the top model of the leaderboard and the PEFT model:1. `wer`: `wer` without using the `BasicTextNormalizer` as it doesn't cater to most Indic languages. This is what we consider as the true performance metric.2. `normalized_wer`: `wer` using the `BasicTextNormalizer` to be comparable to the leaderboard metrics.Below are the results:| Model | DrishtiSharma/whisper-large-v2-marathi | smangrul/openai-whisper-large-v2-LORA-colab ||----------------|----------------------------------------|---------------------------------------------|| wer | 35.6457 | 36.1356 || normalized_wer | 13.6440 | 14.0165 |We see that the PEFT model's performance is comparable to the fully fine-tuned model at the top of the leaderboard.
At the same time, we are able to train the large model in a Colab notebook with limited GPU memory, with the added advantage of the resulting checkpoint being just `63` MB.<jupyter_code>from peft import PeftModel, PeftConfig from transformers import WhisperForConditionalGeneration, Seq2SeqTrainer peft_model_id = "smangrul/openai-whisper-large-v2-LORA-colab" peft_config = PeftConfig.from_pretrained(peft_model_id) model = WhisperForConditionalGeneration.from_pretrained( peft_config.base_model_name_or_path, load_in_8bit=True, device_map="auto" ) model = PeftModel.from_pretrained(model, peft_model_id) from torch.utils.data import DataLoader from tqdm import tqdm import numpy as np import gc eval_dataloader = DataLoader(common_voice["test"], batch_size=8, collate_fn=data_collator) model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): with torch.cuda.amp.autocast(): with torch.no_grad(): generated_tokens = ( model.generate( input_features=batch["input_features"].to("cuda"), decoder_input_ids=batch["labels"][:, :4].to("cuda"), max_new_tokens=255, ) .cpu() .numpy() ) labels = batch["labels"].cpu().numpy() labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) metric.add_batch( predictions=decoded_preds, references=decoded_labels, ) del generated_tokens, labels, batch gc.collect() wer = 100 * metric.compute() print(f"{wer=}")<jupyter_output><empty_output><jupyter_text>Using AutomaticSpeechRecognitionPipeline **A few important notes:**1. `pipe()` should be in the autocast context manager `with torch.cuda.amp.autocast():` 2. `forced_decoder_ids` specifying the `language` being transcribed should be provided in the `generate_kwargs` dict. 3. You will get a warning along the lines below, which is **safe to ignore**.```The model 'PeftModel' is not supported for .
Supported models are ['SpeechEncoderDecoderModel', 'Speech2TextForConditionalGeneration', 'SpeechT5ForSpeechToText', 'WhisperForConditionalGeneration', 'Data2VecAudioForCTC', 'HubertForCTC', 'MCTCTForCTC', 'SEWForCTC', 'SEWDForCTC', 'UniSpeechForCTC', 'UniSpeechSatForCTC', 'Wav2Vec2ForCTC', 'Wav2Vec2ConformerForCTC', 'WavLMForCTC'].```<jupyter_code>import torch import gradio as gr from transformers import ( AutomaticSpeechRecognitionPipeline, WhisperForConditionalGeneration, WhisperTokenizer, WhisperProcessor, ) from peft import PeftModel, PeftConfig peft_model_id = "smangrul/openai-whisper-large-v2-LORA-colab" language = "Marathi" task = "transcribe" peft_config = PeftConfig.from_pretrained(peft_model_id) model = WhisperForConditionalGeneration.from_pretrained( peft_config.base_model_name_or_path, load_in_8bit=True, device_map="auto" ) model = PeftModel.from_pretrained(model, peft_model_id) tokenizer = WhisperTokenizer.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task) processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task) feature_extractor = processor.feature_extractor forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task=task) pipe = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) def transcribe(audio): with torch.cuda.amp.autocast(): text = pipe(audio, generate_kwargs={"forced_decoder_ids": forced_decoder_ids}, max_new_tokens=255)["text"] return text iface = gr.Interface( fn=transcribe, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text", title="PEFT LoRA + INT8 Whisper Large V2 Marathi", description="Realtime demo for Marathi speech recognition using `PEFT-LoRA+INT8` fine-tuned Whisper Large V2 model.", ) iface.launch(share=True)<jupyter_output><empty_output>
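<jupyter_text>As an optional final step (not executed in the original run), the LoRA weights can be merged back into the base model for deployment, so no PEFT wrapper is needed at inference time. This is only a hedged sketch: it assumes enough memory to load the base model in `float16` (merging into the 8-bit weights used above is not straightforward), and the output directory name is arbitrary.<jupyter_code>import torch
from transformers import WhisperForConditionalGeneration
from peft import PeftModel, PeftConfig

peft_model_id = "smangrul/openai-whisper-large-v2-LORA-colab"
peft_config = PeftConfig.from_pretrained(peft_model_id)

# load the base model in fp16 (not 8-bit) so the LoRA deltas can be folded into its weights
base_model = WhisperForConditionalGeneration.from_pretrained(
    peft_config.base_model_name_or_path, torch_dtype=torch.float16
)
merged_model = PeftModel.from_pretrained(base_model, peft_model_id).merge_and_unload()
merged_model.save_pretrained("whisper-large-v2-marathi-merged")<jupyter_output><empty_output>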
peft/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb/0
{ "file_path": "peft/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb", "repo_id": "peft", "token_count": 7675 }
151
<jupyter_start><jupyter_code>%env CUDA_VISIBLE_DEVICES=0 %env TOKENIZERS_PARALLELISM=false<jupyter_output>env: CUDA_VISIBLE_DEVICES=0 env: TOKENIZERS_PARALLELISM=false<jupyter_text>Initialize PolyModel<jupyter_code>import torch from transformers import ( AutoModelForSeq2SeqLM, AutoTokenizer, default_data_collator, Seq2SeqTrainingArguments, Seq2SeqTrainer, ) from datasets import load_dataset, concatenate_datasets from peft import PolyConfig, get_peft_model, TaskType, PeftModel, PeftConfig model_name_or_path = "google/flan-t5-xl" r = 8 # rank of lora in poly n_tasks = 4 # number of tasks n_skills = 2 # number of skills (loras) n_splits = 4 # number of heads batch_size = 8 lr = 5e-5 num_epochs = 8 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True) base_model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path, trust_remote_code=True) peft_config = PolyConfig( task_type=TaskType.SEQ_2_SEQ_LM, poly_type="poly", r=r, n_tasks=n_tasks, n_skills=n_skills, n_splits=n_splits, ) model = get_peft_model(base_model, peft_config) model.print_trainable_parameters()<jupyter_output>trainable params: 9,441,792 || all params: 2,859,198,976 || trainable%: 0.33022507629773296<jupyter_text>Prepare datasetsFor this example, we selected four `SuperGLUE` benchmark datasets: `boolq`, `multirc`, `rte`, and `wic`, each with a training set of 1,000 examples and an evaluation set of 100 examples.<jupyter_code># boolq boolq_dataset = ( load_dataset("super_glue", "boolq") .map( lambda x: { "input": f"{x['passage']}\nQuestion: {x['question']}\nA. Yes\nB. No\nAnswer:", # 0 - False # 1 - True "output": ["B", "A"][int(x["label"])], "task_name": "boolq", } ) .select_columns(["input", "output", "task_name"]) ) print("boolq example: ") print(boolq_dataset["train"][0]) # multirc multirc_dataset = ( load_dataset("super_glue", "multirc") .map( lambda x: { "input": ( f"{x['paragraph']}\nQuestion: {x['question']}\nAnswer: {x['answer']}\nIs it" " true?\nA. Yes\nB. No\nAnswer:" ), # 0 - False # 1 - True "output": ["B", "A"][int(x["label"])], "task_name": "multirc", } ) .select_columns(["input", "output", "task_name"]) ) print("multirc example: ") print(multirc_dataset["train"][0]) # rte rte_dataset = ( load_dataset("super_glue", "rte") .map( lambda x: { "input": ( f"{x['premise']}\n{x['hypothesis']}\nIs the sentence below entailed by the" " sentence above?\nA. Yes\nB. No\nAnswer:" ), # 0 - entailment # 1 - not_entailment "output": ["A", "B"][int(x["label"])], "task_name": "rte", } ) .select_columns(["input", "output", "task_name"]) ) print("rte example: ") print(rte_dataset["train"][0]) # wic wic_dataset = ( load_dataset("super_glue", "wic") .map( lambda x: { "input": ( f"Sentence 1: {x['sentence1']}\nSentence 2: {x['sentence2']}\nAre '{x['word']}'" " in the above two sentences the same?\nA. Yes\nB. 
No\nAnswer:" ), # 0 - False # 1 - True "output": ["B", "A"][int(x["label"])], "task_name": "wic", } ) .select_columns(["input", "output", "task_name"]) ) print("wic example: ") print(wic_dataset["train"][0]) # define a task2id map TASK2ID = { "boolq": 0, "multirc": 1, "rte": 2, "wic": 3, } def tokenize(examples): inputs, targets = examples["input"], examples["output"] features = tokenizer(inputs, max_length=512, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 features["labels"] = labels features["task_ids"] = torch.tensor([[TASK2ID[t]] for t in examples["task_name"]]).long() return features def get_superglue_dataset( split="train", n_samples=500, ): ds = concatenate_datasets( [ boolq_dataset[split].shuffle().select(range(n_samples)), multirc_dataset[split].shuffle().select(range(n_samples)), rte_dataset[split].shuffle().select(range(n_samples)), wic_dataset[split].shuffle().select(range(n_samples)), ] ) ds = ds.map( tokenize, batched=True, remove_columns=["input", "output", "task_name"], load_from_cache_file=False, ) return ds<jupyter_output><empty_output><jupyter_text>As a toy example, we only select 1,000 from each subdataset for training and 100 each for eval.<jupyter_code>superglue_train_dataset = get_superglue_dataset(split="train", n_samples=1000) superglue_eval_dataset = get_superglue_dataset(split="test", n_samples=100)<jupyter_output>Map: 100%|██████████| 4000/4000 [00:02<00:00, 1365.07 examples/s] Map: 100%|██████████| 400/400 [00:00<00:00, 548.46 examples/s]<jupyter_text>Train and evaluate<jupyter_code># training and evaluation def compute_metrics(eval_preds): preds, labels = eval_preds preds = [[i for i in seq if i != -100] for seq in preds] labels = [[i for i in seq if i != -100] for seq in labels] preds = tokenizer.batch_decode(preds, skip_special_tokens=True) labels = tokenizer.batch_decode(labels, skip_special_tokens=True) correct = 0 total = 0 for pred, true in zip(preds, labels): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total return {"accuracy": accuracy} training_args = Seq2SeqTrainingArguments( "output", per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, learning_rate=lr, num_train_epochs=num_epochs, evaluation_strategy="epoch", logging_strategy="epoch", save_strategy="no", report_to=[], predict_with_generate=True, generation_max_length=2, remove_unused_columns=False, ) trainer = Seq2SeqTrainer( model=model, tokenizer=tokenizer, args=training_args, train_dataset=superglue_train_dataset, eval_dataset=superglue_eval_dataset, data_collator=default_data_collator, compute_metrics=compute_metrics, ) trainer.train() # saving model model_name_or_path = "google/flan-t5-xl" peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" model.save_pretrained(peft_model_id) !ls -lh $peft_model_id<jupyter_output>total 37M -rw-r--r-- 1 root root 374 12月 22 14:59 adapter_config.json -rw-r--r-- 1 root root 37M 12月 22 14:59 adapter_model.safetensors -rw-r--r-- 1 root root 5.0K 12月 22 14:58 README.md<jupyter_text>Load and infer<jupyter_code>device = "cuda:0" if torch.cuda.is_available() else "cpu" peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path) 
model = PeftModel.from_pretrained(model, peft_model_id) model = model.to(device) model = model.eval() i = 5 inputs = tokenizer(rte_dataset["validation"]["input"][i], return_tensors="pt") inputs["task_ids"] = torch.LongTensor([TASK2ID["rte"]]) inputs = {k: v.to(device) for k, v in inputs.items()} print(rte_dataset["validation"]["input"][i]) print(rte_dataset["validation"]["output"][i]) print(inputs) with torch.no_grad(): outputs = model.generate(**inputs, max_new_tokens=2) print(outputs[0]) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])<jupyter_output>In 1979, the leaders signed the Egypt-Israel peace treaty on the White House lawn. Both President Begin and Sadat received the Nobel Peace Prize for their work. The two nations have enjoyed peaceful relations to this day. The Israel-Egypt Peace Agreement was signed in 1979. Is the sentence below entailed by the sentence above? A. Yes B. No Answer: A {'input_ids': tensor([[ 86, 15393, 6, 8, 2440, 3814, 8, 10438, 18, 30387, 3065, 2665, 63, 30, 8, 1945, 1384, 8652, 5, 2867, 1661, 10129, 77, 11, 18875, 144, 1204, 8, 22232, 11128, 11329, 21, 70, 161, 5, 37, 192, 9352, 43, 2994, 9257, 5836, 12, 48, 239, 5, 37, 3352, 18, 427, 122, 63, 102, 17, 11128, 7139, 47, 3814, 16, 15393, 5, 27, 7, 8, 7142, 666, 3, 295, 10990, 57, 8, 7142, 756, 58, 71, 5, 2163, 272, 5, 465, [...]
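<jupyter_text>Because Poly selects the LoRA skill combination through the task routing, querying a different task at inference time only requires passing a different `task_ids` value. The cell below is a small illustrative sketch (not part of the original run) that reuses the already-loaded model on a `wic` validation example; the example index is arbitrary.<jupyter_code>i = 0
inputs = tokenizer(wic_dataset["validation"]["input"][i], return_tensors="pt")
inputs["task_ids"] = torch.LongTensor([TASK2ID["wic"]])
inputs = {k: v.to(device) for k, v in inputs.items()}

with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=2)

print(wic_dataset["validation"]["input"][i])
print("expected:", wic_dataset["validation"]["output"][i])
print("predicted:", tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])<jupyter_output><empty_output>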
peft/examples/poly/peft_poly_seq2seq_with_generate.ipynb/0
{ "file_path": "peft/examples/poly/peft_poly_seq2seq_with_generate.ipynb", "repo_id": "peft", "token_count": 4104 }
152
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is a minimal example of launching PEFT with Accelerate. This used to cause issues because PEFT would eagerly # import bitsandbytes, which initializes CUDA, resulting in: # > RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the # > 'spawn' start method # This script exists to ensure that this issue does not reoccur. import torch import peft from accelerate import notebook_launcher def init(): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(1, 2) def forward(self, x): return self.linear(x) model = MyModule().to("cuda") peft.get_peft_model(model, peft.LoraConfig(target_modules=["linear"])) def main(): notebook_launcher(init, (), num_processes=2) if __name__ == "__main__": main()
peft/scripts/launch_notebook_mp.py/0
{ "file_path": "peft/scripts/launch_notebook_mp.py", "repo_id": "peft", "token_count": 480 }
153
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional from peft.tuners.lora import LoraConfig from peft.utils import PeftType @dataclass class AdaLoraConfig(LoraConfig): """ This is the configuration class to store the configuration of a [`~peft.AdaLora`]. Args: target_r (`int`): The target average rank of incremental matrix. init_r (`int`): The initial rank for each incremental matrix. tinit (`int`): The steps of initial fine-tuning warmup. tfinal (`int`): The step of final fine-tuning. deltaT (`int`): The time interval between two budget allocations. beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing. beta2 (`float`): The hyperparameter of EMA for uncertainty quantification. orth_reg_weight (`float`): The coefficient of orthogonal regularization. total_step (`int`): The total training steps that should be specified before training. rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator. """ target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."}) init_r: int = field(default=12, metadata={"help": "Initial Lora matrix dimension."}) tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."}) tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."}) deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."}) beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."}) total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."}) rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."}) def __post_init__(self): self.peft_type = PeftType.ADALORA
peft/src/peft/tuners/adalora/config.py/0
{ "file_path": "peft/src/peft/tuners/adalora/config.py", "repo_id": "peft", "token_count": 868 }
154
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Any, Set, Tuple import torch import torch.nn as nn import torch.nn.functional as F from peft.tuners.lycoris_utils import LycorisLayer class LoHaLayer(nn.Module, LycorisLayer): # All names of layers that may contain adapter weights adapter_layer_names = ("hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b", "hada_t1", "hada_t2") # other_param_names is defined on parent class def __init__(self, base_layer: nn.Module): super().__init__() LycorisLayer.__init__(self, base_layer) # LoHa info self.hada_w1_a = nn.ParameterDict({}) self.hada_w1_b = nn.ParameterDict({}) self.hada_w2_a = nn.ParameterDict({}) self.hada_w2_b = nn.ParameterDict({}) self.hada_t1 = nn.ParameterDict({}) self.hada_t2 = nn.ParameterDict({}) @property def _available_adapters(self) -> Set[str]: return {*self.hada_w1_a, *self.hada_w1_b, *self.hada_w2_a, *self.hada_w2_b, *self.hada_t1, *self.hada_t2} def create_adapter_parameters(self, adapter_name: str, r: int, shape: Tuple[int, ...]): # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L130C9-L143C75 if len(shape) == 4: self.hada_t1[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3])) self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode self.hada_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3])) self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode else: self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r)) self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r)) self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) def reset_adapter_parameters(self, adapter_name: str): # Original implementation performs initialization with normal distribution # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158 # FedPara paper proposes to perform He initialization, let's stick with it # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization if adapter_name in self.hada_w1_a.keys(): nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5)) nn.init.zeros_(self.hada_w2_b[adapter_name]) if adapter_name in self.hada_t1.keys(): nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5)) def reset_adapter_parameters_random(self, adapter_name: str): # Original implementation performs 
initialization with normal distribution # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158 # FedPara paper proposes to perform He initialization, let's stick with it # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization if adapter_name in self.hada_w1_a.keys(): nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_w2_b[adapter_name], a=math.sqrt(5)) if adapter_name in self.hada_t1.keys(): nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5)) def update_layer( self, adapter_name: str, r: int, alpha: float, rank_dropout: float, module_dropout: float, init_weights: bool, use_effective_conv2d: bool = False, **kwargs, ) -> None: """Internal function to create loha adapter Args: adapter_name (`str`): Name for the adapter to add. r (`int`): Rank for the added adapter. alpha (`float`): Alpha for the added adapter. rank_dropout (`float`): The dropout probability for rank dimension during training. module_dropout (`float`): The dropout probability for disabling adapter during training. init_weights (`bool`): Whether to initialize weights. use_effective_conv2d (`bool`, *optional*, defaults to `False`): Use parameter effective decomposition for Conv2d with ksize > 1. """ if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.alpha[adapter_name] = alpha self.scaling[adapter_name] = alpha / r self.rank_dropout[adapter_name] = rank_dropout self.module_dropout[adapter_name] = module_dropout # Determine shape of LoHa weights base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): shape = tuple(base_layer.weight.shape) elif isinstance(base_layer, nn.Conv2d): use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1) if use_effective_conv2d: shape = (base_layer.out_channels, base_layer.in_channels, *base_layer.kernel_size) else: shape = ( base_layer.out_channels, base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1], ) else: raise TypeError(f"LoHa is not implemented for base layers of type {type(base_layer).__name__}") # Create weights with provided shape self.create_adapter_parameters(adapter_name, r, shape) # Initialize weights if init_weights: self.reset_adapter_parameters(adapter_name) else: self.reset_adapter_parameters_random(adapter_name) # Move new weights to device weight = getattr(self.get_base_layer(), "weight", None) if weight is not None: # the layer is already completely initialized, this is an update if weight.dtype.is_floating_point or weight.dtype.is_complex: self.to(weight.device, dtype=weight.dtype) else: self.to(weight.device) self.set_adapter(self.active_adapters) def get_delta_weight(self, adapter_name: str) -> torch.Tensor: # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L178 if adapter_name in self.hada_t1.keys(): weight = make_weight_cp( self.hada_t1[adapter_name], self.hada_w1_a[adapter_name], self.hada_w1_b[adapter_name], self.hada_t2[adapter_name], self.hada_w2_a[adapter_name], self.hada_w2_b[adapter_name], scale=torch.tensor(self.scaling[adapter_name]), ) else: weight = make_weight( 
self.hada_w1_a[adapter_name], self.hada_w1_b[adapter_name], self.hada_w2_a[adapter_name], self.hada_w2_b[adapter_name], scale=torch.tensor(self.scaling[adapter_name]), ) base_layer = self.get_base_layer() weight = weight.reshape(base_layer.weight.shape) # Perform rank dropout during training - drop rows of addition weights rank_dropout = self.rank_dropout[adapter_name] if self.training and rank_dropout: drop = (torch.rand(weight.size(0)) > rank_dropout).to(weight.dtype) drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device) # TODO: Investigate if there should be a scaler like in normal dropout during training # Original implementation doesn't have it # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L193 drop /= drop.mean() weight *= drop return weight def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) # Execute all the adapters for active_adapter in self.active_adapters: if active_adapter not in self._available_adapters: continue module_dropout = self.module_dropout[active_adapter] # Modify current execution weights if (not self.training) or (self.training and torch.rand(1) > module_dropout): result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs) result = result.to(previous_dtype) return result class Linear(LoHaLayer): """LoHa implemented in Linear layer""" def __init__( self, base_layer: nn.Module, adapter_name: str = "default", r: int = 0, alpha: float = 0.0, rank_dropout: float = 0.0, module_dropout: float = 0.0, init_weights: bool = True, **kwargs, ): super().__init__(base_layer) # Create adapter and set it active self._active_adapter = adapter_name self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs) def _get_delta_activations( self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any ) -> torch.Tensor: delta_weight = self.get_delta_weight(adapter_name) # don't add bias here, because the bias is already included in the output of the base_layer return F.linear(input, delta_weight) def __repr__(self) -> str: rep = super().__repr__() return "loha." + rep class Conv2d(LoHaLayer): """LoHa implemented in Conv2d layer""" def __init__( self, base_layer: nn.Module, adapter_name: str = "default", r: int = 0, alpha: float = 0.0, rank_dropout: float = 0.0, module_dropout: float = 0.0, use_effective_conv2d: bool = False, init_weights: bool = True, **kwargs, ): super().__init__(base_layer) # Create adapter and set it active self._active_adapter = adapter_name self.update_layer( adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs ) def _get_delta_activations( self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any ) -> torch.Tensor: delta_weight = self.get_delta_weight(adapter_name) # don't add bias here, because the bias is already included in the output of the base_layer base_layer = self.get_base_layer() return F.conv2d( input, delta_weight, stride=base_layer.stride, padding=base_layer.padding, dilation=base_layer.dilation, groups=base_layer.groups, ) def __repr__(self) -> str: rep = super().__repr__() return "loha." 
+ rep # Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L9 class HadaWeight(torch.autograd.Function): @staticmethod def forward(ctx, w1a, w1b, w2a, w2b, scale=torch.tensor(1)): ctx.save_for_backward(w1a, w1b, w2a, w2b, scale) diff_weight = ((w1a @ w1b) * (w2a @ w2b)) * scale return diff_weight @staticmethod def backward(ctx, grad_out): (w1a, w1b, w2a, w2b, scale) = ctx.saved_tensors grad_out = grad_out * scale temp = grad_out * (w2a @ w2b) grad_w1a = temp @ w1b.T grad_w1b = w1a.T @ temp temp = grad_out * (w1a @ w1b) grad_w2a = temp @ w2b.T grad_w2b = w2a.T @ temp del temp return grad_w1a, grad_w1b, grad_w2a, grad_w2b, None class HadaWeightCP(torch.autograd.Function): @staticmethod def forward(ctx, t1, w1a, w1b, t2, w2a, w2b, scale=torch.tensor(1)): ctx.save_for_backward(t1, w1a, w1b, t2, w2a, w2b, scale) rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", t1, w1b, w1a) rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", t2, w2b, w2a) return rebuild1 * rebuild2 * scale @staticmethod def backward(ctx, grad_out): (t1, w1a, w1b, t2, w2a, w2b, scale) = ctx.saved_tensors grad_out = grad_out * scale temp = torch.einsum("i j k l, j r -> i r k l", t2, w2b) rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w2a) grad_w = rebuild * grad_out del rebuild grad_w1a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w) grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w1a.T) del grad_w, temp grad_w1b = torch.einsum("i r k l, i j k l -> r j", t1, grad_temp) grad_t1 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w1b.T) del grad_temp temp = torch.einsum("i j k l, j r -> i r k l", t1, w1b) rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w1a) grad_w = rebuild * grad_out del rebuild grad_w2a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w) grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w2a.T) del grad_w, temp grad_w2b = torch.einsum("i r k l, i j k l -> r j", t2, grad_temp) grad_t2 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w2b.T) del grad_temp return grad_t1, grad_w1a, grad_w1b, grad_t2, grad_w2a, grad_w2b, None def make_weight(w1a, w1b, w2a, w2b, scale): return HadaWeight.apply(w1a, w1b, w2a, w2b, scale) def make_weight_cp(t1, w1a, w1b, t2, w2a, w2b, scale): return HadaWeightCP.apply(t1, w1a, w1b, t2, w2a, w2b, scale)
peft/src/peft/tuners/loha/layer.py/0
{ "file_path": "peft/src/peft/tuners/loha/layer.py", "repo_id": "peft", "token_count": 7478 }
155
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from peft.config import PromptLearningConfig from peft.utils import PeftType @dataclass class PrefixTuningConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PrefixEncoder`]. Args: encoder_hidden_size (`int`): The hidden size of the prompt encoder. prefix_projection (`bool`): Whether to project the prefix embeddings. """ encoder_hidden_size: int = field( default=None, metadata={"help": "The hidden size of the encoder"}, ) prefix_projection: bool = field( default=False, metadata={"help": "Whether to project the prefix tokens"}, ) def __post_init__(self): self.peft_type = PeftType.PREFIX_TUNING
peft/src/peft/tuners/prefix_tuning/config.py/0
{ "file_path": "peft/src/peft/tuners/prefix_tuning/config.py", "repo_id": "peft", "token_count": 454 }
156
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from peft import ( AutoPeftModel, AutoPeftModelForCausalLM, AutoPeftModelForFeatureExtraction, AutoPeftModelForQuestionAnswering, AutoPeftModelForSeq2SeqLM, AutoPeftModelForSequenceClassification, AutoPeftModelForTokenClassification, PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) class PeftAutoModelTester(unittest.TestCase): def test_peft_causal_lm(self): model_id = "peft-internal-testing/tiny-OPTForCausalLM-lora" model = AutoPeftModelForCausalLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForCausalLM)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForCausalLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForCausalLM)) # check if kwargs are passed correctly model = AutoPeftModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForCausalLM)) self.assertTrue(model.base_model.lm_head.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForCausalLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16) def test_peft_causal_lm_extended_vocab(self): model_id = "peft-internal-testing/tiny-random-OPTForCausalLM-extended-vocab" model = AutoPeftModelForCausalLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForCausalLM)) # check if kwargs are passed correctly model = AutoPeftModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForCausalLM)) self.assertTrue(model.base_model.lm_head.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForCausalLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16) def test_peft_seq2seq_lm(self): model_id = "peft-internal-testing/tiny_T5ForSeq2SeqLM-lora" model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSeq2SeqLM)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSeq2SeqLM)) # check if kwargs are passed correctly model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForSeq2SeqLM)) self.assertTrue(model.base_model.lm_head.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16) def test_peft_sequence_cls(self): model_id = 
"peft-internal-testing/tiny_OPTForSequenceClassification-lora" model = AutoPeftModelForSequenceClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSequenceClassification)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForSequenceClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSequenceClassification)) # check if kwargs are passed correctly model = AutoPeftModelForSequenceClassification.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForSequenceClassification)) self.assertTrue(model.score.original_module.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForSequenceClassification.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_token_classification(self): model_id = "peft-internal-testing/tiny_GPT2ForTokenClassification-lora" model = AutoPeftModelForTokenClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForTokenClassification)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForTokenClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForTokenClassification)) # check if kwargs are passed correctly model = AutoPeftModelForTokenClassification.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForTokenClassification)) self.assertTrue(model.base_model.classifier.original_module.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForTokenClassification.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_question_answering(self): model_id = "peft-internal-testing/tiny_OPTForQuestionAnswering-lora" model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForQuestionAnswering)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForQuestionAnswering)) # check if kwargs are passed correctly model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForQuestionAnswering)) self.assertTrue(model.base_model.qa_outputs.original_module.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForQuestionAnswering.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_feature_extraction(self): model_id = "peft-internal-testing/tiny_OPTForFeatureExtraction-lora" model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForFeatureExtraction)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForFeatureExtraction)) # check if kwargs are passed correctly model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForFeatureExtraction)) 
self.assertTrue(model.base_model.model.decoder.embed_tokens.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForFeatureExtraction.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_whisper(self): model_id = "peft-internal-testing/tiny_WhisperForConditionalGeneration-lora" model = AutoPeftModel.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModel)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModel.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModel)) # check if kwargs are passed correctly model = AutoPeftModel.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModel)) self.assertTrue(model.base_model.model.model.encoder.embed_positions.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModel.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16)
peft/tests/test_auto.py/0
{ "file_path": "peft/tests/test_auto.py", "repo_id": "peft", "token_count": 3750 }
157
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from copy import deepcopy from diffusers import StableDiffusionPipeline from parameterized import parameterized from torch import nn from transformers import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM from peft import IA3Config, LoHaConfig, LoraConfig, get_peft_model from peft.tuners.tuners_utils import ( _maybe_include_all_linear_layers, check_target_module_exists, inspect_matched_modules, ) from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND from .testing_utils import require_bitsandbytes, require_torch_gpu # Implements tests for regex matching logic common for all BaseTuner subclasses, and # tests for correct behaviour with different config kwargs for BaseTuners (Ex: feedforward for IA3, etc) and # tests for utlity function to include all linear layers REGEX_TEST_CASES = [ # tuple of # 1. key # 2. target_modules # 3. layers_to_transform # 4. layers_pattern # 5. expected result # some basic examples ("", [], None, None, False), ("", ["foo"], None, None, False), ("foo", [], None, None, False), ("foo", ["foo"], None, None, True), ("foo", ["bar"], None, None, False), ("foo", ["foo", "bar"], None, None, True), # with regex ("foo", "foo", None, None, True), ("foo", ".*oo", None, None, True), ("foo", "fo.*", None, None, True), ("foo", ".*bar.*", None, None, False), ("foobar", ".*oba.*", None, None, True), # with layers_to_transform ("foo.bar.1.baz", ["baz"], [1], ["bar"], True), ("foo.bar.1.baz", ["baz"], [0], ["bar"], False), ("foo.bar.1.baz", ["baz"], [2], ["bar"], False), ("foo.bar.10.baz", ["baz"], [0], ["bar"], False), ("foo.bar.10.baz", ["baz"], [1], ["bar"], False), ("foo.bar.1.baz", ["baz"], [0, 1, 2], ["bar"], True), ("foo.bar.1.baz", ["baz", "spam"], [1], ["bar"], True), ("foo.bar.1.baz", ["baz", "spam"], [0, 1, 2], ["bar"], True), # empty layers_to_transform ("foo.bar.7.baz", ["baz"], [], ["bar"], True), ("foo.bar.7.baz", ["baz"], None, ["bar"], True), # empty layers_pattern ("foo.whatever.1.baz", ["baz"], [1], [], True), ("foo.whatever.1.baz", ["baz"], [0], [], False), ("foo.whatever.1.baz", ["baz"], [1], "", True), ("foo.whatever.1.baz", ["baz"], [0], "", False), ("foo.whatever.1.baz", ["baz"], [1], None, True), ("foo.whatever.1.baz", ["baz"], [0], None, False), # some realistic examples: transformers model ("transformer.h.1.attn.attention.q_proj.foo", ["q_proj"], None, [], False), ("transformer.h.1.attn.attention.q_proj", [], None, [], False), ("transformer.h.1.attn.attention.q_proj", ["q_proj"], None, [], True), ("transformer.h.1.attn.attention.q_proj", ["q_proj", "v_proj"], None, [], True), ("transformer.h.1.attn.attention.resid_dropout", ["q_proj", "v_proj"], None, [], False), ("transformer.h.1.attn.attention.q_proj", ["q_proj"], [1], ["h"], True), ("transformer.h.1.attn.attention.q_proj", ["q_proj"], [0], ["h"], False), ("transformer.h.1.attn.attention.q_proj", ["q_proj"], [2], ["h"], False), 
("transformer.h.1.attn.attention.q_proj", ["q_proj"], [0, 1, 2], ["h"], True), ("transformer.h.1.attn.attention.q_proj", ["q_proj", "v_proj"], [0, 1, 2], ["h"], True), ("foo.bar.q_proj", ["q_proj"], None, [], True), ("foo.bar.1.baz", ["baz"], [1], ["foo"], False), # other corner cases. For ex, below is a case where layers_pattern # is one of the target nn.modules ("foo.bar.1.baz", ["baz"], [1], ["baz"], False), # here, layers_pattern is 'bar', but only keys that contain '.bar' are valid. ("bar.1.baz", ["baz"], [1], ["bar"], False), ("foo.bar.001.baz", ["baz"], [1], ["bar"], True), ("foo.bar.1.spam.2.baz", ["baz"], [1], ["bar"], True), ("foo.bar.2.spam.1.baz", ["baz"], [1], ["bar"], False), # some realistic examples: module using nn.Sequential # for the below test case, key should contain '.blocks' to be valid, because of how layers_pattern is matched ("blocks.1.weight", ["weight"], [1], ["blocks"], False), ("blocks.1.bias", ["weight"], [1], ["blocks"], False), ("mlp.blocks.1.weight", ["weight"], [1], ["blocks"], True), ("mlp.blocks.1.bias", ["weight"], [1], ["blocks"], False), ] MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_CASES = [ # model_name, model_type, initial_target_modules, expected_target_modules # test for a causal Llama model ( "HuggingFaceH4/tiny-random-LlamaForCausalLM", "causal", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["k_proj", "v_proj", "q_proj", "o_proj", "down_proj", "up_proj", "gate_proj"], ), # test for a Llama model without the LM head ( "HuggingFaceH4/tiny-random-LlamaForCausalLM", "base", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["k_proj", "v_proj", "q_proj", "o_proj", "down_proj", "up_proj", "gate_proj"], ), # test for gpt2 with Conv1D layers ("hf-internal-testing/tiny-random-gpt2", "causal", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["c_attn", "c_proj", "c_fc"]), # test for T5 model ( "hf-internal-testing/tiny-random-t5", "seq2seq", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["k", "q", "v", "o", "wi", "wo"], ), # test for GPTNeoX. output module list should exclude classification head - which is named as "embed_out" instead of the usual "lm_head" for GPTNeoX ( "hf-internal-testing/tiny-random-GPTNeoXForCausalLM", "causal", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"], ), ] # tests for a few args that should remain unchanged MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_INTERNALS = [ # initial_target_modules, expected_target_modules (["k_proj"], ["k_proj"]), # test with target_modules as None (None, None), # test with target_modules as a regex expression (".*(q_proj|v_proj)$", ".*(q_proj|v_proj)$"), ] BNB_QUANTIZATIONS = [("4bit",), ("8bit",)] BNB_TEST_CASES = [(x + y) for x in MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_CASES for y in BNB_QUANTIZATIONS] class PeftCustomKwargsTester(unittest.TestCase): r""" Test if the PeftModel is instantiated with correct behaviour for custom kwargs. This includes: - test if regex matching works correctly - test if adapters handle custom kwargs the right way e.g. IA3 for `feedforward_modules` """ transformers_class_map = {"causal": AutoModelForCausalLM, "seq2seq": AutoModelForSeq2SeqLM, "base": AutoModel} @parameterized.expand(REGEX_TEST_CASES) def test_regex_matching_valid(self, key, target_modules, layers_to_transform, layers_pattern, expected_result): # We use a LoRA Config for testing, but the regex matching function is common for all BaseTuner subclasses. # example model_id for config initialization. 
key is matched only against the target_modules given, so this can be any model model_id = "peft-internal-testing/tiny-OPTForCausalLM-lora" config = LoraConfig( base_model_name_or_path=model_id, target_modules=target_modules, layers_pattern=layers_pattern, layers_to_transform=layers_to_transform, ) actual_result = bool(check_target_module_exists(config, key)) self.assertEqual(actual_result, expected_result) def test_module_matching_lora(self): # peft models that have a module matching method to inspect the matching modules to allow # users to easily debug their configuration. Here we only test a single case, not all possible combinations of # configs that could exist. This is okay as the method calls `check_target_module_exists` internally, which # has been extensively tested above. model_id = "hf-internal-testing/tiny-random-BloomForCausalLM" model = AutoModel.from_pretrained(model_id) # by default, this model matches query_key_value config = LoraConfig() peft_model = get_peft_model(model, config) output = inspect_matched_modules(peft_model) # inspects default adapter for peft_model matched = output["matched"] expected = [ "h.0.self_attention.query_key_value", "h.1.self_attention.query_key_value", "h.2.self_attention.query_key_value", "h.3.self_attention.query_key_value", "h.4.self_attention.query_key_value", ] self.assertEqual(matched, expected) # module lists should match exactly # no overlap with matched modules unmatched = output["unmatched"] for key in expected: self.assertFalse(key in unmatched) def test_feedforward_matching_ia3(self): model_id = "hf-internal-testing/tiny-random-T5ForConditionalGeneration" model = AutoModelForSeq2SeqLM.from_pretrained(model_id) # simple example for just one t5 block for testing config_kwargs = { "target_modules": ".*encoder.*block.0.*(SelfAttention|EncDecAttention|DenseReluDense).(k|q|v|wo|wi)$", "feedforward_modules": ["wo", "wi"], } config = IA3Config(base_model_name_or_path=model_id, **config_kwargs) peft_model = get_peft_model(model, config) output = inspect_matched_modules(peft_model) # inspects default adapter for peft_model matched = output["matched"] expected = [ "encoder.block.0.layer.0.SelfAttention.q", "encoder.block.0.layer.0.SelfAttention.k", "encoder.block.0.layer.0.SelfAttention.v", "encoder.block.0.layer.1.DenseReluDense.wi", "encoder.block.0.layer.1.DenseReluDense.wo", ] expected_feedforward = [ "encoder.block.0.layer.1.DenseReluDense.wi", "encoder.block.0.layer.1.DenseReluDense.wo", ] self.assertEqual(matched, expected) # not required since we do similar checks above, but just to be sure module_dict = dict(model.named_modules()) for key in matched: module = module_dict[key] if key in expected_feedforward: self.assertTrue(module.is_feedforward) else: # other IA3 modules should not be marked as feedforward self.assertFalse(module.is_feedforward) @parameterized.expand(MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_CASES) def test_maybe_include_all_linear_layers_lora( self, model_id, model_type, initial_target_modules, expected_target_modules ): model = self.transformers_class_map[model_type].from_pretrained(model_id) config_cls = LoraConfig self._check_match_with_expected_target_modules( model_id, model, config_cls, initial_target_modules, expected_target_modules ) @parameterized.expand(BNB_TEST_CASES) @require_torch_gpu @require_bitsandbytes def test_maybe_include_all_linear_layers_lora_bnb( self, model_id, model_type, initial_target_modules, expected_target_modules, quantization ): if quantization == "4bit": config_kwargs = {"load_in_4bit": 
True} elif quantization == "8bit": config_kwargs = {"load_in_8bit": True} model = self.transformers_class_map[model_type].from_pretrained(model_id, device_map="auto", **config_kwargs) config_cls = LoraConfig self._check_match_with_expected_target_modules( model_id, model, config_cls, initial_target_modules, expected_target_modules ) def _check_match_with_expected_target_modules( self, model_id, model, config_cls, initial_target_modules, expected_target_modules ): """ Helper function for the test for `_maybe_include_all_linear_layers` """ actual_config = config_cls(base_model_name_or_path=model_id, target_modules=initial_target_modules) expected_config = config_cls(base_model_name_or_path=model_id, target_modules=expected_target_modules) model_copy = deepcopy(model) actual_model = get_peft_model(model, peft_config=actual_config) expected_model = get_peft_model(model_copy, peft_config=expected_config) expected_model_module_dict = dict(expected_model.named_modules()) # compare the two models and assert that all layers are of the same type for name, actual_module in actual_model.named_modules(): expected_module = expected_model_module_dict[name] self.assertEqual(type(actual_module), type(expected_module)) def test_maybe_include_all_linear_layers_ia3_loha(self): model_id, initial_target_modules, expected_target_modules = ( "HuggingFaceH4/tiny-random-LlamaForCausalLM", INCLUDE_LINEAR_LAYERS_SHORTHAND, ["k_proj", "v_proj", "q_proj", "o_proj", "down_proj", "up_proj", "gate_proj"], ) model_ia3 = AutoModelForCausalLM.from_pretrained(model_id) model_loha = deepcopy(model_ia3) config_classes = [IA3Config, LoHaConfig] models = [model_ia3, model_loha] for config_cls, model in zip(config_classes, models): self._check_match_with_expected_target_modules( model_id, model, config_cls, initial_target_modules, expected_target_modules ) @parameterized.expand(MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_INTERNALS) def test_maybe_include_all_linear_layers_internals(self, initial_target_modules, expected_target_modules): model_id = "HuggingFaceH4/tiny-random-LlamaForCausalLM" model = AutoModelForCausalLM.from_pretrained(model_id) config = LoraConfig(base_model_name_or_path=model_id, target_modules=initial_target_modules) new_config = _maybe_include_all_linear_layers(config, model) if isinstance(expected_target_modules, list): # assert that expected and actual target_modules have the same items self.assertCountEqual(new_config.target_modules, expected_target_modules) else: self.assertEqual(new_config.target_modules, expected_target_modules) def test_maybe_include_all_linear_layers_diffusion(self): model_id = "hf-internal-testing/tiny-stable-diffusion-torch" model = StableDiffusionPipeline.from_pretrained(model_id) config = LoraConfig(base_model_name_or_path=model_id, target_modules="all-linear") with self.assertRaisesRegex( ValueError, "Only instances of PreTrainedModel support `target_modules='all-linear'`", ): model.unet = get_peft_model(model.unet, config) class MLP(nn.Module): def __init__(self, bias=True): super().__init__() self.lin0 = nn.Linear(10, 20, bias=bias) self.relu = nn.ReLU() self.drop = nn.Dropout(0.5) self.lin1 = nn.Linear(20, 2, bias=bias) self.sm = nn.LogSoftmax(dim=-1) class TestTargetedModuleNames(unittest.TestCase): """Check that the attribute targeted_module_names is correctly set. This checks LoRA and IA³, but this should be sufficient, testing all other tuners is not necessary. 
""" def test_one_targeted_module_regex(self): model = MLP() model = get_peft_model(model, LoraConfig(target_modules="lin0")) self.assertEqual(model.targeted_module_names, ["lin0"]) def test_two_targeted_module_regex(self): model = MLP() model = get_peft_model(model, LoraConfig(target_modules="lin.*")) self.assertEqual(model.targeted_module_names, ["lin0", "lin1"]) def test_one_targeted_module_list(self): model = MLP() model = get_peft_model(model, LoraConfig(target_modules=["lin0"])) self.assertEqual(model.targeted_module_names, ["lin0"]) def test_two_targeted_module_list(self): model = MLP() model = get_peft_model(model, LoraConfig(target_modules=["lin0", "lin1"])) self.assertEqual(model.targeted_module_names, ["lin0", "lin1"]) def test_ia3_targeted_module_regex(self): model = MLP() model = get_peft_model(model, IA3Config(target_modules=".*lin.*", feedforward_modules=".*lin.*")) self.assertEqual(model.targeted_module_names, ["lin0", "lin1"]) def test_ia3_targeted_module_list(self): model = MLP() model = get_peft_model(model, IA3Config(target_modules=["lin0", "lin1"], feedforward_modules=["lin0", "lin1"])) self.assertEqual(model.targeted_module_names, ["lin0", "lin1"]) def test_realistic_example(self): model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-BloomForCausalLM") config = LoraConfig(task_type="CAUSAL_LM") model = get_peft_model(model, config) expected = [ f"transformer.h.{i}.self_attention.query_key_value" for i in range(len(model.base_model.transformer.h)) ] self.assertEqual(model.targeted_module_names, expected)
peft/tests/test_tuners_utils.py/0
{ "file_path": "peft/tests/test_tuners_utils.py", "repo_id": "peft", "token_count": 7395 }
158
#!/bin/bash NUM_PROC=$1 shift torchrun --nproc_per_node=$NUM_PROC train.py "$@"
pytorch-image-models/distributed_train.sh/0
{ "file_path": "pytorch-image-models/distributed_train.sh", "repo_id": "pytorch-image-models", "token_count": 37 }
159
# DenseNet **DenseNet** is a type of convolutional neural network that utilises dense connections between layers, through [Dense Blocks](http://www.paperswithcode.com/method/dense-block), where we connect *all layers* (with matching feature-map sizes) directly with each other. To preserve the feed-forward nature, each layer obtains additional inputs from all preceding layers and passes on its own feature-maps to all subsequent layers. The **DenseNet Blur** variant in this collection by Ross Wightman employs [Blur Pooling](http://www.paperswithcode.com/method/blur-pooling) {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/HuangLW16a, author = {Gao Huang and Zhuang Liu and Kilian Q. Weinberger}, title = {Densely Connected Convolutional Networks}, journal = {CoRR}, volume = {abs/1608.06993}, year = {2016}, url = {http://arxiv.org/abs/1608.06993}, archivePrefix = {arXiv}, eprint = {1608.06993}, timestamp = {Mon, 10 Sep 2018 15:49:32 +0200}, biburl = {https://dblp.org/rec/journals/corr/HuangLW16a.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` ``` @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/rwightman/pytorch-image-models}} } ``` <!-- Type: model-index Collections: - Name: DenseNet Paper: Title: Densely Connected Convolutional Networks URL: https://paperswithcode.com/paper/densely-connected-convolutional-networks Models: - Name: densenet121 In Collection: DenseNet Metadata: FLOPs: 3641843200 Parameters: 7980000 File Size: 32376726 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Techniques: - Kaiming Initialization - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet ID: densenet121 LR: 0.1 Epochs: 90 Layers: 121 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L295 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenet121_ra-50efcf5c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.56% Top 5 Accuracy: 92.65% - Name: densenet161 In Collection: DenseNet Metadata: FLOPs: 9931959264 Parameters: 28680000 File Size: 115730790 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Techniques: - Kaiming Initialization - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet ID: densenet161 LR: 0.1 Epochs: 90 Layers: 161 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L347 Weights: https://download.pytorch.org/models/densenet161-8d451a50.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 
Accuracy: 77.36% Top 5 Accuracy: 93.63% - Name: densenet169 In Collection: DenseNet Metadata: FLOPs: 4316945792 Parameters: 14150000 File Size: 57365526 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Techniques: - Kaiming Initialization - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet ID: densenet169 LR: 0.1 Epochs: 90 Layers: 169 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L327 Weights: https://download.pytorch.org/models/densenet169-b2777c0a.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.9% Top 5 Accuracy: 93.02% - Name: densenet201 In Collection: DenseNet Metadata: FLOPs: 5514321024 Parameters: 20010000 File Size: 81131730 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Techniques: - Kaiming Initialization - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet ID: densenet201 LR: 0.1 Epochs: 90 Layers: 201 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L337 Weights: https://download.pytorch.org/models/densenet201-c1103571.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.29% Top 5 Accuracy: 93.48% - Name: densenetblur121d In Collection: DenseNet Metadata: FLOPs: 3947812864 Parameters: 8000000 File Size: 32456500 Architecture: - 1x1 Convolution - Batch Normalization - Blur Pooling - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: densenetblur121d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L305 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenetblur121d_ra-100dcfbc.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.59% Top 5 Accuracy: 93.2% - Name: tv_densenet121 In Collection: DenseNet Metadata: FLOPs: 3641843200 Parameters: 7980000 File Size: 32342954 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Block - Dense Connections - Dropout - Max Pooling - ReLU - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_densenet121 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/densenet.py#L379 Weights: https://download.pytorch.org/models/densenet121-a639ec97.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.74% Top 5 Accuracy: 92.15% -->
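Since the `code_snippets.md` include above is not expanded in this template, here is a minimal usage sketch with `timm`, assuming the `densenet121` entry from the model-index metadata above (pretrained weights download on first use):

```py
import timm
import torch

# Pretrained DenseNet-121 as listed in the model-index above.
model = timm.create_model('densenet121', pretrained=True).eval()

with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000])

# num_classes=0 drops the classifier to use the model as a feature extractor.
backbone = timm.create_model('densenet121', pretrained=True, num_classes=0)
```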
pytorch-image-models/docs/models/.templates/models/densenet.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/densenet.md", "repo_id": "pytorch-image-models", "token_count": 3382 }
160
# Instagram ResNeXt WSL A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. These models were trained on billions of Instagram images using thousands of distinct hashtags as labels and exhibit excellent transfer learning performance. Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{mahajan2018exploring, title={Exploring the Limits of Weakly Supervised Pretraining}, author={Dhruv Mahajan and Ross Girshick and Vignesh Ramanathan and Kaiming He and Manohar Paluri and Yixuan Li and Ashwin Bharambe and Laurens van der Maaten}, year={2018}, eprint={1805.00932}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: IG ResNeXt Paper: Title: Exploring the Limits of Weakly Supervised Pretraining URL: https://paperswithcode.com/paper/exploring-the-limits-of-weakly-supervised Models: - Name: ig_resnext101_32x16d In Collection: IG ResNeXt Metadata: FLOPs: 46623691776 Parameters: 194030000 File Size: 777518664 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - IG-3.5B-17k - ImageNet Training Resources: 336x GPUs ID: ig_resnext101_32x16d Epochs: 100 Layers: 101 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 8064 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L874 Weights: https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.16% Top 5 Accuracy: 97.19% - Name: ig_resnext101_32x32d In Collection: IG ResNeXt Metadata: FLOPs: 112225170432 Parameters: 468530000 File Size: 1876573776 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - IG-3.5B-17k - ImageNet Training Resources: 336x GPUs ID: ig_resnext101_32x32d Epochs: 100 Layers: 101 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 8064 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Minibatch Size: 8064 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L885 Weights: https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.09% Top 5 Accuracy: 97.44% - Name: ig_resnext101_32x48d In Collection: IG ResNeXt Metadata: FLOPs: 197446554624 Parameters: 828410000 File Size: 3317136976 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average
Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - IG-3.5B-17k - ImageNet Training Resources: 336x GPUs ID: ig_resnext101_32x48d Epochs: 100 Layers: 101 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 8064 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L896 Weights: https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.42% Top 5 Accuracy: 97.58% - Name: ig_resnext101_32x8d In Collection: IG ResNeXt Metadata: FLOPs: 21180417024 Parameters: 88790000 File Size: 356056638 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - IG-3.5B-17k - ImageNet Training Resources: 336x GPUs ID: ig_resnext101_32x8d Epochs: 100 Layers: 101 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 8064 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L863 Weights: https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.7% Top 5 Accuracy: 96.64% -->
pytorch-image-models/docs/models/.templates/models/ig-resnext.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/ig-resnext.md", "repo_id": "pytorch-image-models", "token_count": 2409 }
161
# SWSL ResNeXt A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. The models in this collection utilise semi-weakly supervised learning to improve the performance of the models. The approach brings important gains to standard architectures for image, video and fine-grained classification. Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1905-00546, author = {I. Zeki Yalniz and Herv{\'{e}} J{\'{e}}gou and Kan Chen and Manohar Paluri and Dhruv Mahajan}, title = {Billion-scale semi-supervised learning for image classification}, journal = {CoRR}, volume = {abs/1905.00546}, year = {2019}, url = {http://arxiv.org/abs/1905.00546}, archivePrefix = {arXiv}, eprint = {1905.00546}, timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: SWSL ResNext Paper: Title: Billion-scale semi-supervised learning for image classification URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for Models: - Name: swsl_resnext101_32x16d In Collection: SWSL ResNext Metadata: FLOPs: 46623691776 Parameters: 194030000 File Size: 777518664 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext101_32x16d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L1009 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.34% Top 5 Accuracy: 96.84% - Name: swsl_resnext101_32x4d In Collection: SWSL ResNext Metadata: FLOPs: 10298145792 Parameters: 44180000 File Size: 177341913 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext101_32x4d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L987 Weights:
https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.22% Top 5 Accuracy: 96.77% - Name: swsl_resnext101_32x8d In Collection: SWSL ResNext Metadata: FLOPs: 21180417024 Parameters: 88790000 File Size: 356056638 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext101_32x8d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L998 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.27% Top 5 Accuracy: 97.17% - Name: swsl_resnext50_32x4d In Collection: SWSL ResNext Metadata: FLOPs: 5472648192 Parameters: 25030000 File Size: 100428550 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext50_32x4d LR: 0.0015 Epochs: 30 Layers: 50 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L976 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.17% Top 5 Accuracy: 96.23% -->
pytorch-image-models/docs/models/.templates/models/swsl-resnext.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/swsl-resnext.md", "repo_id": "pytorch-image-models", "token_count": 2646 }
162
- sections: - local: index title: Home - local: quickstart title: Quickstart - local: installation title: Installation title: Get started - sections: - local: feature_extraction title: Using Pretrained Models as Feature Extractors - local: training_script title: Training With The Official Training Script - local: hf_hub title: Share and Load Models from the 🤗 Hugging Face Hub title: Tutorials - sections: - local: models title: Model Summaries - local: results title: Results - local: models/adversarial-inception-v3 title: Adversarial Inception v3 - local: models/advprop title: AdvProp (EfficientNet) - local: models/big-transfer title: Big Transfer (BiT) - local: models/csp-darknet title: CSP-DarkNet - local: models/csp-resnet title: CSP-ResNet - local: models/csp-resnext title: CSP-ResNeXt - local: models/densenet title: DenseNet - local: models/dla title: Deep Layer Aggregation - local: models/dpn title: Dual Path Network (DPN) - local: models/ecaresnet title: ECA-ResNet - local: models/efficientnet title: EfficientNet - local: models/efficientnet-pruned title: EfficientNet (Knapsack Pruned) - local: models/ensemble-adversarial title: Ensemble Adversarial Inception ResNet v2 - local: models/ese-vovnet title: ESE-VoVNet - local: models/fbnet title: FBNet - local: models/gloun-inception-v3 title: (Gluon) Inception v3 - local: models/gloun-resnet title: (Gluon) ResNet - local: models/gloun-resnext title: (Gluon) ResNeXt - local: models/gloun-senet title: (Gluon) SENet - local: models/gloun-seresnext title: (Gluon) SE-ResNeXt - local: models/gloun-xception title: (Gluon) Xception - local: models/hrnet title: HRNet - local: models/ig-resnext title: Instagram ResNeXt WSL - local: models/inception-resnet-v2 title: Inception ResNet v2 - local: models/inception-v3 title: Inception v3 - local: models/inception-v4 title: Inception v4 - local: models/legacy-se-resnet title: (Legacy) SE-ResNet - local: models/legacy-se-resnext title: (Legacy) SE-ResNeXt - local: models/legacy-senet title: (Legacy) SENet - local: models/mixnet title: MixNet - local: models/mnasnet title: MnasNet - local: models/mobilenet-v2 title: MobileNet v2 - local: models/mobilenet-v3 title: MobileNet v3 - local: models/nasnet title: NASNet - local: models/noisy-student title: Noisy Student (EfficientNet) - local: models/pnasnet title: PNASNet - local: models/regnetx title: RegNetX - local: models/regnety title: RegNetY - local: models/res2net title: Res2Net - local: models/res2next title: Res2NeXt - local: models/resnest title: ResNeSt - local: models/resnet title: ResNet - local: models/resnet-d title: ResNet-D - local: models/resnext title: ResNeXt - local: models/rexnet title: RexNet - local: models/se-resnet title: SE-ResNet - local: models/selecsls title: SelecSLS - local: models/seresnext title: SE-ResNeXt - local: models/skresnet title: SK-ResNet - local: models/skresnext title: SK-ResNeXt - local: models/spnasnet title: SPNASNet - local: models/ssl-resnet title: SSL ResNet - local: models/swsl-resnet title: SWSL ResNet - local: models/swsl-resnext title: SWSL ResNeXt - local: models/tf-efficientnet title: (Tensorflow) EfficientNet - local: models/tf-efficientnet-condconv title: (Tensorflow) EfficientNet CondConv - local: models/tf-efficientnet-lite title: (Tensorflow) EfficientNet Lite - local: models/tf-inception-v3 title: (Tensorflow) Inception v3 - local: models/tf-mixnet title: (Tensorflow) MixNet - local: models/tf-mobilenet-v3 title: (Tensorflow) MobileNet v3 - local: models/tresnet title: TResNet - local: 
models/wide-resnet title: Wide ResNet - local: models/xception title: Xception title: Model Pages isExpanded: false - sections: - local: reference/models title: Models - local: reference/data title: Data - local: reference/optimizers title: Optimizers - local: reference/schedulers title: Learning Rate Schedulers title: Reference
pytorch-image-models/hfdocs/source/_toctree.yml/0
{ "file_path": "pytorch-image-models/hfdocs/source/_toctree.yml", "repo_id": "pytorch-image-models", "token_count": 1686 }
163
""" ONNX export script Export PyTorch models as ONNX graphs. This export script originally started as an adaptation of code snippets found at https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html The default parameters work with PyTorch 1.6 and ONNX 1.7 and produce an optimal ONNX graph for hosting in the ONNX runtime (see onnx_validate.py). To export an ONNX model compatible with caffe2 (see caffe2_benchmark.py and caffe2_validate.py), the --keep-init and --aten-fallback flags are currently required. Older versions of PyTorch/ONNX (tested PyTorch 1.4, ONNX 1.5) do not need extra flags for caffe2 compatibility, but they produce a model that isn't as fast running on ONNX runtime. Most new release of PyTorch and ONNX cause some sort of breakage in the export / usage of ONNX models. Please do your research and search ONNX and PyTorch issue tracker before asking me. Thanks. Copyright 2020 Ross Wightman """ import argparse import timm from timm.utils.model import reparameterize_model from timm.utils.onnx import onnx_export parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') parser.add_argument('output', metavar='ONNX_FILE', help='output model filename') parser.add_argument('--model', '-m', metavar='MODEL', default='mobilenetv3_large_100', help='model architecture (default: mobilenetv3_large_100)') parser.add_argument('--opset', type=int, default=None, help='ONNX opset to use (default: 10)') parser.add_argument('--keep-init', action='store_true', default=False, help='Keep initializers as input. Needed for Caffe2 compatible export in newer PyTorch/ONNX.') parser.add_argument('--aten-fallback', action='store_true', default=False, help='Fallback to ATEN ops. Helps fix AdaptiveAvgPool issue with Caffe2 in newer PyTorch/ONNX.') parser.add_argument('--dynamic-size', action='store_true', default=False, help='Export model width dynamic width/height. 
Not recommended for "tf" models with SAME padding.') parser.add_argument('--check-forward', action='store_true', default=False, help='Do a full check of torch vs onnx forward after export.') parser.add_argument('-b', '--batch-size', default=1, type=int, metavar='N', help='mini-batch size (default: 1)') parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty') parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset') parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of dataset') parser.add_argument('--num-classes', type=int, default=1000, help='Number of classes in dataset') parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='path to checkpoint (default: none)') parser.add_argument('--reparam', default=False, action='store_true', help='Reparameterize model') parser.add_argument('--training', default=False, action='store_true', help='Export in training mode (default is eval)') parser.add_argument('--verbose', default=False, action='store_true', help='Extra stdout output') def main(): args = parser.parse_args() args.pretrained = True if args.checkpoint: args.pretrained = False print("==> Creating PyTorch {} model".format(args.model)) # NOTE exportable=True flag disables autofn/jit scripted activations and uses Conv2dSameExport layers # for models using SAME padding model = timm.create_model( args.model, num_classes=args.num_classes, in_chans=3, pretrained=args.pretrained, checkpoint_path=args.checkpoint, exportable=True, ) if args.reparam: model = reparameterize_model(model) onnx_export( model, args.output, opset=args.opset, dynamic_size=args.dynamic_size, aten_fallback=args.aten_fallback, keep_initializers=args.keep_init, check_forward=args.check_forward, training=args.training, verbose=args.verbose, input_size=(3, args.img_size, args.img_size), batch_size=args.batch_size, ) if __name__ == '__main__': main()
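For reference, a hedged sketch of driving the same export path programmatically, mirroring the `onnx_export(...)` call in `main()` above; the model name and output filename are illustrative:

import timm
from timm.utils.onnx import onnx_export

model = timm.create_model('mobilenetv3_large_100', pretrained=True, exportable=True)
onnx_export(
    model,
    'mobilenetv3_large_100.onnx',  # illustrative output path
    input_size=(3, 224, 224),
    batch_size=1,
    check_forward=True,  # run the torch vs. ONNX forward comparison after export
)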
pytorch-image-models/onnx_export.py/0
{ "file_path": "pytorch-image-models/onnx_export.py", "repo_id": "pytorch-image-models", "token_count": 1740 }
164
import torch import torch.nn as nn from timm.layers import create_act_layer, set_layer_config import importlib import os torch_backend = os.environ.get('TORCH_BACKEND') if torch_backend is not None: importlib.import_module(torch_backend) torch_device = os.environ.get('TORCH_DEVICE', 'cpu') class MLP(nn.Module): def __init__(self, act_layer="relu", inplace=True): super(MLP, self).__init__() self.fc1 = nn.Linear(1000, 100) self.act = create_act_layer(act_layer, inplace=inplace) self.fc2 = nn.Linear(100, 10) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.fc2(x) return x def _run_act_layer_grad(act_type, inplace=True): x = torch.rand(10, 1000) * 10 m = MLP(act_layer=act_type, inplace=inplace) def _run(x, act_layer=''): if act_layer: # replace act layer if set m.act = create_act_layer(act_layer, inplace=inplace) out = m(x) l = (out - 0).pow(2).sum() return l x = x.to(device=torch_device) m.to(device=torch_device) out_me = _run(x) with set_layer_config(scriptable=True): out_jit = _run(x, act_type) assert torch.isclose(out_jit, out_me) with set_layer_config(no_jit=True): out_basic = _run(x, act_type) assert torch.isclose(out_basic, out_jit) def test_swish_grad(): for _ in range(100): _run_act_layer_grad('swish') def test_mish_grad(): for _ in range(100): _run_act_layer_grad('mish') def test_hard_sigmoid_grad(): for _ in range(100): _run_act_layer_grad('hard_sigmoid', inplace=None) def test_hard_swish_grad(): for _ in range(100): _run_act_layer_grad('hard_swish') def test_hard_mish_grad(): for _ in range(100): _run_act_layer_grad('hard_mish')
pytorch-image-models/tests/test_layers.py/0
{ "file_path": "pytorch-image-models/tests/test_layers.py", "repo_id": "pytorch-image-models", "token_count": 871 }
165
import csv import os import pkgutil import re from typing import Dict, List, Optional, Union from .dataset_info import DatasetInfo # NOTE no ambiguity wrt mapping from # classes to ImageNet subset so far, but likely to change _NUM_CLASSES_TO_SUBSET = { 1000: 'imagenet-1k', 11221: 'imagenet-21k-miil', # miil subset of fall11 11821: 'imagenet-12k', # timm specific 12k subset of fall11 21841: 'imagenet-22k', # as in fall11.tar 21842: 'imagenet-22k-ms', # a Microsoft (for FocalNet) remapping of 22k that moves ImageNet-1k classes to the first 1000 21843: 'imagenet-21k-goog', # Google's ImageNet full has two classes not in fall11 } _SUBSETS = { 'imagenet1k': 'imagenet_synsets.txt', 'imagenet12k': 'imagenet12k_synsets.txt', 'imagenet22k': 'imagenet22k_synsets.txt', 'imagenet21k': 'imagenet21k_goog_synsets.txt', 'imagenet21kgoog': 'imagenet21k_goog_synsets.txt', 'imagenet21kmiil': 'imagenet21k_miil_synsets.txt', 'imagenet22kms': 'imagenet22k_ms_synsets.txt', } _LEMMA_FILE = 'imagenet_synset_to_lemma.txt' _DEFINITION_FILE = 'imagenet_synset_to_definition.txt' def infer_imagenet_subset(model_or_cfg) -> Optional[str]: if isinstance(model_or_cfg, dict): num_classes = model_or_cfg.get('num_classes', None) else: num_classes = getattr(model_or_cfg, 'num_classes', None) if not num_classes: pretrained_cfg = getattr(model_or_cfg, 'pretrained_cfg', {}) # FIXME at some point pretrained_cfg should include dataset-tag, # which will be more robust than a guess based on num_classes num_classes = pretrained_cfg.get('num_classes', None) if not num_classes or num_classes not in _NUM_CLASSES_TO_SUBSET: return None return _NUM_CLASSES_TO_SUBSET[num_classes] class ImageNetInfo(DatasetInfo): def __init__(self, subset: str = 'imagenet-1k'): super().__init__() subset = re.sub(r'[-_\s]', '', subset.lower()) assert subset in _SUBSETS, f'Unknown imagenet subset {subset}.' # WordNet synsets (part-of-speech + offset) are the unique class label names for ImageNet classifiers synset_file = _SUBSETS[subset] synset_data = pkgutil.get_data(__name__, os.path.join('_info', synset_file)) self._synsets = synset_data.decode('utf-8').splitlines() # WordNet lemmas (canonical dictionary form of word) and definitions are used to build # the class descriptions. If detailed=True both are used, otherwise just the lemmas. lemma_data = pkgutil.get_data(__name__, os.path.join('_info', _LEMMA_FILE)) reader = csv.reader(lemma_data.decode('utf-8').splitlines(), delimiter='\t') self._lemmas = dict(reader) definition_data = pkgutil.get_data(__name__, os.path.join('_info', _DEFINITION_FILE)) reader = csv.reader(definition_data.decode('utf-8').splitlines(), delimiter='\t') self._definitions = dict(reader) def num_classes(self): return len(self._synsets) def label_names(self): return self._synsets def label_descriptions(self, detailed: bool = False, as_dict: bool = False) -> Union[List[str], Dict[str, str]]: if as_dict: return {label: self.label_name_to_description(label, detailed=detailed) for label in self._synsets} else: return [self.label_name_to_description(label, detailed=detailed) for label in self._synsets] def index_to_label_name(self, index) -> str: assert 0 <= index < len(self._synsets), \ f'Index ({index}) out of range for dataset with {len(self._synsets)} classes.'
return self._synsets[index] def index_to_description(self, index: int, detailed: bool = False) -> str: label = self.index_to_label_name(index) return self.label_name_to_description(label, detailed=detailed) def label_name_to_description(self, label: str, detailed: bool = False) -> str: if detailed: description = f'{self._lemmas[label]}: {self._definitions[label]}' else: description = f'{self._lemmas[label]}' return description
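A short usage sketch for the helpers above, assuming a standard 1000-class `timm` model; the printed synset id is illustrative:

import timm
from timm.data.imagenet_info import ImageNetInfo, infer_imagenet_subset

model = timm.create_model('resnet50')          # default head has 1000 classes
subset = infer_imagenet_subset(model)          # -> 'imagenet-1k'
info = ImageNetInfo(subset)

print(info.num_classes())                      # 1000
print(info.index_to_label_name(0))             # WordNet synset id, e.g. 'n01440764'
print(info.index_to_description(0, detailed=True))  # '<lemma>: <definition>'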
pytorch-image-models/timm/data/imagenet_info.py/0
{ "file_path": "pytorch-image-models/timm/data/imagenet_info.py", "repo_id": "pytorch-image-models", "token_count": 1733 }
166
from multiprocessing import Value class SharedCount: def __init__(self, epoch: int = 0): self.shared_epoch = Value('i', epoch) @property def value(self): return self.shared_epoch.value @value.setter def value(self, epoch): self.shared_epoch.value = epoch
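A small usage sketch, assuming the import path implied by this file's location: the value is typically bumped once per epoch by the training side, and because it is backed by `multiprocessing.Value`, reader objects shared with `DataLoader` worker processes can observe the update.

from timm.data.readers.shared_count import SharedCount

epoch_count = SharedCount(epoch=0)
print(epoch_count.value)  # 0

# e.g. called from a set_epoch() style hook at the start of each epoch
epoch_count.value = 1
print(epoch_count.value)  # 1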
pytorch-image-models/timm/data/readers/shared_count.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/shared_count.py", "repo_id": "pytorch-image-models", "token_count": 122 }
167
""" PyTorch Conditionally Parameterized Convolution (CondConv) Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference (https://arxiv.org/abs/1904.04971) Hacked together by / Copyright 2020 Ross Wightman """ import math from functools import partial import numpy as np import torch from torch import nn as nn from torch.nn import functional as F from .helpers import to_2tuple from .conv2d_same import conv2d_same from .padding import get_padding_value def get_condconv_initializer(initializer, num_experts, expert_shape): def condconv_initializer(weight): """CondConv initializer function.""" num_params = np.prod(expert_shape) if (len(weight.shape) != 2 or weight.shape[0] != num_experts or weight.shape[1] != num_params): raise (ValueError( 'CondConv variables must have shape [num_experts, num_params]')) for i in range(num_experts): initializer(weight[i].view(expert_shape)) return condconv_initializer class CondConv2d(nn.Module): """ Conditionally Parameterized Convolution Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: https://github.com/pytorch/pytorch/issues/17983 """ __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): super(CondConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = to_2tuple(kernel_size) self.stride = to_2tuple(stride) padding_val, is_padding_dynamic = get_padding_value( padding, kernel_size, stride=stride, dilation=dilation) self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript self.padding = to_2tuple(padding_val) self.dilation = to_2tuple(dilation) self.groups = groups self.num_experts = num_experts self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size weight_num_param = 1 for wd in self.weight_shape: weight_num_param *= wd self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) if bias: self.bias_shape = (self.out_channels,) self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): init_weight = get_condconv_initializer( partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) init_weight(self.weight) if self.bias is not None: fan_in = np.prod(self.weight_shape[1:]) bound = 1 / math.sqrt(fan_in) init_bias = get_condconv_initializer( partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) init_bias(self.bias) def forward(self, x, routing_weights): B, C, H, W = x.shape weight = torch.matmul(routing_weights, self.weight) new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size weight = weight.view(new_weight_shape) bias = None if self.bias is not None: bias = torch.matmul(routing_weights, self.bias) bias = bias.view(B * self.out_channels) # move batch elements with channels so each batch element can be efficiently convolved with separate kernel # reshape instead of view to work with channels_last input x = x.reshape(1, B * C, H, W) if self.dynamic_padding: out = conv2d_same( x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, 
groups=self.groups * B) else: out = F.conv2d( x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * B) out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) # Literal port (from TF definition) # x = torch.split(x, 1, 0) # weight = torch.split(weight, 1, 0) # if self.bias is not None: # bias = torch.matmul(routing_weights, self.bias) # bias = torch.split(bias, 1, 0) # else: # bias = [None] * B # out = [] # for xi, wi, bi in zip(x, weight, bias): # wi = wi.view(*self.weight_shape) # if bi is not None: # bi = bi.view(*self.bias_shape) # out.append(self.conv_fn( # xi, wi, bi, stride=self.stride, padding=self.padding, # dilation=self.dilation, groups=self.groups)) # out = torch.cat(out, 0) return out
pytorch-image-models/timm/layers/cond_conv2d.py/0
{ "file_path": "pytorch-image-models/timm/layers/cond_conv2d.py", "repo_id": "pytorch-image-models", "token_count": 2314 }
168
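# Minimal usage sketch for CondConv2d from the file above. The RoutedCondConv wrapper,
# its sigmoid routing head, and all sizes are illustrative assumptions; only the
# CondConv2d constructor and forward(x, routing_weights) signature come from the source.
import torch
import torch.nn as nn
import torch.nn.functional as F

from timm.layers.cond_conv2d import CondConv2d


class RoutedCondConv(nn.Module):
    """Hypothetical wrapper: pool the input, predict per-sample expert weights, apply CondConv2d."""

    def __init__(self, in_chs: int = 32, out_chs: int = 64, num_experts: int = 4):
        super().__init__()
        self.routing = nn.Linear(in_chs, num_experts)
        self.conv = CondConv2d(in_chs, out_chs, kernel_size=3, num_experts=num_experts)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        pooled = F.adaptive_avg_pool2d(x, 1).flatten(1)        # (B, in_chs)
        routing_weights = torch.sigmoid(self.routing(pooled))  # (B, num_experts), per-sample mixture
        return self.conv(x, routing_weights)                   # kernels mixed per sample, then grouped conv


x = torch.randn(2, 32, 16, 16)
print(RoutedCondConv()(x).shape)  # expected: torch.Size([2, 64, 16, 16])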
""" Global Context Attention Block Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` - https://arxiv.org/abs/1904.11492 Official code consulted as reference: https://github.com/xvjiarui/GCNet Hacked together by / Copyright 2021 Ross Wightman """ from torch import nn as nn import torch.nn.functional as F from .create_act import create_act_layer, get_act_layer from .helpers import make_divisible from .mlp import ConvMlp from .norm import LayerNorm2d class GlobalContext(nn.Module): def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): super(GlobalContext, self).__init__() act_layer = get_act_layer(act_layer) self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None if rd_channels is None: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) if fuse_add: self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_add = None if fuse_scale: self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_scale = None self.gate = create_act_layer(gate_layer) self.init_last_zero = init_last_zero self.reset_parameters() def reset_parameters(self): if self.conv_attn is not None: nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') if self.mlp_add is not None: nn.init.zeros_(self.mlp_add.fc2.weight) def forward(self, x): B, C, H, W = x.shape if self.conv_attn is not None: attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) context = x.reshape(B, C, H * W).unsqueeze(1) @ attn context = context.view(B, C, 1, 1) else: context = x.mean(dim=(2, 3), keepdim=True) if self.mlp_scale is not None: mlp_x = self.mlp_scale(context) x = x * self.gate(mlp_x) if self.mlp_add is not None: mlp_x = self.mlp_add(context) x = x + mlp_x return x
pytorch-image-models/timm/layers/global_context.py/0
{ "file_path": "pytorch-image-models/timm/layers/global_context.py", "repo_id": "pytorch-image-models", "token_count": 1169 }
169
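# Usage sketch for GlobalContext above: it is a shape-preserving (B, C, H, W) block that can be
# dropped in after any conv stage. The channel count, spatial size, and fusion flags here are
# arbitrary example values, not anything prescribed by the source.
import torch

from timm.layers.global_context import GlobalContext

x = torch.randn(2, 64, 14, 14)

gc_scale = GlobalContext(channels=64)                                 # default: multiplicative (scale) fusion
print(gc_scale(x).shape)                                              # torch.Size([2, 64, 14, 14])

gc_add = GlobalContext(channels=64, fuse_add=True, fuse_scale=False)  # additive fusion instead
print(gc_add(x).shape)                                                # torch.Size([2, 64, 14, 14])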
""" Padding Helpers Hacked together by / Copyright 2020 Ross Wightman """ import math from typing import List, Tuple import torch import torch.nn.functional as F # Calculate symmetric padding for a convolution def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return padding # Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int): if isinstance(x, torch.Tensor): return torch.clamp(((x / stride).ceil() - 1) * stride + (kernel_size - 1) * dilation + 1 - x, min=0) else: return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0) # Can SAME padding for given args be done statically? def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 def pad_same_arg( input_size: List[int], kernel_size: List[int], stride: List[int], dilation: List[int] = (1, 1), ) -> List[int]: ih, iw = input_size kh, kw = kernel_size pad_h = get_same_padding(ih, kh, stride[0], dilation[0]) pad_w = get_same_padding(iw, kw, stride[1], dilation[1]) return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2] # Dynamically pad input x with 'SAME' padding for conv with specified args def pad_same( x, kernel_size: List[int], stride: List[int], dilation: List[int] = (1, 1), value: float = 0, ): ih, iw = x.size()[-2:] pad_h = get_same_padding(ih, kernel_size[0], stride[0], dilation[0]) pad_w = get_same_padding(iw, kernel_size[1], stride[1], dilation[1]) x = F.pad(x, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2), value=value) return x def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: dynamic = False if isinstance(padding, str): # for any string padding, the padding will be calculated for you, one of three ways padding = padding.lower() if padding == 'same': # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact if is_static_pad(kernel_size, **kwargs): # static case, no extra overhead padding = get_padding(kernel_size, **kwargs) else: # dynamic 'SAME' padding, has runtime/GPU memory overhead padding = 0 dynamic = True elif padding == 'valid': # 'VALID' padding, same as padding=0 padding = 0 else: # Default to PyTorch style 'same'-ish symmetric padding padding = get_padding(kernel_size, **kwargs) return padding, dynamic
pytorch-image-models/timm/layers/padding.py/0
{ "file_path": "pytorch-image-models/timm/layers/padding.py", "repo_id": "pytorch-image-models", "token_count": 1200 }
170
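# Sketch of how the padding helpers above behave; the kernel/stride/input sizes are arbitrary.
import torch

from timm.layers.padding import get_padding_value, pad_same

# stride 1: 'SAME' padding is independent of input size, so it resolves statically.
print(get_padding_value('same', kernel_size=3, stride=1))   # (1, False)

# stride 2: the required padding depends on the input size, so the dynamic flag is returned.
print(get_padding_value('same', kernel_size=3, stride=2))   # (0, True)

# Dynamic path: pad a 7x7 map so a k=3, s=2 convolution covers every input pixel.
x = torch.randn(1, 8, 7, 7)
print(pad_same(x, kernel_size=[3, 3], stride=[2, 2]).shape)  # torch.Size([1, 8, 9, 9])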
from typing import Callable, Tuple, Type, Union

import torch

LayerType = Union[str, Callable, Type[torch.nn.Module]]
PadType = Union[str, int, Tuple[int, int]]
pytorch-image-models/timm/layers/typing.py/0
{ "file_path": "pytorch-image-models/timm/layers/typing.py", "repo_id": "pytorch-image-models", "token_count": 55 }
171
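# The aliases above just document the accepted "layer spec" and "padding spec" forms used by
# timm layer factories. A tiny, hypothetical annotation example (build_block is not part of timm):
import torch.nn as nn

from timm.layers.typing import LayerType, PadType


def build_block(act_layer: LayerType = nn.ReLU, padding: PadType = 'same') -> None:
    # act_layer may be a string alias, an nn.Module subclass, or any callable factory;
    # padding may be a string mode, a single int, or an explicit (h, w) tuple.
    print(act_layer, padding)


build_block('gelu', 1)
build_block(nn.SiLU, (1, 1))
build_block(lambda: nn.ReLU(), 'same')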
import collections.abc import math import re from collections import defaultdict from itertools import chain from typing import Any, Callable, Dict, Iterator, Tuple, Type, Union import torch from torch import nn as nn from torch.utils.checkpoint import checkpoint __all__ = ['model_parameters', 'named_apply', 'named_modules', 'named_modules_with_params', 'adapt_input_conv', 'group_with_matcher', 'group_modules', 'group_parameters', 'flatten_modules', 'checkpoint_seq'] def model_parameters(model: nn.Module, exclude_head: bool = False): if exclude_head: # FIXME this a bit of a quick and dirty hack to skip classifier head params based on ordering return [p for p in model.parameters()][:-2] else: return model.parameters() def named_apply( fn: Callable, module: nn.Module, name='', depth_first: bool = True, include_root: bool = False, ) -> nn.Module: if not depth_first and include_root: fn(module=module, name=name) for child_name, child_module in module.named_children(): child_name = '.'.join((name, child_name)) if name else child_name named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) if depth_first and include_root: fn(module=module, name=name) return module def named_modules( module: nn.Module, name: str = '', depth_first: bool = True, include_root: bool = False, ): if not depth_first and include_root: yield name, module for child_name, child_module in module.named_children(): child_name = '.'.join((name, child_name)) if name else child_name yield from named_modules( module=child_module, name=child_name, depth_first=depth_first, include_root=True) if depth_first and include_root: yield name, module def named_modules_with_params( module: nn.Module, name: str = '', depth_first: bool = True, include_root: bool = False, ): if module._parameters and not depth_first and include_root: yield name, module for child_name, child_module in module.named_children(): child_name = '.'.join((name, child_name)) if name else child_name yield from named_modules_with_params( module=child_module, name=child_name, depth_first=depth_first, include_root=True) if module._parameters and depth_first and include_root: yield name, module MATCH_PREV_GROUP = (99999,) def group_with_matcher( named_objects: Iterator[Tuple[str, Any]], group_matcher: Union[Dict, Callable], return_values: bool = False, reverse: bool = False ): if isinstance(group_matcher, dict): # dictionary matcher contains a dict of raw-string regex expr that must be compiled compiled = [] for group_ordinal, (group_name, mspec) in enumerate(group_matcher.items()): if mspec is None: continue # map all matching specifications into 3-tuple (compiled re, prefix, suffix) if isinstance(mspec, (tuple, list)): # multi-entry match specifications require each sub-spec to be a 2-tuple (re, suffix) for sspec in mspec: compiled += [(re.compile(sspec[0]), (group_ordinal,), sspec[1])] else: compiled += [(re.compile(mspec), (group_ordinal,), None)] group_matcher = compiled def _get_grouping(name): if isinstance(group_matcher, (list, tuple)): for match_fn, prefix, suffix in group_matcher: r = match_fn.match(name) if r: parts = (prefix, r.groups(), suffix) # map all tuple elem to int for numeric sort, filter out None entries return tuple(map(float, chain.from_iterable(filter(None, parts)))) return float('inf'), # un-matched layers (neck, head) mapped to largest ordinal else: ord = group_matcher(name) if not isinstance(ord, collections.abc.Iterable): return ord, return tuple(ord) # map layers into groups via ordinals (ints or 
tuples of ints) from matcher grouping = defaultdict(list) for k, v in named_objects: grouping[_get_grouping(k)].append(v if return_values else k) # remap to integers layer_id_to_param = defaultdict(list) lid = -1 for k in sorted(filter(lambda x: x is not None, grouping.keys())): if lid < 0 or k[-1] != MATCH_PREV_GROUP[0]: lid += 1 layer_id_to_param[lid].extend(grouping[k]) if reverse: assert not return_values, "reverse mapping only sensible for name output" # output reverse mapping param_to_layer_id = {} for lid, lm in layer_id_to_param.items(): for n in lm: param_to_layer_id[n] = lid return param_to_layer_id return layer_id_to_param def group_parameters( module: nn.Module, group_matcher, return_values: bool = False, reverse: bool = False, ): return group_with_matcher( module.named_parameters(), group_matcher, return_values=return_values, reverse=reverse) def group_modules( module: nn.Module, group_matcher, return_values: bool = False, reverse: bool = False, ): return group_with_matcher( named_modules_with_params(module), group_matcher, return_values=return_values, reverse=reverse) def flatten_modules( named_modules: Iterator[Tuple[str, nn.Module]], depth: int = 1, prefix: Union[str, Tuple[str, ...]] = '', module_types: Union[str, Tuple[Type[nn.Module]]] = 'sequential', ): prefix_is_tuple = isinstance(prefix, tuple) if isinstance(module_types, str): if module_types == 'container': module_types = (nn.Sequential, nn.ModuleList, nn.ModuleDict) else: module_types = (nn.Sequential,) for name, module in named_modules: if depth and isinstance(module, module_types): yield from flatten_modules( module.named_children(), depth - 1, prefix=(name,) if prefix_is_tuple else name, module_types=module_types, ) else: if prefix_is_tuple: name = prefix + (name,) yield name, module else: if prefix: name = '.'.join([prefix, name]) yield name, module def checkpoint_seq( functions, x, every=1, flatten=False, skip_last=False, preserve_rng_state=True ): r"""A helper function for checkpointing sequential models. Sequential models execute a list of modules/functions in order (sequentially). Therefore, we can divide such a sequence into segments and checkpoint each segment. All segments except run in :func:`torch.no_grad` manner, i.e., not storing the intermediate activations. The inputs of each checkpointed segment will be saved for re-running the segment in the backward pass. See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works. .. warning:: Checkpointing currently only supports :func:`torch.autograd.backward` and only if its `inputs` argument is not passed. :func:`torch.autograd.grad` is not supported. .. warning: At least one of the inputs needs to have :code:`requires_grad=True` if grads are needed for model inputs, otherwise the checkpointed part of the model won't have gradients. Args: functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially. x: A Tensor that is input to :attr:`functions` every: checkpoint every-n functions (default: 1) flatten (bool): flatten nn.Sequential of nn.Sequentials skip_last (bool): skip checkpointing the last function in the sequence if True preserve_rng_state (bool, optional, default=True): Omit stashing and restoring the RNG state during each checkpoint. Returns: Output of running :attr:`functions` sequentially on :attr:`*inputs` Example: >>> model = nn.Sequential(...) 
>>> input_var = checkpoint_seq(model, input_var, every=2) """ def run_function(start, end, functions): def forward(_x): for j in range(start, end + 1): _x = functions[j](_x) return _x return forward if isinstance(functions, torch.nn.Sequential): functions = functions.children() if flatten: functions = chain.from_iterable(functions) if not isinstance(functions, (tuple, list)): functions = tuple(functions) num_checkpointed = len(functions) if skip_last: num_checkpointed -= 1 end = -1 for start in range(0, num_checkpointed, every): end = min(start + every - 1, num_checkpointed - 1) x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state) if skip_last: return run_function(end + 1, len(functions) - 1, functions)(x) return x def adapt_input_conv(in_chans, conv_weight): conv_type = conv_weight.dtype conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU O, I, J, K = conv_weight.shape if in_chans == 1: if I > 3: assert conv_weight.shape[1] % 3 == 0 # For models with space2depth stems conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) conv_weight = conv_weight.sum(dim=2, keepdim=False) else: conv_weight = conv_weight.sum(dim=1, keepdim=True) elif in_chans != 3: if I != 3: raise NotImplementedError('Weight format not supported by conversion.') else: # NOTE this strategy should be better than random init, but there could be other combinations of # the original RGB input layer weights that'd work better for specific cases. repeat = int(math.ceil(in_chans / 3)) conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] conv_weight *= (3 / float(in_chans)) conv_weight = conv_weight.to(conv_type) return conv_weight
pytorch-image-models/timm/models/_manipulate.py/0
{ "file_path": "pytorch-image-models/timm/models/_manipulate.py", "repo_id": "pytorch-image-models", "token_count": 4393 }
172
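# Small self-contained sketch of the grouping helpers above: group the parameters of a toy
# nn.Sequential with a regex matcher. The model and matcher dict are made up for illustration;
# only group_parameters comes from the module above.
import torch.nn as nn

from timm.models._manipulate import group_parameters

model = nn.Sequential(
    nn.Conv2d(3, 8, 3, padding=1),   # parameters '0.*' -> stem group
    nn.Conv2d(8, 8, 3, padding=1),   # parameters '1.*', '2.*' -> per-block groups
    nn.Conv2d(8, 8, 3, padding=1),
)

# Each entry maps a group name to a raw-string regex over parameter names; captured digits become
# per-block ordinals and any unmatched parameters fall into a final trailing group.
matcher = dict(stem=r'^0\.', blocks=r'^(\d+)\.')
for layer_id, names in group_parameters(model, matcher).items():
    print(layer_id, names)
# 0 ['0.weight', '0.bias']
# 1 ['1.weight', '1.bias']
# 2 ['2.weight', '2.bias']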
""" ConvNeXt Papers: * `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf @Article{liu2022convnet, author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie}, title = {A ConvNet for the 2020s}, journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2022}, } * `ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808 @article{Woo2023ConvNeXtV2, title={ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders}, author={Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon and Saining Xie}, year={2023}, journal={arXiv preprint arXiv:2301.00808}, } Original code and weights from: * https://github.com/facebookresearch/ConvNeXt, original copyright below * https://github.com/facebookresearch/ConvNeXt-V2, original copyright below Model defs atto, femto, pico, nano and _ols / _hnf variants are timm originals. Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman """ # ConvNeXt # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the MIT license # ConvNeXt-V2 # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree (Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)) # No code was used directly from ConvNeXt-V2, however the weights are CC BY-NC 4.0 so beware if using commercially. from collections import OrderedDict from functools import partial from typing import Callable, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import trunc_normal_, AvgPool2dSame, DropPath, Mlp, GlobalResponseNormMlp, \ LayerNorm2d, LayerNorm, create_conv2d, get_act_layer, make_divisible, to_ntuple from timm.layers import NormMlpClassifierHead, ClassifierHead from ._builder import build_model_with_cfg from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['ConvNeXt'] # model_registry will add each entrypoint fn to this class Downsample(nn.Module): def __init__(self, in_chs, out_chs, stride=1, dilation=1): super().__init__() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() if in_chs != out_chs: self.conv = create_conv2d(in_chs, out_chs, 1, stride=1) else: self.conv = nn.Identity() def forward(self, x): x = self.pool(x) x = self.conv(x) return x class ConvNeXtBlock(nn.Module): """ ConvNeXt Block There are two equivalent implementations: (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back Unlike the official impl, this one allows choice of 1 or 2, 1x1 conv can be faster with appropriate choice of LayerNorm impl, however as model size increases the tradeoffs appear to change and nn.Linear is a better choice. 
This was observed with PyTorch 1.10 on 3090 GPU, it could change over time & w/ different HW. """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 7, stride: int = 1, dilation: Union[int, Tuple[int, int]] = (1, 1), mlp_ratio: float = 4, conv_mlp: bool = False, conv_bias: bool = True, use_grn: bool = False, ls_init_value: Optional[float] = 1e-6, act_layer: Union[str, Callable] = 'gelu', norm_layer: Optional[Callable] = None, drop_path: float = 0., ): """ Args: in_chs: Block input channels. out_chs: Block output channels (same as in_chs if None). kernel_size: Depthwise convolution kernel size. stride: Stride of depthwise convolution. dilation: Tuple specifying input and output dilation of block. mlp_ratio: MLP expansion ratio. conv_mlp: Use 1x1 convolutions for MLP and a NCHW compatible norm layer if True. conv_bias: Apply bias for all convolution (linear) layers. use_grn: Use GlobalResponseNorm in MLP (from ConvNeXt-V2) ls_init_value: Layer-scale init values, layer-scale applied if not None. act_layer: Activation layer. norm_layer: Normalization layer (defaults to LN if not specified). drop_path: Stochastic depth probability. """ super().__init__() out_chs = out_chs or in_chs dilation = to_ntuple(2)(dilation) act_layer = get_act_layer(act_layer) if not norm_layer: norm_layer = LayerNorm2d if conv_mlp else LayerNorm mlp_layer = partial(GlobalResponseNormMlp if use_grn else Mlp, use_conv=conv_mlp) self.use_conv_mlp = conv_mlp self.conv_dw = create_conv2d( in_chs, out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation[0], depthwise=True, bias=conv_bias, ) self.norm = norm_layer(out_chs) self.mlp = mlp_layer(out_chs, int(mlp_ratio * out_chs), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(out_chs)) if ls_init_value is not None else None if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: self.shortcut = Downsample(in_chs, out_chs, stride=stride, dilation=dilation[0]) else: self.shortcut = nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x x = self.conv_dw(x) if self.use_conv_mlp: x = self.norm(x) x = self.mlp(x) else: x = x.permute(0, 2, 3, 1) x = self.norm(x) x = self.mlp(x) x = x.permute(0, 3, 1, 2) if self.gamma is not None: x = x.mul(self.gamma.reshape(1, -1, 1, 1)) x = self.drop_path(x) + self.shortcut(shortcut) return x class ConvNeXtStage(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=7, stride=2, depth=2, dilation=(1, 1), drop_path_rates=None, ls_init_value=1.0, conv_mlp=False, conv_bias=True, use_grn=False, act_layer='gelu', norm_layer=None, norm_layer_cl=None ): super().__init__() self.grad_checkpointing = False if in_chs != out_chs or stride > 1 or dilation[0] != dilation[1]: ds_ks = 2 if stride > 1 or dilation[0] != dilation[1] else 1 pad = 'same' if dilation[1] > 1 else 0 # same padding needed if dilation used self.downsample = nn.Sequential( norm_layer(in_chs), create_conv2d( in_chs, out_chs, kernel_size=ds_ks, stride=stride, dilation=dilation[0], padding=pad, bias=conv_bias, ), ) in_chs = out_chs else: self.downsample = nn.Identity() drop_path_rates = drop_path_rates or [0.] 
* depth stage_blocks = [] for i in range(depth): stage_blocks.append(ConvNeXtBlock( in_chs=in_chs, out_chs=out_chs, kernel_size=kernel_size, dilation=dilation[1], drop_path=drop_path_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp, conv_bias=conv_bias, use_grn=use_grn, act_layer=act_layer, norm_layer=norm_layer if conv_mlp else norm_layer_cl, )) in_chs = out_chs self.blocks = nn.Sequential(*stage_blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class ConvNeXt(nn.Module): r""" ConvNeXt A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf """ def __init__( self, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', output_stride: int = 32, depths: Tuple[int, ...] = (3, 3, 9, 3), dims: Tuple[int, ...] = (96, 192, 384, 768), kernel_sizes: Union[int, Tuple[int, ...]] = 7, ls_init_value: Optional[float] = 1e-6, stem_type: str = 'patch', patch_size: int = 4, head_init_scale: float = 1., head_norm_first: bool = False, head_hidden_size: Optional[int] = None, conv_mlp: bool = False, conv_bias: bool = True, use_grn: bool = False, act_layer: Union[str, Callable] = 'gelu', norm_layer: Optional[Union[str, Callable]] = None, norm_eps: Optional[float] = None, drop_rate: float = 0., drop_path_rate: float = 0., ): """ Args: in_chans: Number of input image channels. num_classes: Number of classes for classification head. global_pool: Global pooling type. output_stride: Output stride of network, one of (8, 16, 32). depths: Number of blocks at each stage. dims: Feature dimension at each stage. kernel_sizes: Depthwise convolution kernel-sizes for each stage. ls_init_value: Init value for Layer Scale, disabled if None. stem_type: Type of stem. patch_size: Stem patch size for patch stem. head_init_scale: Init scaling value for classifier weights and biases. head_norm_first: Apply normalization before global pool + head. head_hidden_size: Size of MLP hidden layer in head if not None and head_norm_first == False. conv_mlp: Use 1x1 conv in MLP, improves speed for small networks w/ chan last. conv_bias: Use bias layers w/ all convolutions. use_grn: Use Global Response Norm (ConvNeXt-V2) in MLP. act_layer: Activation layer type. norm_layer: Normalization layer type. drop_rate: Head pre-classifier dropout rate. drop_path_rate: Stochastic depth drop rate. 
""" super().__init__() assert output_stride in (8, 16, 32) kernel_sizes = to_ntuple(4)(kernel_sizes) if norm_layer is None: norm_layer = LayerNorm2d norm_layer_cl = norm_layer if conv_mlp else LayerNorm if norm_eps is not None: norm_layer = partial(norm_layer, eps=norm_eps) norm_layer_cl = partial(norm_layer_cl, eps=norm_eps) else: assert conv_mlp,\ 'If a norm_layer is specified, conv MLP must be used so all norm expect rank-4, channels-first input' norm_layer_cl = norm_layer if norm_eps is not None: norm_layer_cl = partial(norm_layer_cl, eps=norm_eps) self.num_classes = num_classes self.drop_rate = drop_rate self.feature_info = [] assert stem_type in ('patch', 'overlap', 'overlap_tiered') if stem_type == 'patch': # NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4 self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size, bias=conv_bias), norm_layer(dims[0]), ) stem_stride = patch_size else: mid_chs = make_divisible(dims[0] // 2) if 'tiered' in stem_type else dims[0] self.stem = nn.Sequential( nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias), nn.Conv2d(mid_chs, dims[0], kernel_size=3, stride=2, padding=1, bias=conv_bias), norm_layer(dims[0]), ) stem_stride = 4 self.stages = nn.Sequential() dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] prev_chs = dims[0] curr_stride = stem_stride dilation = 1 # 4 feature resolution stages, each consisting of multiple residual blocks for i in range(4): stride = 2 if curr_stride == 2 or i > 0 else 1 if curr_stride >= output_stride and stride > 1: dilation *= stride stride = 1 curr_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 out_chs = dims[i] stages.append(ConvNeXtStage( prev_chs, out_chs, kernel_size=kernel_sizes[i], stride=stride, dilation=(first_dilation, dilation), depth=depths[i], drop_path_rates=dp_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp, conv_bias=conv_bias, use_grn=use_grn, act_layer=act_layer, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl, )) prev_chs = out_chs # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = prev_chs # if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets # otherwise pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights) if head_norm_first: assert not head_hidden_size self.norm_pre = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) else: self.norm_pre = nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, hidden_size=head_hidden_size, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer, act_layer='gelu', ) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.downsample', (0,)), # blocks (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^norm_pre', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes=0, 
global_pool=None): self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm_pre(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_(module.weight, std=.02) nn.init.zeros_(module.bias) if name and 'head.' in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def checkpoint_filter_fn(state_dict, model): """ Remap FB checkpoints -> timm """ if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: return state_dict # non-FB checkpoint if 'model' in state_dict: state_dict = state_dict['model'] out_dict = {} if 'visual.trunk.stem.0.weight' in state_dict: out_dict = {k.replace('visual.trunk.', ''): v for k, v in state_dict.items() if k.startswith('visual.trunk.')} if 'visual.head.proj.weight' in state_dict: out_dict['head.fc.weight'] = state_dict['visual.head.proj.weight'] out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.proj.weight'].shape[0]) elif 'visual.head.mlp.fc1.weight' in state_dict: out_dict['head.pre_logits.fc.weight'] = state_dict['visual.head.mlp.fc1.weight'] out_dict['head.pre_logits.fc.bias'] = state_dict['visual.head.mlp.fc1.bias'] out_dict['head.fc.weight'] = state_dict['visual.head.mlp.fc2.weight'] out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.mlp.fc2.weight'].shape[0]) return out_dict import re for k, v in state_dict.items(): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k) k = k.replace('dwconv', 'conv_dw') k = k.replace('pwconv', 'mlp.fc') if 'grn' in k: k = k.replace('grn.beta', 'mlp.grn.bias') k = k.replace('grn.gamma', 'mlp.grn.weight') v = v.reshape(v.shape[-1]) k = k.replace('head.', 'head.fc.') if k.startswith('norm.'): k = k.replace('norm', 'head.norm') if v.ndim == 2 and 'head' not in k: model_shape = model.state_dict()[k].shape v = v.reshape(model_shape) out_dict[k] = v return out_dict def _create_convnext(variant, pretrained=False, **kwargs): if kwargs.get('pretrained_cfg', '') == 'fcmae': # NOTE fcmae pretrained weights have no classifier or final norm-layer (`head.norm`) # This is workaround loading with num_classes=0 w/o removing norm-layer. 
kwargs.setdefault('pretrained_strict', False) model = build_model_with_cfg( ConvNeXt, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', **kwargs } def _cfgv2(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', 'license': 'cc-by-nc-4.0', 'paper_ids': 'arXiv:2301.00808', 'paper_name': 'ConvNeXt-V2: Co-designing and Scaling ConvNets with Masked Autoencoders', 'origin_url': 'https://github.com/facebookresearch/ConvNeXt-V2', **kwargs } default_cfgs = generate_default_cfgs({ # timm specific variants 'convnext_tiny.in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_atto.d2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_d2-01bb0f51.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_atto_ols.a2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_ols_a2-78d1c8f3.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_femto.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_d1-d71d5b4c.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_femto_ols.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_ols_d1-246bf2ed.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_pico.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_d1-10ad7f0d.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_pico_ols.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_ols_d1-611f0ca7.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano.in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano.d1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_d1h-7eb4bdea.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano_ols.d1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_ols_d1h-ae424a9a.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny_hnf.a2h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_tiny_hnf_a2h-ab7e9df2.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 
288, 288), test_crop_pct=1.0), 'convnext_tiny.in12k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_small.in12k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_nano.in12k': _cfg( hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_tiny.in12k': _cfg( hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_small.in12k': _cfg( hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_tiny.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_base.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_large.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_xlarge.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_base.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_large.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_small.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_base.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_xlarge.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_tiny.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_small.fb_in22k': _cfg( 
url="https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_base.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_large.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_xlarge.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnextv2_nano.fcmae_ft_in22k_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_nano.fcmae_ft_in22k_in1k_384': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_tiny.fcmae_ft_in22k_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_tiny.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_base.fcmae_ft_in22k_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_base.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_large.fcmae_ft_in22k_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_large.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_huge.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_huge.fcmae_ft_in22k_in1k_512': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt", hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(15, 15), crop_pct=1.0, crop_mode='squash'), 'convnextv2_atto.fcmae_ft_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_femto.fcmae_ft_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_pico.fcmae_ft_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_nano.fcmae_ft_in1k': _cfgv2( 
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_tiny.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_base.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_large.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_huge.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_atto.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_atto_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_femto.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_femto_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_pico.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_pico_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_nano.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_nano_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_tiny.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_tiny_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_base.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_base_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_large.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_large_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_huge.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_huge_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_small.untrained': _cfg(), # CLIP weights, fine-tuned on in1k or in12k + in1k 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_base.clip_laion2b_augreg_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laiona_augreg_ft_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 
'convnext_large_mlp.clip_laion2b_augreg_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0 ), 'convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash' ), 'convnext_xxlarge.clip_laion2b_soup_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laion2b_augreg_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_320': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_augreg_ft_in12k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_xxlarge.clip_laion2b_soup_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), # CLIP original image tower weights 'convnext_base.clip_laion2b': _cfg( hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laion2b_augreg': _cfg( hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K-augreg', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona': _cfg( hf_hub_id='laion/CLIP-convnext_base_w-laion_aesthetic-s13B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona_320': _cfg( hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona_augreg_320': _cfg( hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K-augreg', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640), 'convnext_large_mlp.clip_laion2b_augreg': _cfg( hf_hub_id='laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=768), 'convnext_large_mlp.clip_laion2b_ft_320': _cfg( hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, 
num_classes=768), 'convnext_large_mlp.clip_laion2b_ft_soup_320': _cfg( hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768), 'convnext_xxlarge.clip_laion2b_soup': _cfg( hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024), 'convnext_xxlarge.clip_laion2b_rewind': _cfg( hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-rewind', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024), }) @register_model def convnext_atto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True) model = _create_convnext('convnext_atto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_atto_ols(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant with overlapping 3x3 conv stem, wider than non-ols femto above, current param count 3.7M model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_atto_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_femto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True) model = _create_convnext('convnext_femto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_femto_ols(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_femto_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_pico(pretrained=False, **kwargs) -> ConvNeXt: # timm pico variant model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True) model = _create_convnext('convnext_pico', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_pico_ols(pretrained=False, **kwargs) -> ConvNeXt: # timm nano variant with overlapping 3x3 conv stem model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_pico_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_nano(pretrained=False, **kwargs) -> ConvNeXt: # timm nano variant with standard stem and head model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True) model = _create_convnext('convnext_nano', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_nano_ols(pretrained=False, **kwargs) -> ConvNeXt: # experimental nano variant with overlapping conv stem model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True, stem_type='overlap') model = _create_convnext('convnext_nano_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model 
@register_model def convnext_tiny_hnf(pretrained=False, **kwargs) -> ConvNeXt: # experimental tiny variant with norm before pooling in head (head norm first) model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), head_norm_first=True, conv_mlp=True) model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_tiny(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768)) model = _create_convnext('convnext_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_small(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768]) model = _create_convnext('convnext_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_base(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024]) model = _create_convnext('convnext_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_large(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536]) model = _create_convnext('convnext_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_large_mlp(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], head_hidden_size=1536) model = _create_convnext('convnext_large_mlp', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_xlarge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048]) model = _create_convnext('convnext_xlarge', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_xxlarge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 4, 30, 3], dims=[384, 768, 1536, 3072], norm_eps=kwargs.pop('norm_eps', 1e-5)) model = _create_convnext('convnext_xxlarge', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_atto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M model_args = dict( depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_atto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_femto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant model_args = dict( depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_femto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_pico(pretrained=False, **kwargs) -> ConvNeXt: # timm pico variant model_args = dict( depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_pico', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_nano(pretrained=False, **kwargs) -> ConvNeXt: # timm nano variant with standard stem and head model_args = dict( depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_nano', 
pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_tiny(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_small(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_base(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_large(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_huge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_huge', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { 'convnext_tiny_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k', 'convnext_small_in22ft1k': 'convnext_small.fb_in22k_ft_in1k', 'convnext_base_in22ft1k': 'convnext_base.fb_in22k_ft_in1k', 'convnext_large_in22ft1k': 'convnext_large.fb_in22k_ft_in1k', 'convnext_xlarge_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k', 'convnext_tiny_384_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k_384', 'convnext_small_384_in22ft1k': 'convnext_small.fb_in22k_ft_in1k_384', 'convnext_base_384_in22ft1k': 'convnext_base.fb_in22k_ft_in1k_384', 'convnext_large_384_in22ft1k': 'convnext_large.fb_in22k_ft_in1k_384', 'convnext_xlarge_384_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k_384', 'convnext_tiny_in22k': 'convnext_tiny.fb_in22k', 'convnext_small_in22k': 'convnext_small.fb_in22k', 'convnext_base_in22k': 'convnext_base.fb_in22k', 'convnext_large_in22k': 'convnext_large.fb_in22k', 'convnext_xlarge_in22k': 'convnext_xlarge.fb_in22k', })
pytorch-image-models/timm/models/convnext.py/0
{ "file_path": "pytorch-image-models/timm/models/convnext.py", "repo_id": "pytorch-image-models", "token_count": 24539 }
173
# FastViT for PyTorch # # Original implementation and weights from https://github.com/apple/ml-fastvit # # For licensing see accompanying LICENSE file at https://github.com/apple/ml-fastvit/tree/main # Original work is copyright (C) 2023 Apple Inc. All Rights Reserved. # import os from functools import partial from typing import Tuple, Optional, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, create_conv2d, ConvNormAct, SqueezeExcite, use_fused_attn, \ ClassifierHead from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs def num_groups(group_size, channels): if not group_size: # 0 or None return 1 # normal conv with 1 group else: # NOTE group_size == 1 -> depthwise conv assert channels % group_size == 0 return channels // group_size class MobileOneBlock(nn.Module): """MobileOne building block. This block has a multi-branched architecture at train-time and plain-CNN style architecture at inference time For more details, please refer to our paper: `An Improved One millisecond Mobile Backbone` - https://arxiv.org/pdf/2206.04040.pdf """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int, stride: int = 1, dilation: int = 1, group_size: int = 0, inference_mode: bool = False, use_se: bool = False, use_act: bool = True, use_scale_branch: bool = True, num_conv_branches: int = 1, act_layer: nn.Module = nn.GELU, ) -> None: """Construct a MobileOneBlock module. Args: in_chs: Number of channels in the input. out_chs: Number of channels produced by the block. kernel_size: Size of the convolution kernel. stride: Stride size. dilation: Kernel dilation factor. group_size: Convolution group size. inference_mode: If True, instantiates model in inference mode. use_se: Whether to use SE-ReLU activations. use_act: Whether to use activation. Default: ``True`` use_scale_branch: Whether to use scale branch. Default: ``True`` num_conv_branches: Number of linear conv branches. """ super(MobileOneBlock, self).__init__() self.inference_mode = inference_mode self.groups = num_groups(group_size, in_chs) self.stride = stride self.dilation = dilation self.kernel_size = kernel_size self.in_chs = in_chs self.out_chs = out_chs self.num_conv_branches = num_conv_branches # Check if SE-ReLU is requested self.se = SqueezeExcite(out_chs, rd_divisor=1) if use_se else nn.Identity() if inference_mode: self.reparam_conv = create_conv2d( in_chs, out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=self.groups, bias=True, ) else: # Re-parameterizable skip connection self.reparam_conv = None self.identity = ( nn.BatchNorm2d(num_features=in_chs) if out_chs == in_chs and stride == 1 else None ) # Re-parameterizable conv branches if num_conv_branches > 0: self.conv_kxk = nn.ModuleList([ ConvNormAct( self.in_chs, self.out_chs, kernel_size=kernel_size, stride=self.stride, groups=self.groups, apply_act=False, ) for _ in range(self.num_conv_branches) ]) else: self.conv_kxk = None # Re-parameterizable scale branch self.conv_scale = None if kernel_size > 1 and use_scale_branch: self.conv_scale = ConvNormAct( self.in_chs, self.out_chs, kernel_size=1, stride=self.stride, groups=self.groups, apply_act=False ) self.act = act_layer() if use_act else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: """Apply forward pass.""" # Inference mode forward pass. 
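        # Once the branches have been fused (either inference_mode=True at construction
        # or after reparameterize() has been called), only the single reparam_conv path
        # below is used, followed by optional SE and activation.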
if self.reparam_conv is not None: return self.act(self.se(self.reparam_conv(x))) # Multi-branched train-time forward pass. # Identity branch output identity_out = 0 if self.identity is not None: identity_out = self.identity(x) # Scale branch output scale_out = 0 if self.conv_scale is not None: scale_out = self.conv_scale(x) # Other kxk conv branches out = scale_out + identity_out if self.conv_kxk is not None: for rc in self.conv_kxk: out += rc(x) return self.act(self.se(out)) def reparameterize(self): """Following works like `RepVGG: Making VGG-style ConvNets Great Again` - https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched architecture used at training time to obtain a plain CNN-like structure for inference. """ if self.reparam_conv is not None: return kernel, bias = self._get_kernel_bias() self.reparam_conv = create_conv2d( in_channels=self.in_chs, out_channels=self.out_chs, kernel_size=self.kernel_size, stride=self.stride, dilation=self.dilation, groups=self.groups, bias=True, ) self.reparam_conv.weight.data = kernel self.reparam_conv.bias.data = bias # Delete un-used branches for name, para in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__("conv_kxk") self.__delattr__("conv_scale") if hasattr(self, "identity"): self.__delattr__("identity") self.inference_mode = True def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: """Method to obtain re-parameterized kernel and bias. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83 Returns: Tuple of (kernel, bias) after fusing branches. """ # get weights and bias of scale branch kernel_scale = 0 bias_scale = 0 if self.conv_scale is not None: kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale) # Pad scale branch kernel to match conv branch kernel size. pad = self.kernel_size // 2 kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad]) # get weights and bias of skip branch kernel_identity = 0 bias_identity = 0 if self.identity is not None: kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity) # get weights and bias of conv branches kernel_conv = 0 bias_conv = 0 if self.conv_kxk is not None: for ix in range(self.num_conv_branches): _kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix]) kernel_conv += _kernel bias_conv += _bias kernel_final = kernel_conv + kernel_scale + kernel_identity bias_final = bias_conv + bias_scale + bias_identity return kernel_final, bias_final def _fuse_bn_tensor( self, branch: Union[nn.Sequential, nn.BatchNorm2d] ) -> Tuple[torch.Tensor, torch.Tensor]: """Method to fuse batchnorm layer with preceeding conv layer. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95 Args: branch: Sequence of ops to be fused. Returns: Tuple of (kernel, bias) after fusing batchnorm. 
""" if isinstance(branch, ConvNormAct): kernel = branch.conv.weight running_mean = branch.bn.running_mean running_var = branch.bn.running_var gamma = branch.bn.weight beta = branch.bn.bias eps = branch.bn.eps else: assert isinstance(branch, nn.BatchNorm2d) if not hasattr(self, "id_tensor"): input_dim = self.in_chs // self.groups kernel_value = torch.zeros( (self.in_chs, input_dim, self.kernel_size, self.kernel_size), dtype=branch.weight.dtype, device=branch.weight.device, ) for i in range(self.in_chs): kernel_value[ i, i % input_dim, self.kernel_size // 2, self.kernel_size // 2 ] = 1 self.id_tensor = kernel_value kernel = self.id_tensor running_mean = branch.running_mean running_var = branch.running_var gamma = branch.weight beta = branch.bias eps = branch.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return kernel * t, beta - running_mean * gamma / std class ReparamLargeKernelConv(nn.Module): """Building Block of RepLKNet This class defines overparameterized large kernel conv block introduced in `RepLKNet <https://arxiv.org/abs/2203.06717>`_ Reference: https://github.com/DingXiaoH/RepLKNet-pytorch """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int, stride: int, group_size: int, small_kernel: Optional[int] = None, inference_mode: bool = False, act_layer: Optional[nn.Module] = None, ) -> None: """Construct a ReparamLargeKernelConv module. Args: in_chs: Number of input channels. out_chs: Number of output channels. kernel_size: Kernel size of the large kernel conv branch. stride: Stride size. Default: 1 group_size: Group size. Default: 1 small_kernel: Kernel size of small kernel conv branch. inference_mode: If True, instantiates model in inference mode. Default: ``False`` act_layer: Activation module. Default: ``nn.GELU`` """ super(ReparamLargeKernelConv, self).__init__() self.stride = stride self.groups = num_groups(group_size, in_chs) self.in_chs = in_chs self.out_chs = out_chs self.kernel_size = kernel_size self.small_kernel = small_kernel if inference_mode: self.reparam_conv = create_conv2d( in_chs, out_chs, kernel_size=kernel_size, stride=stride, dilation=1, groups=self.groups, bias=True, ) else: self.reparam_conv = None self.large_conv = ConvNormAct( in_chs, out_chs, kernel_size=kernel_size, stride=self.stride, groups=self.groups, apply_act=False, ) if small_kernel is not None: assert ( small_kernel <= kernel_size ), "The kernel size for re-param cannot be larger than the large kernel!" self.small_conv = ConvNormAct( in_chs, out_chs, kernel_size=small_kernel, stride=self.stride, groups=self.groups, apply_act=False, ) # FIXME output of this act was not used in original impl, likely due to bug self.act = act_layer() if act_layer is not None else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: if self.reparam_conv is not None: out = self.reparam_conv(x) else: out = self.large_conv(x) if self.small_conv is not None: out = out + self.small_conv(x) out = self.act(out) return out def get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: """Method to obtain re-parameterized kernel and bias. Reference: https://github.com/DingXiaoH/RepLKNet-pytorch Returns: Tuple of (kernel, bias) after fusing branches. 
""" eq_k, eq_b = self._fuse_bn(self.large_conv.conv, self.large_conv.bn) if hasattr(self, "small_conv"): small_k, small_b = self._fuse_bn(self.small_conv.conv, self.small_conv.bn) eq_b += small_b eq_k += nn.functional.pad( small_k, [(self.kernel_size - self.small_kernel) // 2] * 4 ) return eq_k, eq_b def reparameterize(self) -> None: """ Following works like `RepVGG: Making VGG-style ConvNets Great Again` - https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched architecture used at training time to obtain a plain CNN-like structure for inference. """ eq_k, eq_b = self.get_kernel_bias() self.reparam_conv = create_conv2d( self.in_chs, self.out_chs, kernel_size=self.kernel_size, stride=self.stride, groups=self.groups, bias=True, ) self.reparam_conv.weight.data = eq_k self.reparam_conv.bias.data = eq_b self.__delattr__("large_conv") if hasattr(self, "small_conv"): self.__delattr__("small_conv") @staticmethod def _fuse_bn( conv: torch.Tensor, bn: nn.BatchNorm2d ) -> Tuple[torch.Tensor, torch.Tensor]: """Method to fuse batchnorm layer with conv layer. Args: conv: Convolutional kernel weights. bn: Batchnorm 2d layer. Returns: Tuple of (kernel, bias) after fusing batchnorm. """ kernel = conv.weight running_mean = bn.running_mean running_var = bn.running_var gamma = bn.weight beta = bn.bias eps = bn.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return kernel * t, beta - running_mean * gamma / std def convolutional_stem( in_chs: int, out_chs: int, act_layer: nn.Module = nn.GELU, inference_mode: bool = False ) -> nn.Sequential: """Build convolutional stem with MobileOne blocks. Args: in_chs: Number of input channels. out_chs: Number of output channels. inference_mode: Flag to instantiate model in inference mode. Default: ``False`` Returns: nn.Sequential object with stem elements. """ return nn.Sequential( MobileOneBlock( in_chs=in_chs, out_chs=out_chs, kernel_size=3, stride=2, act_layer=act_layer, inference_mode=inference_mode, ), MobileOneBlock( in_chs=out_chs, out_chs=out_chs, kernel_size=3, stride=2, group_size=1, act_layer=act_layer, inference_mode=inference_mode, ), MobileOneBlock( in_chs=out_chs, out_chs=out_chs, kernel_size=1, stride=1, act_layer=act_layer, inference_mode=inference_mode, ), ) class Attention(nn.Module): """Multi-headed Self Attention module. Source modified from: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py """ fused_attn: torch.jit.Final[bool] def __init__( self, dim: int, head_dim: int = 32, qkv_bias: bool = False, attn_drop: float = 0.0, proj_drop: float = 0.0, ) -> None: """Build MHSA module that can handle 3D or 4D input tensors. Args: dim: Number of embedding dimensions. head_dim: Number of hidden dimensions per head. Default: ``32`` qkv_bias: Use bias or not. Default: ``False`` attn_drop: Dropout rate for attention tensor. proj_drop: Dropout rate for projection tensor. 
""" super().__init__() assert dim % head_dim == 0, "dim should be divisible by head_dim" self.head_dim = head_dim self.num_heads = dim // head_dim self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x: torch.Tensor) -> torch.Tensor: B, C, H, W = x.shape N = H * W x = x.flatten(2).transpose(-2, -1) # (B, N, C) qkv = ( self.qkv(x) .reshape(B, N, 3, self.num_heads, self.head_dim) .permute(2, 0, 3, 1, 4) ) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) x = x.transpose(-2, -1).reshape(B, C, H, W) return x class PatchEmbed(nn.Module): """Convolutional patch embedding layer.""" def __init__( self, patch_size: int, stride: int, in_chs: int, embed_dim: int, act_layer: nn.Module = nn.GELU, lkc_use_act: bool = False, inference_mode: bool = False, ) -> None: """Build patch embedding layer. Args: patch_size: Patch size for embedding computation. stride: Stride for convolutional embedding layer. in_chs: Number of channels of input tensor. embed_dim: Number of embedding dimensions. inference_mode: Flag to instantiate model in inference mode. Default: ``False`` """ super().__init__() self.proj = nn.Sequential( ReparamLargeKernelConv( in_chs=in_chs, out_chs=embed_dim, kernel_size=patch_size, stride=stride, group_size=1, small_kernel=3, inference_mode=inference_mode, act_layer=act_layer if lkc_use_act else None, # NOTE original weights didn't use this act ), MobileOneBlock( in_chs=embed_dim, out_chs=embed_dim, kernel_size=1, stride=1, act_layer=act_layer, inference_mode=inference_mode, ) ) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.proj(x) return x class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim, 1, 1)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class RepMixer(nn.Module): """Reparameterizable token mixer. For more details, please refer to our paper: `FastViT: A Fast Hybrid Vision Transformer using Structural Reparameterization <https://arxiv.org/pdf/2303.14189.pdf>`_ """ def __init__( self, dim, kernel_size=3, layer_scale_init_value=1e-5, inference_mode: bool = False, ): """Build RepMixer Module. Args: dim: Input feature map dimension. :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, H, W)`. kernel_size: Kernel size for spatial mixing. Default: 3 layer_scale_init_value: Initial value for layer scale. Default: 1e-5 inference_mode: If True, instantiates model in inference mode. 
Default: ``False`` """ super().__init__() self.dim = dim self.kernel_size = kernel_size self.inference_mode = inference_mode if inference_mode: self.reparam_conv = nn.Conv2d( self.dim, self.dim, kernel_size=self.kernel_size, stride=1, padding=self.kernel_size // 2, groups=self.dim, bias=True, ) else: self.reparam_conv = None self.norm = MobileOneBlock( dim, dim, kernel_size, group_size=1, use_act=False, use_scale_branch=False, num_conv_branches=0, ) self.mixer = MobileOneBlock( dim, dim, kernel_size, group_size=1, use_act=False, ) if layer_scale_init_value is not None: self.layer_scale = LayerScale2d(dim, layer_scale_init_value) else: self.layer_scale = nn.Identity def forward(self, x: torch.Tensor) -> torch.Tensor: if self.reparam_conv is not None: x = self.reparam_conv(x) else: x = x + self.layer_scale(self.mixer(x) - self.norm(x)) return x def reparameterize(self) -> None: """Reparameterize mixer and norm into a single convolutional layer for efficient inference. """ if self.inference_mode: return self.mixer.reparameterize() self.norm.reparameterize() if isinstance(self.layer_scale, LayerScale2d): w = self.mixer.id_tensor + self.layer_scale.gamma.unsqueeze(-1) * ( self.mixer.reparam_conv.weight - self.norm.reparam_conv.weight ) b = torch.squeeze(self.layer_scale.gamma) * ( self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias ) else: w = ( self.mixer.id_tensor + self.mixer.reparam_conv.weight - self.norm.reparam_conv.weight ) b = self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias self.reparam_conv = create_conv2d( self.dim, self.dim, kernel_size=self.kernel_size, stride=1, groups=self.dim, bias=True, ) self.reparam_conv.weight.data = w self.reparam_conv.bias.data = b for name, para in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__("mixer") self.__delattr__("norm") self.__delattr__("layer_scale") class ConvMlp(nn.Module): """Convolutional FFN Module.""" def __init__( self, in_chs: int, hidden_channels: Optional[int] = None, out_chs: Optional[int] = None, act_layer: nn.Module = nn.GELU, drop: float = 0.0, ) -> None: """Build convolutional FFN module. Args: in_chs: Number of input channels. hidden_channels: Number of channels after expansion. Default: None out_chs: Number of output channels. Default: None act_layer: Activation layer. Default: ``GELU`` drop: Dropout rate. Default: ``0.0``. """ super().__init__() out_chs = out_chs or in_chs hidden_channels = hidden_channels or in_chs self.conv = ConvNormAct( in_chs, out_chs, kernel_size=7, groups=in_chs, apply_act=False, ) self.fc1 = nn.Conv2d(in_chs, hidden_channels, kernel_size=1) self.act = act_layer() self.fc2 = nn.Conv2d(hidden_channels, out_chs, kernel_size=1) self.drop = nn.Dropout(drop) self.apply(self._init_weights) def _init_weights(self, m: nn.Module) -> None: if isinstance(m, nn.Conv2d): trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.conv(x) x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class RepConditionalPosEnc(nn.Module): """Implementation of conditional positional encoding. For more details refer to paper: `Conditional Positional Encodings for Vision Transformers <https://arxiv.org/pdf/2102.10882.pdf>`_ In our implementation, we can reparameterize this module to eliminate a skip connection. 
""" def __init__( self, dim: int, dim_out: Optional[int] = None, spatial_shape: Union[int, Tuple[int, int]] = (7, 7), inference_mode=False, ) -> None: """Build reparameterizable conditional positional encoding Args: dim: Number of input channels. dim_out: Number of embedding dimensions. Default: 768 spatial_shape: Spatial shape of kernel for positional encoding. Default: (7, 7) inference_mode: Flag to instantiate block in inference mode. Default: ``False`` """ super(RepConditionalPosEnc, self).__init__() if isinstance(spatial_shape, int): spatial_shape = tuple([spatial_shape] * 2) assert isinstance(spatial_shape, Tuple), ( f'"spatial_shape" must by a sequence or int, ' f"get {type(spatial_shape)} instead." ) assert len(spatial_shape) == 2, ( f'Length of "spatial_shape" should be 2, ' f"got {len(spatial_shape)} instead." ) self.spatial_shape = spatial_shape self.dim = dim self.dim_out = dim_out or dim self.groups = dim if inference_mode: self.reparam_conv = nn.Conv2d( self.dim, self.dim_out, kernel_size=self.spatial_shape, stride=1, padding=spatial_shape[0] // 2, groups=self.groups, bias=True, ) else: self.reparam_conv = None self.pos_enc = nn.Conv2d( self.dim, self.dim_out, spatial_shape, 1, int(spatial_shape[0] // 2), groups=self.groups, bias=True, ) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.reparam_conv is not None: x = self.reparam_conv(x) else: x = self.pos_enc(x) + x return x def reparameterize(self) -> None: # Build equivalent Id tensor input_dim = self.dim // self.groups kernel_value = torch.zeros( ( self.dim, input_dim, self.spatial_shape[0], self.spatial_shape[1], ), dtype=self.pos_enc.weight.dtype, device=self.pos_enc.weight.device, ) for i in range(self.dim): kernel_value[ i, i % input_dim, self.spatial_shape[0] // 2, self.spatial_shape[1] // 2, ] = 1 id_tensor = kernel_value # Reparameterize Id tensor and conv w_final = id_tensor + self.pos_enc.weight b_final = self.pos_enc.bias # Introduce reparam conv self.reparam_conv = nn.Conv2d( self.dim, self.dim_out, kernel_size=self.spatial_shape, stride=1, padding=int(self.spatial_shape[0] // 2), groups=self.groups, bias=True, ) self.reparam_conv.weight.data = w_final self.reparam_conv.bias.data = b_final for name, para in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__("pos_enc") class RepMixerBlock(nn.Module): """Implementation of Metaformer block with RepMixer as token mixer. For more details on Metaformer structure, please refer to: `MetaFormer Is Actually What You Need for Vision <https://arxiv.org/pdf/2111.11418.pdf>`_ """ def __init__( self, dim: int, kernel_size: int = 3, mlp_ratio: float = 4.0, act_layer: nn.Module = nn.GELU, proj_drop: float = 0.0, drop_path: float = 0.0, layer_scale_init_value: float = 1e-5, inference_mode: bool = False, ): """Build RepMixer Block. Args: dim: Number of embedding dimensions. kernel_size: Kernel size for repmixer. Default: 3 mlp_ratio: MLP expansion ratio. Default: 4.0 act_layer: Activation layer. Default: ``nn.GELU`` proj_drop: Dropout rate. Default: 0.0 drop_path: Drop path rate. Default: 0.0 layer_scale_init_value: Layer scale value at initialization. Default: 1e-5 inference_mode: Flag to instantiate block in inference mode. 
Default: ``False`` """ super().__init__() self.token_mixer = RepMixer( dim, kernel_size=kernel_size, layer_scale_init_value=layer_scale_init_value, inference_mode=inference_mode, ) self.mlp = ConvMlp( in_chs=dim, hidden_channels=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) if layer_scale_init_value is not None: self.layer_scale = LayerScale2d(dim, layer_scale_init_value) else: self.layer_scale = nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): x = self.token_mixer(x) x = x + self.drop_path(self.layer_scale(self.mlp(x))) return x class AttentionBlock(nn.Module): """Implementation of metaformer block with MHSA as token mixer. For more details on Metaformer structure, please refer to: `MetaFormer Is Actually What You Need for Vision <https://arxiv.org/pdf/2111.11418.pdf>`_ """ def __init__( self, dim: int, mlp_ratio: float = 4.0, act_layer: nn.Module = nn.GELU, norm_layer: nn.Module = nn.BatchNorm2d, proj_drop: float = 0.0, drop_path: float = 0.0, layer_scale_init_value: float = 1e-5, ): """Build Attention Block. Args: dim: Number of embedding dimensions. mlp_ratio: MLP expansion ratio. Default: 4.0 act_layer: Activation layer. Default: ``nn.GELU`` norm_layer: Normalization layer. Default: ``nn.BatchNorm2d`` proj_drop: Dropout rate. Default: 0.0 drop_path: Drop path rate. Default: 0.0 layer_scale_init_value: Layer scale value at initialization. Default: 1e-5 """ super().__init__() self.norm = norm_layer(dim) self.token_mixer = Attention(dim=dim) if layer_scale_init_value is not None: self.layer_scale_1 = LayerScale2d(dim, layer_scale_init_value) else: self.layer_scale_1 = nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.mlp = ConvMlp( in_chs=dim, hidden_channels=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) if layer_scale_init_value is not None: self.layer_scale_2 = LayerScale2d(dim, layer_scale_init_value) else: self.layer_scale_2 = nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.layer_scale_1(self.token_mixer(self.norm(x)))) x = x + self.drop_path2(self.layer_scale_2(self.mlp(x))) return x class FastVitStage(nn.Module): def __init__( self, dim: int, dim_out: int, depth: int, token_mixer_type: str, downsample: bool = True, down_patch_size: int = 7, down_stride: int = 2, pos_emb_layer: Optional[nn.Module] = None, kernel_size: int = 3, mlp_ratio: float = 4.0, act_layer: nn.Module = nn.GELU, norm_layer: nn.Module = nn.BatchNorm2d, proj_drop_rate: float = 0.0, drop_path_rate: float = 0.0, layer_scale_init_value: Optional[float] = 1e-5, lkc_use_act=False, inference_mode=False, ): """FastViT stage. Args: dim: Number of embedding dimensions. depth: Number of blocks in stage token_mixer_type: Token mixer type. kernel_size: Kernel size for repmixer. mlp_ratio: MLP expansion ratio. act_layer: Activation layer. norm_layer: Normalization layer. proj_drop_rate: Dropout rate. drop_path_rate: Drop path rate. layer_scale_init_value: Layer scale value at initialization. inference_mode: Flag to instantiate block in inference mode. 
""" super().__init__() self.grad_checkpointing = False if downsample: self.downsample = PatchEmbed( patch_size=down_patch_size, stride=down_stride, in_chs=dim, embed_dim=dim_out, act_layer=act_layer, lkc_use_act=lkc_use_act, inference_mode=inference_mode, ) else: assert dim == dim_out self.downsample = nn.Identity() if pos_emb_layer is not None: self.pos_emb = pos_emb_layer(dim_out, inference_mode=inference_mode) else: self.pos_emb = nn.Identity() blocks = [] for block_idx in range(depth): if token_mixer_type == "repmixer": blocks.append(RepMixerBlock( dim_out, kernel_size=kernel_size, mlp_ratio=mlp_ratio, act_layer=act_layer, proj_drop=proj_drop_rate, drop_path=drop_path_rate[block_idx], layer_scale_init_value=layer_scale_init_value, inference_mode=inference_mode, )) elif token_mixer_type == "attention": blocks.append(AttentionBlock( dim_out, mlp_ratio=mlp_ratio, act_layer=act_layer, norm_layer=norm_layer, proj_drop=proj_drop_rate, drop_path=drop_path_rate[block_idx], layer_scale_init_value=layer_scale_init_value, )) else: raise ValueError( "Token mixer type: {} not supported".format(token_mixer_type) ) self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.pos_emb(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class FastVit(nn.Module): fork_feat: torch.jit.Final[bool] """ This class implements `FastViT architecture <https://arxiv.org/pdf/2303.14189.pdf>`_ """ def __init__( self, in_chans: int = 3, layers: Tuple[int, ...] = (2, 2, 6, 2), token_mixers: Tuple[str, ...] = ("repmixer", "repmixer", "repmixer", "repmixer"), embed_dims: Tuple[int, ...] = (64, 128, 256, 512), mlp_ratios: Tuple[float, ...] = (4,) * 4, downsamples: Tuple[bool, ...] = (False, True, True, True), repmixer_kernel_size: int = 3, num_classes: int = 1000, pos_embs: Tuple[Optional[nn.Module], ...] 
= (None,) * 4, down_patch_size: int = 7, down_stride: int = 2, drop_rate: float = 0.0, proj_drop_rate: float = 0.0, drop_path_rate: float = 0.0, layer_scale_init_value: float = 1e-5, fork_feat: bool = False, cls_ratio: float = 2.0, global_pool: str = 'avg', norm_layer: nn.Module = nn.BatchNorm2d, act_layer: nn.Module = nn.GELU, lkc_use_act: bool = False, inference_mode: bool = False, ) -> None: super().__init__() self.num_classes = 0 if fork_feat else num_classes self.fork_feat = fork_feat self.global_pool = global_pool self.feature_info = [] # Convolutional stem self.stem = convolutional_stem( in_chans, embed_dims[0], act_layer, inference_mode, ) # Build the main stages of the network architecture prev_dim = embed_dims[0] scale = 1 dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] stages = [] for i in range(len(layers)): downsample = downsamples[i] or prev_dim != embed_dims[i] stage = FastVitStage( dim=prev_dim, dim_out=embed_dims[i], depth=layers[i], downsample=downsample, down_patch_size=down_patch_size, down_stride=down_stride, pos_emb_layer=pos_embs[i], token_mixer_type=token_mixers[i], kernel_size=repmixer_kernel_size, mlp_ratio=mlp_ratios[i], act_layer=act_layer, norm_layer=norm_layer, proj_drop_rate=proj_drop_rate, drop_path_rate=dpr[i], layer_scale_init_value=layer_scale_init_value, lkc_use_act=lkc_use_act, inference_mode=inference_mode, ) stages.append(stage) prev_dim = embed_dims[i] if downsample: scale *= 2 self.feature_info += [dict(num_chs=prev_dim, reduction=4 * scale, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = prev_dim # For segmentation and detection, extract intermediate output if self.fork_feat: # Add a norm layer for each output. self.stages is slightly different than self.network # in the original code, the PatchEmbed layer is part of self.stages in this code where # it was part of self.network in the original code. So we do not need to skip out indices. self.out_indices = [0, 1, 2, 3] for i_emb, i_layer in enumerate(self.out_indices): if i_emb == 0 and os.environ.get("FORK_LAST3", None): """For RetinaNet, `start_level=1`. The first norm layer will not used. cmd: `FORK_LAST3=1 python -m torch.distributed.launch ...` """ layer = nn.Identity() else: layer = norm_layer(embed_dims[i_emb]) layer_name = f"norm{i_layer}" self.add_module(layer_name, layer) else: # Classifier head self.num_features = final_features = int(embed_dims[-1] * cls_ratio) self.final_conv = MobileOneBlock( in_chs=embed_dims[-1], out_chs=final_features, kernel_size=3, stride=1, group_size=1, inference_mode=inference_mode, use_se=True, act_layer=act_layer, num_conv_branches=1, ) self.head = ClassifierHead( final_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, ) self.apply(self._init_weights) def _init_weights(self, m: nn.Module) -> None: """Init. 
for classification""" if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return set() @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', # stem and embed blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+).downsample', (0,)), (r'^stages\.(\d+).pos_emb', (0,)), (r'^stages\.(\d+)\.\w+\.(\d+)', None), ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x: torch.Tensor) -> torch.Tensor: # input embedding x = self.stem(x) outs = [] for idx, block in enumerate(self.stages): x = block(x) if self.fork_feat: if idx in self.out_indices: norm_layer = getattr(self, f"norm{idx}") x_out = norm_layer(x) outs.append(x_out) if self.fork_feat: # output the features of four stages for dense prediction return outs x = self.final_conv(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) if self.fork_feat: return x x = self.forward_head(x) return x def _cfg(url="", **kwargs): return { "url": url, "num_classes": 1000, "input_size": (3, 256, 256), "pool_size": (8, 8), "crop_pct": 0.9, "interpolation": "bicubic", "mean": IMAGENET_DEFAULT_MEAN, "std": IMAGENET_DEFAULT_STD, 'first_conv': ('stem.0.conv_kxk.0.conv', 'stem.0.conv_scale.conv'), "classifier": "head.fc", **kwargs, } default_cfgs = generate_default_cfgs({ "fastvit_t8.apple_in1k": _cfg( hf_hub_id='timm/'), "fastvit_t12.apple_in1k": _cfg( hf_hub_id='timm/'), "fastvit_s12.apple_in1k": _cfg( hf_hub_id='timm/'), "fastvit_sa12.apple_in1k": _cfg( hf_hub_id='timm/'), "fastvit_sa24.apple_in1k": _cfg( hf_hub_id='timm/'), "fastvit_sa36.apple_in1k": _cfg( hf_hub_id='timm/'), "fastvit_ma36.apple_in1k": _cfg( hf_hub_id='timm/', crop_pct=0.95 ), "fastvit_t8.apple_dist_in1k": _cfg( hf_hub_id='timm/'), "fastvit_t12.apple_dist_in1k": _cfg( hf_hub_id='timm/'), "fastvit_s12.apple_dist_in1k": _cfg( hf_hub_id='timm/',), "fastvit_sa12.apple_dist_in1k": _cfg( hf_hub_id='timm/',), "fastvit_sa24.apple_dist_in1k": _cfg( hf_hub_id='timm/',), "fastvit_sa36.apple_dist_in1k": _cfg( hf_hub_id='timm/',), "fastvit_ma36.apple_dist_in1k": _cfg( hf_hub_id='timm/', crop_pct=0.95 ), }) def _create_fastvit(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg( FastVit, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs ) return model @register_model def fastvit_t8(pretrained=False, **kwargs): """Instantiate FastViT-T8 model variant.""" model_args = dict( layers=(2, 2, 4, 2), embed_dims=(48, 96, 192, 384), mlp_ratios=(3, 3, 3, 3), token_mixers=("repmixer", "repmixer", "repmixer", "repmixer") ) return _create_fastvit('fastvit_t8', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_t12(pretrained=False, **kwargs): """Instantiate FastViT-T12 model variant.""" model_args = dict( layers=(2, 2, 6, 2), embed_dims=(64, 128, 256, 512), mlp_ratios=(3, 3, 3, 3), token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"), ) 
return _create_fastvit('fastvit_t12', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_s12(pretrained=False, **kwargs): """Instantiate FastViT-S12 model variant.""" model_args = dict( layers=(2, 2, 6, 2), embed_dims=(64, 128, 256, 512), mlp_ratios=(4, 4, 4, 4), token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"), ) return _create_fastvit('fastvit_s12', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_sa12(pretrained=False, **kwargs): """Instantiate FastViT-SA12 model variant.""" model_args = dict( layers=(2, 2, 6, 2), embed_dims=(64, 128, 256, 512), mlp_ratios=(4, 4, 4, 4), pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), token_mixers=("repmixer", "repmixer", "repmixer", "attention"), ) return _create_fastvit('fastvit_sa12', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_sa24(pretrained=False, **kwargs): """Instantiate FastViT-SA24 model variant.""" model_args = dict( layers=(4, 4, 12, 4), embed_dims=(64, 128, 256, 512), mlp_ratios=(4, 4, 4, 4), pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), token_mixers=("repmixer", "repmixer", "repmixer", "attention"), ) return _create_fastvit('fastvit_sa24', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_sa36(pretrained=False, **kwargs): """Instantiate FastViT-SA36 model variant.""" model_args = dict( layers=(6, 6, 18, 6), embed_dims=(64, 128, 256, 512), mlp_ratios=(4, 4, 4, 4), pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), token_mixers=("repmixer", "repmixer", "repmixer", "attention"), ) return _create_fastvit('fastvit_sa36', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fastvit_ma36(pretrained=False, **kwargs): """Instantiate FastViT-MA36 model variant.""" model_args = dict( layers=(6, 6, 18, 6), embed_dims=(76, 152, 304, 608), mlp_ratios=(4, 4, 4, 4), pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))), token_mixers=("repmixer", "repmixer", "repmixer", "attention") ) return _create_fastvit('fastvit_ma36', pretrained=pretrained, **dict(model_args, **kwargs))
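

# --- Reparameterization sketch (not part of the original file) ---
# The train-time blocks in this file (MobileOneBlock, RepMixer, ReparamLargeKernelConv,
# RepConditionalPosEnc) each expose a reparameterize() method that fuses their branches
# into a single conv for inference. Newer timm versions also ship a model-wide helper
# for this, but the hedged sketch below relies only on the methods defined in this file.
if __name__ == '__main__':
    import timm
    import torch

    model = timm.create_model('fastvit_t8', pretrained=False).eval()
    # Collect first, then fuse, so the module tree is not mutated while iterating.
    to_fuse = [m for m in model.modules() if hasattr(m, 'reparameterize')]
    for m in to_fuse:
        m.reparameterize()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 256, 256))
    print(out.shape)  # expected: torch.Size([1, 1000])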
pytorch-image-models/timm/models/fastvit.py/0
{ "file_path": "pytorch-image-models/timm/models/fastvit.py", "repo_id": "pytorch-image-models", "token_count": 24916 }
174
""" MaxVit and CoAtNet Vision Transformer - CNN Hybrids in PyTorch This is a from-scratch implementation of both CoAtNet and MaxVit in PyTorch. 99% of the implementation was done from papers, however last minute some adjustments were made based on the (as yet unfinished?) public code release https://github.com/google-research/maxvit There are multiple sets of models defined for both architectures. Typically, names with a `_rw` suffix are my own original configs prior to referencing https://github.com/google-research/maxvit. These configs work well and appear to be a bit faster / lower resource than the paper. The models without extra prefix / suffix' (coatnet_0_224, maxvit_tiny_224, etc), are intended to match paper, BUT, without any official pretrained weights it's difficult to confirm a 100% match. Papers: MaxViT: Multi-Axis Vision Transformer - https://arxiv.org/abs/2204.01697 @article{tu2022maxvit, title={MaxViT: Multi-Axis Vision Transformer}, author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, journal={ECCV}, year={2022}, } CoAtNet: Marrying Convolution and Attention for All Data Sizes - https://arxiv.org/abs/2106.04803 @article{DBLP:journals/corr/abs-2106-04803, author = {Zihang Dai and Hanxiao Liu and Quoc V. Le and Mingxing Tan}, title = {CoAtNet: Marrying Convolution and Attention for All Data Sizes}, journal = {CoRR}, volume = {abs/2106.04803}, year = {2021} } Hacked together by / Copyright 2022, Ross Wightman """ import math from collections import OrderedDict from dataclasses import dataclass, replace, field from functools import partial from typing import Callable, Optional, Union, Tuple, List import torch from torch import nn from torch.jit import Final from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, ConvMlp, DropPath, LayerNorm, ClassifierHead, NormMlpClassifierHead from timm.layers import create_attn, get_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d, create_pool2d from timm.layers import trunc_normal_tf_, to_2tuple, extend_tuple, make_divisible, _assert from timm.layers import RelPosMlp, RelPosBias, RelPosBiasTf, use_fused_attn, resize_rel_pos_bias_table from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['MaxxVitCfg', 'MaxxVitConvCfg', 'MaxxVitTransformerCfg', 'MaxxVit'] @dataclass class MaxxVitTransformerCfg: dim_head: int = 32 head_first: bool = True # head ordering in qkv channel dim expand_ratio: float = 4.0 expand_first: bool = True shortcut_bias: bool = True attn_bias: bool = True attn_drop: float = 0. proj_drop: float = 0. 
pool_type: str = 'avg2' rel_pos_type: str = 'bias' rel_pos_dim: int = 512 # for relative position types w/ MLP partition_ratio: int = 32 window_size: Optional[Tuple[int, int]] = None grid_size: Optional[Tuple[int, int]] = None no_block_attn: bool = False # disable window block attention for maxvit (ie only grid) use_nchw_attn: bool = False # for MaxViT variants (not used for CoAt), keep tensors in NCHW order init_values: Optional[float] = None act_layer: str = 'gelu' norm_layer: str = 'layernorm2d' norm_layer_cl: str = 'layernorm' norm_eps: float = 1e-6 def __post_init__(self): if self.grid_size is not None: self.grid_size = to_2tuple(self.grid_size) if self.window_size is not None: self.window_size = to_2tuple(self.window_size) if self.grid_size is None: self.grid_size = self.window_size @dataclass class MaxxVitConvCfg: block_type: str = 'mbconv' expand_ratio: float = 4.0 expand_output: bool = True # calculate expansion channels from output (vs input chs) kernel_size: int = 3 group_size: int = 1 # 1 == depthwise pre_norm_act: bool = False # activation after pre-norm output_bias: bool = True # bias for shortcut + final 1x1 projection conv stride_mode: str = 'dw' # stride done via one of 'pool', '1x1', 'dw' pool_type: str = 'avg2' downsample_pool_type: str = 'avg2' padding: str = '' attn_early: bool = False # apply attn between conv2 and norm2, instead of after norm2 attn_layer: str = 'se' attn_act_layer: str = 'silu' attn_ratio: float = 0.25 init_values: Optional[float] = 1e-6 # for ConvNeXt block, ignored by MBConv act_layer: str = 'gelu' norm_layer: str = '' norm_layer_cl: str = '' norm_eps: Optional[float] = None def __post_init__(self): # mbconv vs convnext blocks have different defaults, set in post_init to avoid explicit config args assert self.block_type in ('mbconv', 'convnext') use_mbconv = self.block_type == 'mbconv' if not self.norm_layer: self.norm_layer = 'batchnorm2d' if use_mbconv else 'layernorm2d' if not self.norm_layer_cl and not use_mbconv: self.norm_layer_cl = 'layernorm' if self.norm_eps is None: self.norm_eps = 1e-5 if use_mbconv else 1e-6 self.downsample_pool_type = self.downsample_pool_type or self.pool_type @dataclass class MaxxVitCfg: embed_dim: Tuple[int, ...] = (96, 192, 384, 768) depths: Tuple[int, ...] = (2, 3, 5, 2) block_type: Tuple[Union[str, Tuple[str, ...]], ...] = ('C', 'C', 'T', 'T') stem_width: Union[int, Tuple[int, int]] = 64 stem_bias: bool = False conv_cfg: MaxxVitConvCfg = field(default_factory=MaxxVitConvCfg) transformer_cfg: MaxxVitTransformerCfg = field(default_factory=MaxxVitTransformerCfg) head_hidden_size: int = None weight_init: str = 'vit_eff' class Attention2d(nn.Module): fused_attn: Final[bool] """ multi-head attention for 2D NCHW tensors""" def __init__( self, dim: int, dim_out: Optional[int] = None, dim_head: int = 32, bias: bool = True, expand_first: bool = True, head_first: bool = True, rel_pos_cls: Callable = None, attn_drop: float = 0., proj_drop: float = 0. 
): super().__init__() dim_out = dim_out or dim dim_attn = dim_out if expand_first else dim self.num_heads = dim_attn // dim_head self.dim_head = dim_head self.head_first = head_first self.scale = dim_head ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias) self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): B, C, H, W = x.shape if self.head_first: q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2) else: q, k, v = self.qkv(x).reshape(B, 3, self.num_heads, self.dim_head, -1).unbind(1) if self.fused_attn: attn_bias = None if self.rel_pos is not None: attn_bias = self.rel_pos.get_bias() elif shared_rel_pos is not None: attn_bias = shared_rel_pos x = torch.nn.functional.scaled_dot_product_attention( q.transpose(-1, -2).contiguous(), k.transpose(-1, -2).contiguous(), v.transpose(-1, -2).contiguous(), attn_mask=attn_bias, dropout_p=self.attn_drop.p if self.training else 0., ).transpose(-1, -2).reshape(B, -1, H, W) else: q = q * self.scale attn = q.transpose(-2, -1) @ k if self.rel_pos is not None: attn = self.rel_pos(attn) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) x = self.proj(x) x = self.proj_drop(x) return x class AttentionCl(nn.Module): """ Channels-last multi-head attention (B, ..., C) """ fused_attn: Final[bool] def __init__( self, dim: int, dim_out: Optional[int] = None, dim_head: int = 32, bias: bool = True, expand_first: bool = True, head_first: bool = True, rel_pos_cls: Callable = None, attn_drop: float = 0., proj_drop: float = 0. 
): super().__init__() dim_out = dim_out or dim dim_attn = dim_out if expand_first and dim_out > dim else dim assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim' self.num_heads = dim_attn // dim_head self.dim_head = dim_head self.head_first = head_first self.scale = dim_head ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim_attn * 3, bias=bias) self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim_attn, dim_out, bias=bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): B = x.shape[0] restore_shape = x.shape[:-1] if self.head_first: q, k, v = self.qkv(x).view(B, -1, self.num_heads, self.dim_head * 3).transpose(1, 2).chunk(3, dim=3) else: q, k, v = self.qkv(x).reshape(B, -1, 3, self.num_heads, self.dim_head).transpose(1, 3).unbind(2) if self.fused_attn: attn_bias = None if self.rel_pos is not None: attn_bias = self.rel_pos.get_bias() elif shared_rel_pos is not None: attn_bias = shared_rel_pos x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_bias, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if self.rel_pos is not None: attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(restore_shape + (-1,)) x = self.proj(x) x = self.proj_drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma return x.mul_(gamma) if self.inplace else x * gamma class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class Downsample2d(nn.Module): """ A downsample pooling module supporting several maxpool and avgpool modes * 'max' - MaxPool2d w/ kernel_size 3, stride 2, padding 1 * 'max2' - MaxPool2d w/ kernel_size = stride = 2 * 'avg' - AvgPool2d w/ kernel_size 3, stride 2, padding 1 * 'avg2' - AvgPool2d w/ kernel_size = stride = 2 """ def __init__( self, dim: int, dim_out: int, pool_type: str = 'avg2', padding: str = '', bias: bool = True, ): super().__init__() assert pool_type in ('max', 'max2', 'avg', 'avg2') if pool_type == 'max': self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=padding or 1) elif pool_type == 'max2': self.pool = create_pool2d('max', 2, padding=padding or 0) # kernel_size == stride == 2 elif pool_type == 'avg': self.pool = create_pool2d( 'avg', kernel_size=3, stride=2, count_include_pad=False, padding=padding or 1) else: self.pool = create_pool2d('avg', 2, padding=padding or 0) if dim != dim_out: self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias) else: self.expand = nn.Identity() def forward(self, x): x = self.pool(x) # spatial downsample x = self.expand(x) # expand chs return x def _init_transformer(module, name, scheme=''): if isinstance(module, (nn.Conv2d, nn.Linear)): if scheme == 'normal': nn.init.normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 
'trunc_normal': trunc_normal_tf_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'xavier_normal': nn.init.xavier_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: # vit like nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-6) else: nn.init.zeros_(module.bias) class TransformerBlock2d(nn.Module): """ Transformer block with 2D downsampling '2D' NCHW tensor layout Some gains can be seen on GPU using a 1D / CL block, BUT w/ the need to switch back/forth to NCHW for spatial pooling, the benefit is minimal so ended up using just this variant for CoAt configs. This impl was faster on TPU w/ PT XLA than the 1D experiment. """ def __init__( self, dim: int, dim_out: int, stride: int = 1, rel_pos_cls: Callable = None, cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) act_layer = get_act_layer(cfg.act_layer) if stride == 2: self.shortcut = Downsample2d(dim, dim_out, pool_type=cfg.pool_type, bias=cfg.shortcut_bias) self.norm1 = nn.Sequential(OrderedDict([ ('norm', norm_layer(dim)), ('down', Downsample2d(dim, dim, pool_type=cfg.pool_type)), ])) else: assert dim == dim_out self.shortcut = nn.Identity() self.norm1 = norm_layer(dim) self.attn = Attention2d( dim, dim_out, dim_head=cfg.dim_head, expand_first=cfg.expand_first, bias=cfg.attn_bias, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop ) self.ls1 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim_out) self.mlp = ConvMlp( in_features=dim_out, hidden_features=int(dim_out * cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def init_weights(self, scheme=''): named_apply(partial(_init_transformer, scheme=scheme), self) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): x = self.shortcut(x) + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x def _init_conv(module, name, scheme=''): if isinstance(module, nn.Conv2d): if scheme == 'normal': nn.init.normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'trunc_normal': trunc_normal_tf_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'xavier_normal': nn.init.xavier_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: # efficientnet like fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out)) if module.bias is not None: nn.init.zeros_(module.bias) def num_groups(group_size, channels): if not group_size: # 0 or None return 1 # normal conv with 1 group else: # NOTE group_size == 1 -> depthwise conv assert channels % group_size == 0 return channels // group_size class MbConvBlock(nn.Module): """ Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand) """ def __init__( self, in_chs: int, out_chs: int, stride: int = 1, dilation: Tuple[int, int] = (1, 1), cfg: MaxxVitConvCfg = MaxxVitConvCfg(), drop_path: float = 0. ): super(MbConvBlock, self).__init__() norm_act_layer = partial(get_norm_act_layer(cfg.norm_layer, cfg.act_layer), eps=cfg.norm_eps) mid_chs = make_divisible((out_chs if cfg.expand_output else in_chs) * cfg.expand_ratio) groups = num_groups(cfg.group_size, mid_chs) if stride == 2: self.shortcut = Downsample2d( in_chs, out_chs, pool_type=cfg.pool_type, bias=cfg.output_bias, padding=cfg.padding) else: self.shortcut = nn.Identity() assert cfg.stride_mode in ('pool', '1x1', 'dw') stride_pool, stride_1, stride_2 = 1, 1, 1 if cfg.stride_mode == 'pool': # NOTE this is not described in paper, experiment to find faster option that doesn't stride in 1x1 stride_pool, dilation_2 = stride, dilation[1] # FIXME handle dilation of avg pool elif cfg.stride_mode == '1x1': # NOTE I don't like this option described in paper, 1x1 w/ stride throws info away stride_1, dilation_2 = stride, dilation[1] else: stride_2, dilation_2 = stride, dilation[0] self.pre_norm = norm_act_layer(in_chs, apply_act=cfg.pre_norm_act) if stride_pool > 1: self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type, padding=cfg.padding) else: self.down = nn.Identity() self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=stride_1) self.norm1 = norm_act_layer(mid_chs) self.conv2_kxk = create_conv2d( mid_chs, mid_chs, cfg.kernel_size, stride=stride_2, dilation=dilation_2, groups=groups, padding=cfg.padding) attn_kwargs = {} if isinstance(cfg.attn_layer, str): if cfg.attn_layer == 'se' or cfg.attn_layer == 'eca': attn_kwargs['act_layer'] = cfg.attn_act_layer attn_kwargs['rd_channels'] = int(cfg.attn_ratio * (out_chs if cfg.expand_output else mid_chs)) # two different orderings for SE and norm2 (due to some weights and trials using SE before norm2) if cfg.attn_early: self.se_early = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs) self.norm2 = norm_act_layer(mid_chs) self.se = None else: self.se_early = None self.norm2 = norm_act_layer(mid_chs) self.se = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs) 
self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=cfg.output_bias) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def init_weights(self, scheme=''): named_apply(partial(_init_conv, scheme=scheme), self) def forward(self, x): shortcut = self.shortcut(x) x = self.pre_norm(x) x = self.down(x) # 1x1 expansion conv & norm-act x = self.conv1_1x1(x) x = self.norm1(x) # depthwise / grouped 3x3 conv w/ SE (or other) channel attention & norm-act x = self.conv2_kxk(x) if self.se_early is not None: x = self.se_early(x) x = self.norm2(x) if self.se is not None: x = self.se(x) # 1x1 linear projection to output width x = self.conv3_1x1(x) x = self.drop_path(x) + shortcut return x class ConvNeXtBlock(nn.Module): """ ConvNeXt Block """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 7, stride: int = 1, dilation: Tuple[int, int] = (1, 1), cfg: MaxxVitConvCfg = MaxxVitConvCfg(), conv_mlp: bool = True, drop_path: float = 0. ): super().__init__() out_chs = out_chs or in_chs act_layer = get_act_layer(cfg.act_layer) if conv_mlp: norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) mlp_layer = ConvMlp else: assert 'layernorm' in cfg.norm_layer norm_layer = LayerNorm mlp_layer = Mlp self.use_conv_mlp = conv_mlp if stride == 2: self.shortcut = Downsample2d(in_chs, out_chs) elif in_chs != out_chs: self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias) else: self.shortcut = nn.Identity() assert cfg.stride_mode in ('pool', 'dw') stride_pool, stride_dw = 1, 1 # FIXME handle dilation? if cfg.stride_mode == 'pool': stride_pool = stride else: stride_dw = stride if stride_pool == 2: self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type) else: self.down = nn.Identity() self.conv_dw = create_conv2d( in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1], depthwise=True, bias=cfg.output_bias) self.norm = norm_layer(out_chs) self.mlp = mlp_layer(out_chs, int(cfg.expand_ratio * out_chs), bias=cfg.output_bias, act_layer=act_layer) if conv_mlp: self.ls = LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity() else: self.ls = LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): shortcut = self.shortcut(x) x = self.down(x) x = self.conv_dw(x) if self.use_conv_mlp: x = self.norm(x) x = self.mlp(x) x = self.ls(x) else: x = x.permute(0, 2, 3, 1) x = self.norm(x) x = self.mlp(x) x = self.ls(x) x = x.permute(0, 3, 1, 2) x = self.drop_path(x) + shortcut return x def window_partition(x, window_size: List[int]): B, H, W, C = x.shape _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})') _assert(W % window_size[1] == 0, '') x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse(windows, window_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x def grid_partition(x, grid_size: List[int]): B, H, W, C = x.shape _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}') _assert(W % grid_size[1] == 0, '') x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C) windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def grid_reverse(windows, grid_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // grid_size[0], W // grid_size[1], grid_size[0], grid_size[1], C) x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, H, W, C) return x def get_rel_pos_cls(cfg: MaxxVitTransformerCfg, window_size): rel_pos_cls = None if cfg.rel_pos_type == 'mlp': rel_pos_cls = partial(RelPosMlp, window_size=window_size, hidden_dim=cfg.rel_pos_dim) elif cfg.rel_pos_type == 'bias': rel_pos_cls = partial(RelPosBias, window_size=window_size) elif cfg.rel_pos_type == 'bias_tf': rel_pos_cls = partial(RelPosBiasTf, window_size=window_size) return rel_pos_cls class PartitionAttentionCl(nn.Module): """ Grid or Block partition + Attn + FFN. NxC 'channels last' tensor layout. """ def __init__( self, dim: int, partition_type: str = 'block', cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last act_layer = get_act_layer(cfg.act_layer) self.partition_block = partition_type == 'block' self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn = AttentionCl( dim, dim, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def _partition_attn(self, x): img_size = x.shape[1:3] if self.partition_block: partitioned = window_partition(x, self.partition_size) else: partitioned = grid_partition(x, self.partition_size) partitioned = self.attn(partitioned) if self.partition_block: x = window_reverse(partitioned, self.partition_size, img_size) else: x = grid_reverse(partitioned, self.partition_size, img_size) return x def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ParallelPartitionAttention(nn.Module): """ Experimental. Grid and Block partition + single FFN NxC tensor layout. """ def __init__( self, dim: int, cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() assert dim % 2 == 0 norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last act_layer = get_act_layer(cfg.act_layer) assert cfg.window_size == cfg.grid_size self.partition_size = to_2tuple(cfg.window_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn_block = AttentionCl( dim, dim // 2, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.attn_grid = AttentionCl( dim, dim // 2, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * cfg.expand_ratio), out_features=dim, act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def _partition_attn(self, x): img_size = x.shape[1:3] partitioned_block = window_partition(x, self.partition_size) partitioned_block = self.attn_block(partitioned_block) x_window = window_reverse(partitioned_block, self.partition_size, img_size) partitioned_grid = grid_partition(x, self.partition_size) partitioned_grid = self.attn_grid(partitioned_grid) x_grid = grid_reverse(partitioned_grid, self.partition_size, img_size) return torch.cat([x_window, x_grid], dim=-1) def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x def window_partition_nchw(x, window_size: List[int]): B, C, H, W = x.shape _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})') _assert(W % window_size[1] == 0, '') x = x.view(B, C, H // window_size[0], window_size[0], W // window_size[1], window_size[1]) windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size[0], window_size[1]) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse_nchw(windows, window_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[1] x = windows.view(-1, H // window_size[0], W // window_size[1], C, window_size[0], window_size[1]) x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, C, H, W) return x def grid_partition_nchw(x, grid_size: List[int]): B, C, H, W = x.shape _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}') _assert(W % grid_size[1] == 0, '') x = x.view(B, C, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1]) windows = x.permute(0, 3, 5, 1, 2, 4).contiguous().view(-1, C, grid_size[0], grid_size[1]) return windows @register_notrace_function # reason: int argument is a Proxy def grid_reverse_nchw(windows, grid_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[1] x = windows.view(-1, H // grid_size[0], W // grid_size[1], C, grid_size[0], grid_size[1]) x = x.permute(0, 3, 4, 1, 5, 2).contiguous().view(-1, C, H, W) return x class PartitionAttention2d(nn.Module): """ Grid or Block partition + Attn + FFN '2D' NCHW tensor layout. """ def __init__( self, dim: int, partition_type: str = 'block', cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) # NOTE this block is channels-last act_layer = get_act_layer(cfg.act_layer) self.partition_block = partition_type == 'block' self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn = Attention2d( dim, dim, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.ls1 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = ConvMlp( in_features=dim, hidden_features=int(dim * cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def _partition_attn(self, x): img_size = x.shape[-2:] if self.partition_block: partitioned = window_partition_nchw(x, self.partition_size) else: partitioned = grid_partition_nchw(x, self.partition_size) partitioned = self.attn(partitioned) if self.partition_block: x = window_reverse_nchw(partitioned, self.partition_size, img_size) else: x = grid_reverse_nchw(partitioned, self.partition_size, img_size) return x def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class MaxxVitBlock(nn.Module): """ MaxVit conv, window partition + FFN , grid partition + FFN """ def __init__( self, dim: int, dim_out: int, stride: int = 1, conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() self.nchw_attn = transformer_cfg.use_nchw_attn conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) attn_kwargs = dict(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) partition_layer = PartitionAttention2d if self.nchw_attn else PartitionAttentionCl self.attn_block = None if transformer_cfg.no_block_attn else partition_layer(**attn_kwargs) self.attn_grid = partition_layer(partition_type='grid', **attn_kwargs) def init_weights(self, scheme=''): if self.attn_block is not None: named_apply(partial(_init_transformer, scheme=scheme), self.attn_block) named_apply(partial(_init_transformer, scheme=scheme), self.attn_grid) named_apply(partial(_init_conv, scheme=scheme), self.conv) def forward(self, x): # NCHW format x = self.conv(x) if not self.nchw_attn: x = x.permute(0, 2, 3, 1) # to NHWC (channels-last) if self.attn_block is not None: x = self.attn_block(x) x = self.attn_grid(x) if not self.nchw_attn: x = x.permute(0, 3, 1, 2) # back to NCHW return x class ParallelMaxxVitBlock(nn.Module): """ MaxVit block with parallel cat(window + grid), one FF Experimental timm block. 
""" def __init__( self, dim, dim_out, stride=1, num_conv=2, conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path=0., ): super().__init__() conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock if num_conv > 1: convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)] convs += [conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)] * (num_conv - 1) self.conv = nn.Sequential(*convs) else: self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) def init_weights(self, scheme=''): named_apply(partial(_init_transformer, scheme=scheme), self.attn) named_apply(partial(_init_conv, scheme=scheme), self.conv) def forward(self, x): x = self.conv(x) x = x.permute(0, 2, 3, 1) x = self.attn(x) x = x.permute(0, 3, 1, 2) return x class MaxxVitStage(nn.Module): def __init__( self, in_chs: int, out_chs: int, stride: int = 2, depth: int = 4, feat_size: Tuple[int, int] = (14, 14), block_types: Union[str, Tuple[str]] = 'C', transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), drop_path: Union[float, List[float]] = 0., ): super().__init__() self.grad_checkpointing = False block_types = extend_tuple(block_types, depth) blocks = [] for i, t in enumerate(block_types): block_stride = stride if i == 0 else 1 assert t in ('C', 'T', 'M', 'PM') if t == 'C': conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock blocks += [conv_cls( in_chs, out_chs, stride=block_stride, cfg=conv_cfg, drop_path=drop_path[i], )] elif t == 'T': rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size) blocks += [TransformerBlock2d( in_chs, out_chs, stride=block_stride, rel_pos_cls=rel_pos_cls, cfg=transformer_cfg, drop_path=drop_path[i], )] elif t == 'M': blocks += [MaxxVitBlock( in_chs, out_chs, stride=block_stride, conv_cfg=conv_cfg, transformer_cfg=transformer_cfg, drop_path=drop_path[i], )] elif t == 'PM': blocks += [ParallelMaxxVitBlock( in_chs, out_chs, stride=block_stride, conv_cfg=conv_cfg, transformer_cfg=transformer_cfg, drop_path=drop_path[i], )] in_chs = out_chs self.blocks = nn.Sequential(*blocks) def forward(self, x): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class Stem(nn.Module): def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, padding: str = '', bias: bool = False, act_layer: str = 'gelu', norm_layer: str = 'batchnorm2d', norm_eps: float = 1e-5, ): super().__init__() if not isinstance(out_chs, (list, tuple)): out_chs = to_2tuple(out_chs) norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps) self.out_chs = out_chs[-1] self.stride = 2 self.conv1 = create_conv2d(in_chs, out_chs[0], kernel_size, stride=2, padding=padding, bias=bias) self.norm1 = norm_act_layer(out_chs[0]) self.conv2 = create_conv2d(out_chs[0], out_chs[1], kernel_size, stride=1, padding=padding, bias=bias) def init_weights(self, scheme=''): named_apply(partial(_init_conv, scheme=scheme), self) def forward(self, x): x = self.conv1(x) x = self.norm1(x) x = self.conv2(x) return x def cfg_window_size(cfg: MaxxVitTransformerCfg, img_size: Tuple[int, int]): if cfg.window_size is not None: assert cfg.grid_size return cfg partition_size = img_size[0] // cfg.partition_ratio, img_size[1] // cfg.partition_ratio cfg 
= replace(cfg, window_size=partition_size, grid_size=partition_size) return cfg def _overlay_kwargs(cfg: MaxxVitCfg, **kwargs): transformer_kwargs = {} conv_kwargs = {} base_kwargs = {} for k, v in kwargs.items(): if k.startswith('transformer_'): transformer_kwargs[k.replace('transformer_', '')] = v elif k.startswith('conv_'): conv_kwargs[k.replace('conv_', '')] = v else: base_kwargs[k] = v cfg = replace( cfg, transformer_cfg=replace(cfg.transformer_cfg, **transformer_kwargs), conv_cfg=replace(cfg.conv_cfg, **conv_kwargs), **base_kwargs ) return cfg class MaxxVit(nn.Module): """ CoaTNet + MaxVit base model. Highly configurable for different block compositions, tensor layouts, pooling types. """ def __init__( self, cfg: MaxxVitCfg, img_size: Union[int, Tuple[int, int]] = 224, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', drop_rate: float = 0., drop_path_rate: float = 0., **kwargs, ): super().__init__() img_size = to_2tuple(img_size) if kwargs: cfg = _overlay_kwargs(cfg, **kwargs) transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size) self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.embed_dim = cfg.embed_dim[-1] self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] self.stem = Stem( in_chs=in_chans, out_chs=cfg.stem_width, padding=cfg.conv_cfg.padding, bias=cfg.stem_bias, act_layer=cfg.conv_cfg.act_layer, norm_layer=cfg.conv_cfg.norm_layer, norm_eps=cfg.conv_cfg.norm_eps, ) stride = self.stem.stride self.feature_info += [dict(num_chs=self.stem.out_chs, reduction=2, module='stem')] feat_size = tuple([i // s for i, s in zip(img_size, to_2tuple(stride))]) num_stages = len(cfg.embed_dim) assert len(cfg.depths) == num_stages dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] in_chs = self.stem.out_chs stages = [] for i in range(num_stages): stage_stride = 2 out_chs = cfg.embed_dim[i] feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size]) stages += [MaxxVitStage( in_chs, out_chs, depth=cfg.depths[i], block_types=cfg.block_type[i], conv_cfg=cfg.conv_cfg, transformer_cfg=transformer_cfg, feat_size=feat_size, drop_path=dpr[i], )] stride *= stage_stride in_chs = out_chs self.feature_info += [dict(num_chs=out_chs, reduction=stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) final_norm_layer = partial(get_norm_layer(cfg.transformer_cfg.norm_layer), eps=cfg.transformer_cfg.norm_eps) self.head_hidden_size = cfg.head_hidden_size if self.head_hidden_size: self.norm = nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, hidden_size=self.head_hidden_size, pool_type=global_pool, drop_rate=drop_rate, norm_layer=final_norm_layer, ) else: # standard classifier head w/ norm, pooling, fc classifier self.norm = final_norm_layer(self.num_features) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) # Weight init (default PyTorch init works well for AdamW if scheme not set) assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff') if cfg.weight_init: named_apply(partial(self._init_weights, scheme=cfg.weight_init), self) def _init_weights(self, module, name, scheme=''): if hasattr(module, 'init_weights'): try: module.init_weights(scheme=scheme) except TypeError: module.init_weights() @torch.jit.ignore def no_weight_decay(self): return { k for k, _ in self.named_parameters() if any(n in k for n in ["relative_position_bias_table", 
"rel_pos.mlp"])} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', # stem and embed blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _rw_coat_cfg( stride_mode='pool', pool_type='avg2', conv_output_bias=False, conv_attn_early=False, conv_attn_act_layer='relu', conv_norm_layer='', transformer_shortcut_bias=True, transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', init_values=None, rel_pos_type='bias', rel_pos_dim=512, ): # 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit # Common differences for initial timm models: # - pre-norm layer in MZBConv included an activation after norm # - mbconv expansion calculated from input instead of output chs # - mbconv shortcut and final 1x1 conv did not have a bias # - SE act layer was relu, not silu # - mbconv uses silu in timm, not gelu # - expansion in attention block done via output proj, not input proj # Variable differences (evolved over training initial models): # - avg pool with kernel_size=2 favoured downsampling (instead of maxpool for coat) # - SE attention was between conv2 and norm/act # - default to avg pool for mbconv downsample instead of 1x1 or dw conv # - transformer block shortcut has no bias return dict( conv_cfg=MaxxVitConvCfg( stride_mode=stride_mode, pool_type=pool_type, pre_norm_act=True, expand_output=False, output_bias=conv_output_bias, attn_early=conv_attn_early, attn_act_layer=conv_attn_act_layer, act_layer='silu', norm_layer=conv_norm_layer, ), transformer_cfg=MaxxVitTransformerCfg( expand_first=False, shortcut_bias=transformer_shortcut_bias, pool_type=pool_type, init_values=init_values, norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim, ), ) def _rw_max_cfg( stride_mode='dw', pool_type='avg2', conv_output_bias=False, conv_attn_ratio=1 / 16, conv_norm_layer='', transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', window_size=None, dim_head=32, init_values=None, rel_pos_type='bias', rel_pos_dim=512, ): # 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit # Differences of initial timm models: # - mbconv expansion calculated from input instead of output chs # - mbconv shortcut and final 1x1 conv did not have a bias # - mbconv uses silu in timm, not gelu # - expansion in attention block done via output proj, not input proj return dict( conv_cfg=MaxxVitConvCfg( stride_mode=stride_mode, pool_type=pool_type, expand_output=False, output_bias=conv_output_bias, attn_ratio=conv_attn_ratio, act_layer='silu', norm_layer=conv_norm_layer, ), transformer_cfg=MaxxVitTransformerCfg( expand_first=False, pool_type=pool_type, dim_head=dim_head, window_size=window_size, init_values=init_values, norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, 
rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim, ), ) def _next_cfg( stride_mode='dw', pool_type='avg2', conv_norm_layer='layernorm2d', conv_norm_layer_cl='layernorm', transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', window_size=None, no_block_attn=False, init_values=1e-6, rel_pos_type='mlp', # MLP by default for maxxvit rel_pos_dim=512, ): # For experimental models with convnext instead of mbconv init_values = to_2tuple(init_values) return dict( conv_cfg=MaxxVitConvCfg( block_type='convnext', stride_mode=stride_mode, pool_type=pool_type, expand_output=False, init_values=init_values[0], norm_layer=conv_norm_layer, norm_layer_cl=conv_norm_layer_cl, ), transformer_cfg=MaxxVitTransformerCfg( expand_first=False, pool_type=pool_type, window_size=window_size, no_block_attn=no_block_attn, # enabled for MaxxViT-V2 init_values=init_values[1], norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim, ), ) def _tf_cfg(): return dict( conv_cfg=MaxxVitConvCfg( norm_eps=1e-3, act_layer='gelu_tanh', padding='same', ), transformer_cfg=MaxxVitTransformerCfg( norm_eps=1e-5, act_layer='gelu_tanh', head_first=False, # heads are interleaved (q_nh, q_hdim, k_nh, q_hdim, ....) rel_pos_type='bias_tf', ), ) model_cfgs = dict( # timm specific CoAtNet configs coatnet_pico_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 3, 5, 2), stem_width=(32, 64), **_rw_max_cfg( # using newer max defaults here conv_output_bias=True, conv_attn_ratio=0.25, ), ), coatnet_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), **_rw_max_cfg( # using newer max defaults here stride_mode='pool', conv_output_bias=True, conv_attn_ratio=0.25, ), ), coatnet_0_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), # deeper than paper '0' model stem_width=(32, 64), **_rw_coat_cfg( conv_attn_early=True, transformer_shortcut_bias=False, ), ), coatnet_1_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=(32, 64), **_rw_coat_cfg( stride_mode='dw', conv_attn_early=True, transformer_shortcut_bias=False, ) ), coatnet_2_rw=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=(64, 128), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', #init_values=1e-6, ), ), coatnet_3_rw=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=(96, 192), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-6, ), ), # Experimental CoAtNet configs w/ ImageNet-1k train (different norm layers, MLP rel-pos) coatnet_bn_0_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), # deeper than paper '0' model stem_width=(32, 64), **_rw_coat_cfg( stride_mode='dw', conv_attn_early=True, transformer_shortcut_bias=False, transformer_norm_layer='batchnorm2d', ) ), coatnet_rmlp_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), **_rw_max_cfg( conv_output_bias=True, conv_attn_ratio=0.25, rel_pos_type='mlp', rel_pos_dim=384, ), ), coatnet_rmlp_0_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), # deeper than paper '0' model stem_width=(32, 64), **_rw_coat_cfg( stride_mode='dw', rel_pos_type='mlp', ), ), coatnet_rmlp_1_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=(32, 64), **_rw_coat_cfg( pool_type='max', conv_attn_early=True, transformer_shortcut_bias=False, rel_pos_type='mlp', rel_pos_dim=384, # was supposed to be 
512, woops ), ), coatnet_rmlp_1_rw2=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=(32, 64), **_rw_coat_cfg( stride_mode='dw', rel_pos_type='mlp', rel_pos_dim=512, # was supposed to be 512, woops ), ), coatnet_rmlp_2_rw=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=(64, 128), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-6, rel_pos_type='mlp' ), ), coatnet_rmlp_3_rw=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=(96, 192), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-6, rel_pos_type='mlp' ), ), coatnet_nano_cc=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), block_type=('C', 'C', ('C', 'T'), ('C', 'T')), **_rw_coat_cfg(), ), coatnext_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), weight_init='normal', **_next_cfg( rel_pos_type='bias', init_values=(1e-5, None) ), ), # Trying to be like the CoAtNet paper configs coatnet_0=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 3, 5, 2), stem_width=64, head_hidden_size=768, ), coatnet_1=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=64, head_hidden_size=768, ), coatnet_2=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=128, head_hidden_size=1024, ), coatnet_3=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=192, head_hidden_size=1536, ), coatnet_4=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 12, 28, 2), stem_width=192, head_hidden_size=1536, ), coatnet_5=MaxxVitCfg( embed_dim=(256, 512, 1280, 2048), depths=(2, 12, 28, 2), stem_width=192, head_hidden_size=2048, ), # Experimental MaxVit configs maxvit_pico_rw=MaxxVitCfg( embed_dim=(32, 64, 128, 256), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(24, 32), **_rw_max_cfg(), ), maxvit_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(), ), maxvit_tiny_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(), ), maxvit_tiny_pm=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('PM',) * 4, stem_width=(32, 64), **_rw_max_cfg(), ), maxvit_rmlp_pico_rw=MaxxVitCfg( embed_dim=(32, 64, 128, 256), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(24, 32), **_rw_max_cfg(rel_pos_type='mlp'), ), maxvit_rmlp_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(rel_pos_type='mlp'), ), maxvit_rmlp_tiny_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(rel_pos_type='mlp'), ), maxvit_rmlp_small_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg( rel_pos_type='mlp', init_values=1e-6, ), ), maxvit_rmlp_base_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=(32, 64), head_hidden_size=768, **_rw_max_cfg( rel_pos_type='mlp', ), ), maxxvit_rmlp_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), weight_init='normal', **_next_cfg(), ), maxxvit_rmlp_tiny_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), 
**_next_cfg(), ), maxxvit_rmlp_small_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(48, 96), **_next_cfg(), ), maxxvitv2_nano_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(48, 96), weight_init='normal', **_next_cfg( no_block_attn=True, rel_pos_type='bias', ), ), maxxvitv2_rmlp_base_rw=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 12, 2), block_type=('M',) * 4, stem_width=(64, 128), **_next_cfg( no_block_attn=True, ), ), maxxvitv2_rmlp_large_rw=MaxxVitCfg( embed_dim=(160, 320, 640, 1280), depths=(2, 6, 16, 2), block_type=('M',) * 4, stem_width=(80, 160), head_hidden_size=1280, **_next_cfg( no_block_attn=True, ), ), # Trying to be like the MaxViT paper configs maxvit_tiny_tf=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=64, stem_bias=True, head_hidden_size=512, **_tf_cfg(), ), maxvit_small_tf=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=64, stem_bias=True, head_hidden_size=768, **_tf_cfg(), ), maxvit_base_tf=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=64, stem_bias=True, head_hidden_size=768, **_tf_cfg(), ), maxvit_large_tf=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=128, stem_bias=True, head_hidden_size=1024, **_tf_cfg(), ), maxvit_xlarge_tf=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=192, stem_bias=True, head_hidden_size=1536, **_tf_cfg(), ), ) def checkpoint_filter_fn(state_dict, model: nn.Module): model_state_dict = model.state_dict() out_dict = {} for k, v in state_dict.items(): if k.endswith('relative_position_bias_table'): m = model.get_submodule(k[:-29]) if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]: v = resize_rel_pos_bias_table( v, new_window_size=m.window_size, new_bias_shape=m.relative_position_bias_table.shape, ) if k in model_state_dict and v.ndim != model_state_dict[k].ndim and v.numel() == model_state_dict[k].numel(): # adapt between conv2d / linear layers assert v.ndim in (2, 4) v = v.reshape(model_state_dict[k].shape) out_dict[k] = v return out_dict def _create_maxxvit(variant, cfg_variant=None, pretrained=False, **kwargs): if cfg_variant is None: if variant in model_cfgs: cfg_variant = variant else: cfg_variant = '_'.join(variant.split('_')[:-1]) return build_model_with_cfg( MaxxVit, variant, pretrained, model_cfg=model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=checkpoint_filter_fn, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'stem.conv1', 'classifier': 'head.fc', 'fixed_input_size': True, **kwargs } default_cfgs = generate_default_cfgs({ # timm specific CoAtNet configs, ImageNet-1k pretrain, fixed rel-pos 'coatnet_pico_rw_224.untrained': _cfg(url=''), 'coatnet_nano_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_nano_rw_224_sw-f53093b4.pth', crop_pct=0.9), 'coatnet_0_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_0_rw_224_sw-a6439706.pth'), 'coatnet_1_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_1_rw_224_sw-5cae1ea8.pth' ), # timm specific CoAtNet configs, ImageNet-12k pretrain w/ 1k fine-tune, fixed rel-pos 'coatnet_2_rw_224.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/'), #'coatnet_3_rw_224.untrained': _cfg(url=''), # Experimental CoAtNet configs w/ ImageNet-12k pretrain -> 1k fine-tune (different norm layers, MLP rel-pos) 'coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/'), 'coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/'), 'coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), # Experimental CoAtNet configs w/ ImageNet-1k train (different norm layers, MLP rel-pos) 'coatnet_bn_0_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_bn_0_rw_224_sw-c228e218.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=0.95), 'coatnet_rmlp_nano_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_nano_rw_224_sw-bd1d51b3.pth', crop_pct=0.9), 'coatnet_rmlp_0_rw_224.untrained': _cfg(url=''), 'coatnet_rmlp_1_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_1_rw_224_sw-9051e6c3.pth'), 'coatnet_rmlp_2_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_2_rw_224_sw-5ccfac55.pth'), 'coatnet_rmlp_3_rw_224.untrained': _cfg(url=''), 'coatnet_nano_cc_224.untrained': _cfg(url=''), 'coatnext_nano_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnext_nano_rw_224_ad-22cb71c2.pth', crop_pct=0.9), # ImagenNet-12k pretrain CoAtNet 'coatnet_2_rw_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), 'coatnet_3_rw_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), 'coatnet_rmlp_1_rw2_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), 'coatnet_rmlp_2_rw_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), # Trying to be like the CoAtNet paper configs (will adapt if 'tf' weights are ever released) 'coatnet_0_224.untrained': _cfg(url=''), 'coatnet_1_224.untrained': _cfg(url=''), 'coatnet_2_224.untrained': _cfg(url=''), 'coatnet_3_224.untrained': _cfg(url=''), 'coatnet_4_224.untrained': _cfg(url=''), 'coatnet_5_224.untrained': _cfg(url=''), # timm specific MaxVit configs, ImageNet-1k pretrain or untrained 'maxvit_pico_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_nano_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_nano_rw_256_sw-fb127241.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_tiny_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_tiny_rw_224_sw-7d0dffeb.pth'), 'maxvit_tiny_rw_256.untrained': _cfg( url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_tiny_pm_256.untrained': 
_cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), # timm specific MaxVit w/ MLP rel-pos, ImageNet-1k pretrain 'maxvit_rmlp_pico_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_pico_rw_256_sw-8d82f2c6.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_nano_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_nano_rw_256_sw-c17bb0d6.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_tiny_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_tiny_rw_256_sw-bbef0ff5.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_small_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth', crop_pct=0.9, ), 'maxvit_rmlp_small_rw_256.untrained': _cfg( url='', input_size=(3, 256, 256), pool_size=(8, 8)), # timm specific MaxVit w/ ImageNet-12k pretrain and 1k fine-tune 'maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', ), 'maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), # timm specific MaxVit w/ ImageNet-12k pretrain 'maxvit_rmlp_base_rw_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, ), # timm MaxxViT configs (ConvNeXt conv blocks mixed with MaxVit transformer blocks) 'maxxvit_rmlp_nano_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_nano_rw_256_sw-0325d459.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvit_rmlp_tiny_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvit_rmlp_small_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth', input_size=(3, 256, 256), pool_size=(8, 8)), # timm MaxxViT-V2 configs (ConvNeXt conv blocks mixed with MaxVit transformer blocks, more width, no block attn) 'maxxvitv2_nano_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/'), 'maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxxvitv2_rmlp_large_rw_224.untrained': _cfg(url=''), 'maxxvitv2_rmlp_base_rw_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), # MaxViT models ported from official Tensorflow impl 'maxvit_tiny_tf_224.in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_tiny_tf_384.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_tiny_tf_512.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_small_tf_224.in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_small_tf_384.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_small_tf_512.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), 
pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_224.in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_base_tf_384.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_512.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_224.in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_large_tf_384.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_512.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_224.in21k': _cfg( hf_hub_id='timm/', num_classes=21843), 'maxvit_base_tf_384.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_512.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_224.in21k': _cfg( hf_hub_id='timm/', num_classes=21843), 'maxvit_large_tf_384.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_512.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), crop_pct=1.0, crop_mode='squash'), 'maxvit_xlarge_tf_224.in21k': _cfg( hf_hub_id='timm/', num_classes=21843), 'maxvit_xlarge_tf_384.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_xlarge_tf_512.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), }) @register_model def coatnet_pico_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_pico_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_nano_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_0_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_1_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_1_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_2_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_2_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_3_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_3_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_bn_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_bn_0_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_nano_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_0_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_1_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_1_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_1_rw2_224(pretrained=False, **kwargs) -> MaxxVit: return 
_create_maxxvit('coatnet_rmlp_1_rw2_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_2_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_2_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_2_rw_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_2_rw_384', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_3_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_3_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_nano_cc_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_nano_cc_224', pretrained=pretrained, **kwargs) @register_model def coatnext_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnext_nano_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_0_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_0_224', pretrained=pretrained, **kwargs) @register_model def coatnet_1_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_1_224', pretrained=pretrained, **kwargs) @register_model def coatnet_2_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_2_224', pretrained=pretrained, **kwargs) @register_model def coatnet_3_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_3_224', pretrained=pretrained, **kwargs) @register_model def coatnet_4_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_4_224', pretrained=pretrained, **kwargs) @register_model def coatnet_5_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_5_224', pretrained=pretrained, **kwargs) @register_model def maxvit_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_pico_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_nano_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_pico_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_small_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_small_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_base_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_base_rw_384', 
pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_pm_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_pm_256', pretrained=pretrained, **kwargs) @register_model def maxxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_nano_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_rmlp_base_rw_224', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_rmlp_base_rw_384', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_rmlp_large_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_rmlp_large_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_tf_224', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_tf_384', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_tf_512', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_small_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_small_tf_224', 'maxvit_small_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_small_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_small_tf_384', 'maxvit_small_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_small_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_small_tf_512', 'maxvit_small_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_base_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_base_tf_224', 'maxvit_base_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_base_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_base_tf_384', 'maxvit_base_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_base_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_base_tf_512', 'maxvit_base_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_large_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_large_tf_224', 'maxvit_large_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_large_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_large_tf_384', 'maxvit_large_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_large_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_large_tf_512', 'maxvit_large_tf', pretrained=pretrained, **kwargs) @register_model def 
maxvit_xlarge_tf_224(pretrained=False, **kwargs) -> MaxxVit:
    return _create_maxxvit('maxvit_xlarge_tf_224', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)


@register_model
def maxvit_xlarge_tf_384(pretrained=False, **kwargs) -> MaxxVit:
    return _create_maxxvit('maxvit_xlarge_tf_384', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)


@register_model
def maxvit_xlarge_tf_512(pretrained=False, **kwargs) -> MaxxVit:
    return _create_maxxvit('maxvit_xlarge_tf_512', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)
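The snippet below is not part of the original maxxvit.py source; it is a minimal usage sketch, assuming timm and torch are installed, showing how the models registered above are typically instantiated through the timm factory API and run on a correctly sized input (the model name and 256x256 input size are taken from the registrations and default configs above).

# --- illustrative usage sketch (not part of maxxvit.py) ---
import torch
import timm

model = timm.create_model('maxvit_nano_rw_256', pretrained=False)  # random weights
model.eval()
x = torch.randn(1, 3, 256, 256)  # these configs declare a fixed 256x256 input size
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # expected: torch.Size([1, 1000])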
pytorch-image-models/timm/models/maxxvit.py/0
{ "file_path": "pytorch-image-models/timm/models/maxxvit.py", "repo_id": "pytorch-image-models", "token_count": 42620 }
175
""" Res2Net and Res2NeXt Adapted from Official Pytorch impl at: https://github.com/gasvn/Res2Net/ Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 """ import math import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .resnet import ResNet __all__ = [] class Bottle2neck(nn.Module): """ Res2Net/Res2NeXT Bottleneck Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py """ expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=26, scale=4, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=None, attn_layer=None, **_, ): super(Bottle2neck, self).__init__() self.scale = scale self.is_first = stride > 1 or downsample is not None self.num_scales = max(1, scale - 1) width = int(math.floor(planes * (base_width / 64.0))) * cardinality self.width = width outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) self.bn1 = norm_layer(width * scale) convs = [] bns = [] for i in range(self.num_scales): convs.append(nn.Conv2d( width, width, kernel_size=3, stride=stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False)) bns.append(norm_layer(width)) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) if self.is_first: # FIXME this should probably have count_include_pad=False, but hurts original weights self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) else: self.pool = None self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False) self.bn3 = norm_layer(outplanes) self.se = attn_layer(outplanes) if attn_layer is not None else None self.relu = act_layer(inplace=True) self.downsample = downsample def zero_init_last(self): if getattr(self.bn3, 'weight', None) is not None: nn.init.zeros_(self.bn3.weight) def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) spx = torch.split(out, self.width, 1) spo = [] sp = spx[0] # redundant, for torchscript for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): if i == 0 or self.is_first: sp = spx[i] else: sp = sp + spx[i] sp = conv(sp) sp = bn(sp) sp = self.relu(sp) spo.append(sp) if self.scale > 1: if self.pool is not None: # self.is_first == True, None check for torchscript spo.append(self.pool(spx[-1])) else: spo.append(spx[-1]) out = torch.cat(spo, 1) out = self.conv3(out) out = self.bn3(out) if self.se is not None: out = self.se(out) if self.downsample is not None: shortcut = self.downsample(x) out += shortcut out = self.relu(out) return out def _create_res2net(variant, pretrained=False, **kwargs): return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 'res2net50_26w_4s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_48w_2s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_14w_8s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_26w_6s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_26w_8s.in1k': _cfg(hf_hub_id='timm/'), 'res2net101_26w_4s.in1k': _cfg(hf_hub_id='timm/'), 
    'res2next50.in1k': _cfg(hf_hub_id='timm/'),
    'res2net50d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'),
    'res2net101d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'),
})


@register_model
def res2net50_26w_4s(pretrained=False, **kwargs) -> ResNet:
    """Constructs a Res2Net-50 26w4s model.
    """
    model_args = dict(
        block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4))
    return _create_res2net('res2net50_26w_4s', pretrained, **dict(model_args, **kwargs))


@register_model
def res2net101_26w_4s(pretrained=False, **kwargs) -> ResNet:
    """Constructs a Res2Net-101 26w4s model.
    """
    model_args = dict(
        block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4))
    return _create_res2net('res2net101_26w_4s', pretrained, **dict(model_args, **kwargs))


@register_model
def res2net50_26w_6s(pretrained=False, **kwargs) -> ResNet:
    """Constructs a Res2Net-50 26w6s model.
    """
    model_args = dict(
        block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6))
    return _create_res2net('res2net50_26w_6s', pretrained, **dict(model_args, **kwargs))


@register_model
def res2net50_26w_8s(pretrained=False, **kwargs) -> ResNet:
    """Constructs a Res2Net-50 26w8s model.
    """
    model_args = dict(
        block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8))
    return _create_res2net('res2net50_26w_8s', pretrained, **dict(model_args, **kwargs))


@register_model
def res2net50_48w_2s(pretrained=False, **kwargs) -> ResNet:
    """Constructs a Res2Net-50 48w2s model.
    """
    model_args = dict(
        block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2))
    return _create_res2net('res2net50_48w_2s', pretrained, **dict(model_args, **kwargs))


@register_model
def res2net50_14w_8s(pretrained=False, **kwargs) -> ResNet:
    """Constructs a Res2Net-50 14w8s model.
    """
    model_args = dict(
        block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8))
    return _create_res2net('res2net50_14w_8s', pretrained, **dict(model_args, **kwargs))


@register_model
def res2next50(pretrained=False, **kwargs) -> ResNet:
    """Constructs a Res2NeXt-50 4s model.
    """
    model_args = dict(
        block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4))
    return _create_res2net('res2next50', pretrained, **dict(model_args, **kwargs))


@register_model
def res2net50d(pretrained=False, **kwargs) -> ResNet:
    """Constructs a Res2Net-50-D model (deep stem, avg-pool downsample).
    """
    model_args = dict(
        block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, stem_type='deep',
        avg_down=True, stem_width=32, block_args=dict(scale=4))
    return _create_res2net('res2net50d', pretrained, **dict(model_args, **kwargs))


@register_model
def res2net101d(pretrained=False, **kwargs) -> ResNet:
    """Constructs a Res2Net-101-D model (deep stem, avg-pool downsample).
    """
    model_args = dict(
        block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, stem_type='deep',
        avg_down=True, stem_width=32, block_args=dict(scale=4))
    return _create_res2net('res2net101d', pretrained, **dict(model_args, **kwargs))
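The following is not part of the original res2net.py source; it is a small illustrative sketch, assuming timm and torch are available, showing how a registered variant is built and what the multi-scale channel split inside Bottle2neck looks like (tensor sizes correspond to the first stage of res2net50_26w_4s, where width=26 and scale=4).

# --- illustrative usage sketch (not part of res2net.py) ---
import torch
import timm

model = timm.create_model('res2net50_26w_4s', pretrained=False)  # random weights
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out.shape)  # expected: torch.Size([1, 1000])

# Core Bottle2neck idea: split the expanded features into `scale` chunks and process
# them hierarchically, each 3x3 conv also receiving the previous chunk's output.
feat = torch.randn(1, 104, 56, 56)      # width * scale = 26 * 4 = 104 channels
chunks = torch.split(feat, 26, dim=1)   # 4 chunks of 26 channels each
print(len(chunks), chunks[0].shape)     # 4 torch.Size([1, 26, 56, 56])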
pytorch-image-models/timm/models/res2net.py/0
{ "file_path": "pytorch-image-models/timm/models/res2net.py", "repo_id": "pytorch-image-models", "token_count": 3659 }
176
"""VGG Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for timm functionality. Copyright 2021 Ross Wightman """ from typing import Union, List, Dict, Any, cast import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs __all__ = ['VGG'] cfgs: Dict[str, List[Union[str, int]]] = { 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], } @register_notrace_module # reason: FX can't symbolically trace control flow in forward method class ConvMlp(nn.Module): def __init__( self, in_features=512, out_features=4096, kernel_size=7, mlp_ratio=1.0, drop_rate: float = 0.2, act_layer: nn.Module = None, conv_layer: nn.Module = None, ): super(ConvMlp, self).__init__() self.input_kernel_size = kernel_size mid_features = int(out_features * mlp_ratio) self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True) self.act1 = act_layer(True) self.drop = nn.Dropout(drop_rate) self.fc2 = conv_layer(mid_features, out_features, 1, bias=True) self.act2 = act_layer(True) def forward(self, x): if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size: # keep the input size >= 7x7 output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1])) x = F.adaptive_avg_pool2d(x, output_size) x = self.fc1(x) x = self.act1(x) x = self.drop(x) x = self.fc2(x) x = self.act2(x) return x class VGG(nn.Module): def __init__( self, cfg: List[Any], num_classes: int = 1000, in_chans: int = 3, output_stride: int = 32, mlp_ratio: float = 1.0, act_layer: nn.Module = nn.ReLU, conv_layer: nn.Module = nn.Conv2d, norm_layer: nn.Module = None, global_pool: str = 'avg', drop_rate: float = 0., ) -> None: super(VGG, self).__init__() assert output_stride == 32 self.num_classes = num_classes self.num_features = 4096 self.drop_rate = drop_rate self.grad_checkpointing = False self.use_norm = norm_layer is not None self.feature_info = [] prev_chs = in_chans net_stride = 1 pool_layer = nn.MaxPool2d layers: List[nn.Module] = [] for v in cfg: last_idx = len(layers) - 1 if v == 'M': self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}')) layers += [pool_layer(kernel_size=2, stride=2)] net_stride *= 2 else: v = cast(int, v) conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1) if norm_layer is not None: layers += [conv2d, norm_layer(v), act_layer(inplace=True)] else: layers += [conv2d, act_layer(inplace=True)] prev_chs = v self.features = nn.Sequential(*layers) self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}')) self.pre_logits = ConvMlp( prev_chs, self.num_features, 7, mlp_ratio=mlp_ratio, drop_rate=drop_rate, act_layer=act_layer, conv_layer=conv_layer, ) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, ) self._initialize_weights() @torch.jit.ignore def group_matcher(self, coarse=False): # this treats BN layers as 
separate groups for bn variants, a lot of effort to fix that return dict(stem=r'^features\.0', blocks=r'^features\.(\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.head = ClassifierHead( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.features(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False): x = self.pre_logits(x) return x if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def _initialize_weights(self) -> None: for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def _filter_fn(state_dict): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} for k, v in state_dict.items(): k_r = k k_r = k_r.replace('classifier.0', 'pre_logits.fc1') k_r = k_r.replace('classifier.3', 'pre_logits.fc2') k_r = k_r.replace('classifier.6', 'head.fc') if 'classifier.0.weight' in k: v = v.reshape(-1, 512, 7, 7) if 'classifier.3.weight' in k: v = v.reshape(-1, 4096, 1, 1) out_dict[k_r] = v return out_dict def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG: cfg = variant.split('_')[0] # NOTE: VGG is one of few models with stride==1 features w/ 6 out_indices [0..5] out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4, 5)) model = build_model_with_cfg( VGG, variant, pretrained, model_cfg=cfgs[cfg], feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), pretrained_filter_fn=_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'features.0', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'vgg11.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg13.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg16.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg19.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg11_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg13_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg16_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg19_bn.tv_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 11-layer model (configuration "A") from `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg11', pretrained=pretrained, **model_args) @register_model def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 11-layer model (configuration "A") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg11_bn', pretrained=pretrained, 
**model_args) @register_model def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 13-layer model (configuration "B") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg13', pretrained=pretrained, **model_args) @register_model def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 13-layer model (configuration "B") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args) @register_model def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 16-layer model (configuration "D") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg16', pretrained=pretrained, **model_args) @register_model def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 16-layer model (configuration "D") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args) @register_model def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 19-layer model (configuration "E") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg19', pretrained=pretrained, **model_args) @register_model def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 19-layer model (configuration 'E') with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args)
pytorch-image-models/timm/models/vgg.py/0
{ "file_path": "pytorch-image-models/timm/models/vgg.py", "repo_id": "pytorch-image-models", "token_count": 5201 }
177
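A minimal usage sketch for the VGG implementation above, assuming `timm` is installed; it relies only on the public `timm.create_model` API and the model names registered in the default cfgs (`vgg11`, `vgg16_bn`, ...). Output shapes follow the 224x224 input size declared in `_cfg`.

```python
import torch
import timm

# Build an untrained VGG-11 from the registry defined above (pretrained=False
# avoids downloading the torchvision-converted weights from the HF hub).
model = timm.create_model('vgg11', pretrained=False, num_classes=10)
model.eval()

x = torch.randn(1, 3, 224, 224)   # input size from the default cfg
with torch.no_grad():
    logits = model(x)             # features -> ConvMlp pre_logits -> ClassifierHead
print(logits.shape)               # torch.Size([1, 10])

# The same entry can serve as a feature backbone; out_indices follows the
# six stride-tagged stages noted in _create_vgg.
backbone = timm.create_model('vgg11', pretrained=False, features_only=True)
with torch.no_grad():
    feats = backbone(x)
print([f.shape for f in feats])
```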
""" AdamW Optimizer Impl copied from PyTorch master NOTE: Builtin optim.AdamW is used by the factory, this impl only serves as a Python based reference, will be removed someday """ import math import torch from torch.optim.optimizer import Optimizer class AdamW(Optimizer): r"""Implements AdamW algorithm. The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_. The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay coefficient (default: 1e-2) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad) super(AdamW, self).__init__(params, defaults) def __setstate__(self, state): super(AdamW, self).__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue # Perform stepweight decay p.data.mul_(1 - group['lr'] * group['weight_decay']) # Perform optimization step grad = p.grad if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') amsgrad = group['amsgrad'] state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. 
values state['max_exp_avg_sq'] = torch.zeros_like(p) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. of gradient denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) else: denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) step_size = group['lr'] / bias_correction1 p.addcdiv_(exp_avg, denom, value=-step_size) return loss
pytorch-image-models/timm/optim/adamw.py/0
{ "file_path": "pytorch-image-models/timm/optim/adamw.py", "repo_id": "pytorch-image-models", "token_count": 2417 }
178
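A small sketch exercising the reference AdamW above on a toy regression task. The import path is an assumption that simply mirrors the file location (`timm/optim/adamw.py`); as the file's own note says, the built-in `torch.optim.AdamW` is what the timm factory actually uses, so this is illustration only.

```python
import torch
import torch.nn.functional as F
from timm.optim.adamw import AdamW  # path assumed from the file location above

torch.manual_seed(0)
model = torch.nn.Linear(4, 1)
opt = AdamW(model.parameters(), lr=1e-2, weight_decay=1e-2)

x = torch.randn(64, 4)
y = x.sum(dim=1, keepdim=True)

for step in range(100):
    opt.zero_grad()
    loss = F.mse_loss(model(x), y)
    loss.backward()
    # Decoupled weight decay: p is scaled by (1 - lr * wd) before the Adam update.
    opt.step()

print(f"final loss: {loss.item():.4f}")
```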
""" Cosine Scheduler Cosine LR schedule with warmup, cycle/restarts, noise, k-decay. Hacked together by / Copyright 2021 Ross Wightman """ import logging import math import numpy as np import torch from .scheduler import Scheduler _logger = logging.getLogger(__name__) class CosineLRScheduler(Scheduler): """ Cosine decay with restarts. This is described in the paper https://arxiv.org/abs/1608.03983. Inspiration from https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 """ def __init__( self, optimizer: torch.optim.Optimizer, t_initial: int, lr_min: float = 0., cycle_mul: float = 1., cycle_decay: float = 1., cycle_limit: int = 1, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, k_decay=1.0, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) assert t_initial > 0 assert lr_min >= 0 if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: _logger.warning( "Cosine annealing scheduler will have no effect on the learning " "rate since t_initial = t_mul = eta_mul = 1.") self.t_initial = t_initial self.lr_min = lr_min self.cycle_mul = cycle_mul self.cycle_decay = cycle_decay self.cycle_limit = cycle_limit self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix self.k_decay = k_decay if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t): if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t if self.cycle_mul != 1: i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) t_i = self.cycle_mul ** i * self.t_initial t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial else: i = t // self.t_initial t_i = self.t_initial t_curr = t - (self.t_initial * i) gamma = self.cycle_decay ** i lr_max_values = [v * gamma for v in self.base_values] k = self.k_decay if i < self.cycle_limit: lrs = [ self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k)) for lr_max in lr_max_values ] else: lrs = [self.lr_min for _ in self.base_values] return lrs def get_cycle_length(self, cycles=0): cycles = max(1, cycles or self.cycle_limit) if self.cycle_mul == 1.0: return self.t_initial * cycles else: return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
pytorch-image-models/timm/scheduler/cosine_lr.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/cosine_lr.py", "repo_id": "pytorch-image-models", "token_count": 2031 }
179
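A short sketch of what the scheduler above produces, sampling `_get_lr` directly (the method defined in the class shown); the warmup and cycle values are arbitrary and chosen only to make the warmup ramp and the cosine decay visible.

```python
import torch
from timm.scheduler.cosine_lr import CosineLRScheduler  # path from the file above

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
sched = CosineLRScheduler(
    opt,
    t_initial=10,        # first cycle length (epochs, since t_in_epochs=True)
    lr_min=1e-4,
    cycle_mul=1.0,
    cycle_limit=2,       # allow one restart
    warmup_t=2,
    warmup_lr_init=1e-3,
)

# Two warmup steps, then cosine decay toward lr_min; t >= 10 starts the second cycle.
for t in range(12):
    print(t, [round(lr, 5) for lr in sched._get_lr(t)])
```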
""" Logging helpers Hacked together by / Copyright 2020 Ross Wightman """ import logging import logging.handlers class FormatterNoInfo(logging.Formatter): def __init__(self, fmt='%(levelname)s: %(message)s'): logging.Formatter.__init__(self, fmt) def format(self, record): if record.levelno == logging.INFO: return str(record.getMessage()) return logging.Formatter.format(self, record) def setup_default_logging(default_level=logging.INFO, log_path=''): console_handler = logging.StreamHandler() console_handler.setFormatter(FormatterNoInfo()) logging.root.addHandler(console_handler) logging.root.setLevel(default_level) if log_path: file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3) file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s") file_handler.setFormatter(file_formatter) logging.root.addHandler(file_handler)
pytorch-image-models/timm/utils/log.py/0
{ "file_path": "pytorch-image-models/timm/utils/log.py", "repo_id": "pytorch-image-models", "token_count": 383 }
180
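A tiny sketch of the helper above, assuming the module path implied by the file location: INFO records are printed bare on the console, other levels keep the `LEVEL:` prefix, and passing `log_path` adds a rotating file handler with the timestamped format.

```python
import logging
from timm.utils.log import setup_default_logging  # path assumed from the file above

setup_default_logging(log_path='train.log')  # log_path is optional

logger = logging.getLogger('train')
logger.info('starting epoch 1')      # console: "starting epoch 1"
logger.warning('lr is very small')   # console: "WARNING: lr is very small"
```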
/// Inspired by https://github.com/orhun/rust-tui-template/blob/472aa515119d4c94903eac12d9784417281dc7f5/src/event.rs use crossterm::event; use std::time::{Duration, Instant}; use tokio::sync::{broadcast, mpsc}; /// Events #[derive(Debug)] pub(crate) enum Event { /// Terminal tick. Tick, /// Key press. Key(event::KeyEvent), /// Terminal resize. Resize(u16, u16), } pub(crate) async fn terminal_event_task( fps: u32, event_sender: mpsc::Sender<Event>, mut shutdown_receiver: broadcast::Receiver<()>, _shutdown_guard_sender: mpsc::Sender<()>, ) { // End task if a message is received on shutdown_receiver // _shutdown_guard_sender will be dropped once the task is finished tokio::select! { _ = event_loop(fps, event_sender) => { }, _ = shutdown_receiver.recv() => {} } } /// Main event loop async fn event_loop(fps: u32, event_sender: mpsc::Sender<Event>) { // Frame budget let per_frame = Duration::from_secs(1) / fps; // When was last frame executed let mut last_frame = Instant::now(); loop { // Sleep to avoid blocking the thread for too long if let Some(sleep) = per_frame.checked_sub(last_frame.elapsed()) { tokio::time::sleep(sleep).await; } // Get crossterm event and send a new one over the channel if event::poll(Duration::from_secs(0)).expect("no events available") { match event::read().expect("unable to read event") { event::Event::Key(e) => event_sender.send(Event::Key(e)).await.unwrap_or(()), event::Event::Resize(w, h) => { event_sender.send(Event::Resize(w, h)).await.unwrap_or(()) } _ => (), } } // Frame budget exceeded if last_frame.elapsed() >= per_frame { // Send tick event_sender.send(Event::Tick).await.unwrap_or(()); // Rest last_frame time last_frame = Instant::now(); } } }
text-generation-inference/benchmark/src/event.rs/0
{ "file_path": "text-generation-inference/benchmark/src/event.rs", "repo_id": "text-generation-inference", "token_count": 922 }
181
# Quantization TGI offers GPTQ and bits-and-bytes quantization to quantize large language models. ## Quantization with GPTQ GPTQ is a post-training quantization method to make the model smaller. It quantizes the layers by finding a compressed version of that weight, that will yield a minimum mean squared error like below 👇 Given a layer \\(l\\) with weight matrix \\(W_{l}\\) and layer input \\(X_{l}\\), find quantized weight \\(\\hat{W}_{l}\\): $$({\hat{W}_{l}}^{*} = argmin_{\hat{W_{l}}} ||W_{l}X-\hat{W}_{l}X||^{2}_{2})$$ TGI allows you to both run an already GPTQ quantized model (see available models [here](https://huggingface.co/models?search=gptq)) or quantize a model of your choice using quantization script. You can run a quantized model by simply passing --quantize like below 👇 ```bash docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --quantize gptq ``` Note that TGI's GPTQ implementation doesn't use [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) under the hood. However, models quantized using AutoGPTQ or Optimum can still be served by TGI. To quantize a given model using GPTQ with a calibration dataset, simply run ```bash text-generation-server quantize tiiuae/falcon-40b /data/falcon-40b-gptq # Add --upload-to-model-id MYUSERNAME/falcon-40b to push the created model to the hub directly ``` This will create a new directory with the quantized files which you can use with, ```bash text-generation-launcher --model-id /data/falcon-40b-gptq/ --sharded true --num-shard 2 --quantize gptq ``` You can learn more about the quantization options by running `text-generation-server quantize --help`. If you wish to do more with GPTQ models (e.g. train an adapter on top), you can read about transformers GPTQ integration [here](https://huggingface.co/blog/gptq-integration). You can learn more about GPTQ from the [paper](https://arxiv.org/pdf/2210.17323.pdf). ## Quantization with bitsandbytes bitsandbytes is a library used to apply 8-bit and 4-bit quantization to models. Unlike GPTQ quantization, bitsandbytes doesn't require a calibration dataset or any post-processing – weights are automatically quantized on load. However, inference with bitsandbytes is slower than GPTQ or FP16 precision. 8-bit quantization enables multi-billion parameter scale models to fit in smaller hardware without degrading performance too much. In TGI, you can use 8-bit quantization by adding `--quantize bitsandbytes` like below 👇 ```bash docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --quantize bitsandbytes ``` 4-bit quantization is also possible with bitsandbytes. You can choose one of the following 4-bit data types: 4-bit float (`fp4`), or 4-bit `NormalFloat` (`nf4`). These data types were introduced in the context of parameter-efficient fine-tuning, but you can apply them for inference by automatically converting the model weights on load. 
In TGI, you can use 4-bit quantization by adding `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` like below 👇 ```bash docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --quantize bitsandbytes-nf4 ``` You can get more information about 8-bit quantization by reading this [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), and 4-bit quantization by reading [this blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes).
text-generation-inference/docs/source/conceptual/quantization.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/quantization.md", "repo_id": "text-generation-inference", "token_count": 1114 }
182
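The GPTQ objective quoted in the document above minimizes the layer-output reconstruction error ||W_l X - W_hat_l X||^2. The sketch below only illustrates that quantity using naive round-to-nearest 4-bit quantization; it is not the GPTQ solver itself, which additionally updates the remaining weights to compensate for each rounding step.

```python
import torch

torch.manual_seed(0)
W = torch.randn(128, 512)    # layer weight W_l
X = torch.randn(512, 1024)   # calibration activations X_l

def quantize_rtn(w, n_bits=4):
    """Per-row symmetric round-to-nearest quantization (illustration only)."""
    q_max = 2 ** (n_bits - 1) - 1
    scale = w.abs().amax(dim=1, keepdim=True) / q_max
    return (w / scale).round().clamp(-q_max - 1, q_max) * scale

W_hat = quantize_rtn(W)

# The quantity GPTQ minimizes per layer, here simply measured for RTN:
err = torch.norm(W @ X - W_hat @ X) ** 2
print(f"reconstruction error ||WX - W_hat X||^2 = {err.item():.1f}")
```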
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 338, "logprob": -9.0859375, "text": "is" }, { "id": 21784, "logprob": -10.90625, "text": "Deep" }, { "id": 29257, "logprob": -2.65625, "text": "Learning" }, { "id": 29973, "logprob": -4.8085938, "text": "?" } ], "seed": 0, "tokens": [ { "id": 13, "logprob": -0.19958496, "special": false, "text": "\n" }, { "id": 4013, "logprob": -2.203125, "special": false, "text": "This" }, { "id": 1139, "logprob": -0.23693848, "special": false, "text": " question" }, { "id": 756, "logprob": 0.0, "special": false, "text": " has" }, { "id": 1063, "logprob": -0.076538086, "special": false, "text": " been" }, { "id": 4433, "logprob": 0.0, "special": false, "text": " asked" }, { "id": 1784, "logprob": -1.1367188, "special": false, "text": " many" }, { "id": 3064, "logprob": 0.0, "special": false, "text": " times" }, { "id": 322, "logprob": -1.7460938, "special": false, "text": " and" }, { "id": 306, "logprob": 0.0, "special": false, "text": " I" } ], "top_tokens": null }, "generated_text": "What is Deep Learning?\nThis question has been asked many times and I" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq_all_params.json", "repo_id": "text-generation-inference", "token_count": 1165 }
183
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -12.9140625, "text": "Test" }, { "id": 2159, "logprob": -10.7578125, "text": "request" } ], "seed": null, "tokens": [ { "id": 28747, "logprob": -0.54785156, "special": false, "text": ":" }, { "id": 3169, "logprob": -1.4091797, "special": false, "text": " Let" }, { "id": 307, "logprob": -3.0273438, "special": false, "text": " n" }, { "id": 327, "logprob": -0.94433594, "special": false, "text": " =" }, { "id": 28705, "logprob": -0.81347656, "special": false, "text": " " }, { "id": 28740, "logprob": -1.2958984, "special": false, "text": "1" }, { "id": 28734, "logprob": -2.0644531, "special": false, "text": "0" }, { "id": 387, "logprob": -1.9580078, "special": false, "text": " -" }, { "id": 28705, "logprob": -0.5073242, "special": false, "text": " " }, { "id": 28740, "logprob": -1.1816406, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": ": Let n = 10 - 1" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral.json", "repo_id": "text-generation-inference", "token_count": 1050 }
184
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 20, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -9.0234375, "text": " ge" }, { "id": 21017, "logprob": -9.0859375, "text": "ometric" }, { "id": 81, "logprob": -0.25830078, "text": "_" }, { "id": 6009, "logprob": -2.1875, "text": "mean" }, { "id": 26, "logprob": -0.30004883, "text": "(" }, { "id": 62, "logprob": -5.6171875, "text": "L" }, { "id": 44, "logprob": -3.078125, "text": ":" }, { "id": 1682, "logprob": -0.68066406, "text": " List" }, { "id": 77, "logprob": -0.38745117, "text": "[" }, { "id": 1808, "logprob": -0.9453125, "text": "float" }, { "id": 10794, "logprob": -2.5371094, "text": "]):" } ], "seed": 0, "tokens": [ { "id": 284, "logprob": -0.051635742, "special": false, "text": "\n " }, { "id": 442, "logprob": 0.0, "special": false, "text": " return" }, { "id": 11665, "logprob": -1.2236328, "special": false, "text": " reduce" }, { "id": 26, "logprob": 0.0, "special": false, "text": "(" }, { "id": 5962, "logprob": 0.0, "special": false, "text": "lambda" }, { "id": 816, "logprob": 0.0, "special": false, "text": " x" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 533, "logprob": 0.0, "special": false, "text": " y" }, { "id": 44, "logprob": 0.0, "special": false, "text": ":" }, { "id": 816, "logprob": 0.0, "special": false, "text": " x" }, { "id": 319, "logprob": 0.0, "special": false, "text": " *" }, { "id": 533, "logprob": 0.0, "special": false, "text": " y" }, { "id": 30, "logprob": 0.0, "special": false, "text": "," }, { "id": 498, "logprob": 0.0, "special": false, "text": " L" }, { "id": 27, "logprob": 0.0, "special": false, "text": ")" }, { "id": 203, "logprob": -0.12695312, "special": false, "text": "\n" }, { "id": 203, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 589, "logprob": 0.0, "special": false, "text": "def" }, { "id": 3226, "logprob": 0.0, "special": false, "text": " ge" }, { "id": 21017, "logprob": 0.0, "special": false, "text": "ometric" } ] }, "generated_text": "\n return reduce(lambda x, y: x * y, L)\n\ndef geometric" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json", "repo_id": "text-generation-inference", "token_count": 2294 }
185
import pytest @pytest.fixture(scope="module") def bloom_560m_sharded_handle(launcher): with launcher("bigscience/bloom-560m", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def bloom_560m_sharded(bloom_560m_sharded_handle): await bloom_560m_sharded_handle.health(240) return bloom_560m_sharded_handle.client @pytest.mark.asyncio async def test_bloom_560m_sharded(bloom_560m_sharded, response_snapshot): response = await bloom_560m_sharded.generate( "Pour déguster un ortolan, il faut tout d'abord", max_new_tokens=10, top_p=0.9, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_bloom_560m_sharded_load( bloom_560m_sharded, generate_load, response_snapshot ): responses = await generate_load( bloom_560m_sharded, "Pour déguster un ortolan, il faut tout d'abord", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py", "repo_id": "text-generation-inference", "token_count": 511 }
186
import pytest @pytest.fixture(scope="module") def mt0_base_handle(launcher): with launcher("bigscience/mt0-base") as handle: yield handle @pytest.fixture(scope="module") async def mt0_base(mt0_base_handle): await mt0_base_handle.health(300) return mt0_base_handle.client @pytest.mark.asyncio async def test_mt0_base(mt0_base, response_snapshot): response = await mt0_base.generate( "Why is the sky blue?", max_new_tokens=10, top_p=0.9, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 5 assert response == response_snapshot @pytest.mark.asyncio async def test_mt0_base_all_params(mt0_base, response_snapshot): response = await mt0_base.generate( "Why is the sky blue?", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 9 assert response == response_snapshot @pytest.mark.asyncio async def test_mt0_base_load(mt0_base, generate_load, response_snapshot): responses = await generate_load( mt0_base, "Why is the sky blue?", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_mt0_base.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_mt0_base.py", "repo_id": "text-generation-inference", "token_count": 713 }
187
syntax = "proto3"; package generate.v2; service TextGenerationService { /// Model Info rpc Info (InfoRequest) returns (InfoResponse) {} /// Service discovery rpc ServiceDiscovery (ServiceDiscoveryRequest) returns (ServiceDiscoveryResponse) {} /// Empties batch cache rpc ClearCache (ClearCacheRequest) returns (ClearCacheResponse); /// Remove requests from a cached batch rpc FilterBatch (FilterBatchRequest) returns (FilterBatchResponse); /// Warmup the model and compute max cache size rpc Warmup (WarmupRequest) returns (WarmupResponse); /// Prefill batch and decode first token rpc Prefill (PrefillRequest) returns (PrefillResponse); /// Decode token for a list of prefilled batches rpc Decode (DecodeRequest) returns (DecodeResponse); /// Health check rpc Health (HealthRequest) returns (HealthResponse); } message HealthRequest {} message HealthResponse {} /// Empty request message InfoRequest {} message InfoResponse { bool requires_padding = 1; string dtype = 2; string device_type = 3; optional uint32 window_size = 4; uint32 speculate = 5; } /// Empty request message ServiceDiscoveryRequest {} message ServiceDiscoveryResponse { /// Other shards urls repeated string urls = 1; } message ClearCacheRequest { /// Optional batch id optional uint64 id = 1; } /// Empty response message ClearCacheResponse {} message NextTokenChooserParameters { /// exponential scaling output probability distribution float temperature = 1; /// restricting to the k highest probability elements uint32 top_k = 2; /// restricting to top tokens summing to prob_cut_off <= prob_cut_off float top_p = 3; /// restricting to top tokens summing to prob_cut_off <= prob_cut_off float typical_p = 4; /// apply sampling on the logits bool do_sample = 5; /// random seed for sampling uint64 seed = 6; /// repetition penalty float repetition_penalty = 7; /// token watermarking using "A Watermark for Large Language Models" bool watermark = 8; } message StoppingCriteriaParameters { /// Maximum number of generated tokens uint32 max_new_tokens = 1; /// Optional stopping sequences repeated string stop_sequences = 2; /// Ignore end of sequence token /// used for benchmarking bool ignore_eos_token = 3; } message Request { /// Request ID uint64 id = 1; /// The generation context string inputs = 2; /// Context truncation uint32 truncate = 3; /// Next Token Chooser Parameters NextTokenChooserParameters parameters = 4; /// Stopping Criteria Parameters StoppingCriteriaParameters stopping_parameters = 5; /// Return prefill logprobs bool prefill_logprobs = 6; /// Return most likely n tokens uint32 top_n_tokens = 7; } message Batch { /// Batch ID uint64 id = 1; /// Individual requests repeated Request requests = 2; /// Batch size (==len(requests)) uint32 size = 3; /// Maximum number of tokens this batch will grow to uint32 max_tokens = 4; } message CachedBatch { /// Batch ID uint64 id = 1; /// Individual requests ids repeated uint64 request_ids = 2; /// Batch size (==len(requests)) uint32 size = 3; /// Maximum number of tokens this batch will grow to uint32 max_tokens = 4; } enum FinishReason { FINISH_REASON_LENGTH = 0; FINISH_REASON_EOS_TOKEN = 1; FINISH_REASON_STOP_SEQUENCE = 2; } message GeneratedText { /// Output string text = 1; /// Number of generated tokens uint32 generated_tokens = 2; /// Finish reason FinishReason finish_reason = 3; /// Seed optional uint64 seed = 4; } message Tokens { /// Token IDs repeated uint32 ids = 1; /// Logprobs repeated float logprobs = 2; /// tokens repeated string texts = 3; /// special repeated bool is_special = 4; } 
message Generation { /// Request ID uint64 request_id = 1; /// Prefill tokens (optional) Tokens prefill_tokens = 2; Tokens tokens = 3; /// Complete generated text optional GeneratedText generated_text = 4; /// Top tokens repeated Tokens top_tokens = 5; } message FilterBatchRequest { /// Batch ID uint64 batch_id = 1; /// Requests to keep repeated uint64 request_ids = 2; } message FilterBatchResponse { /// Filtered Batch (cached) CachedBatch batch = 1; } message PrefillRequest { /// Batch Batch batch = 1; } message PrefillResponse { /// Generation repeated Generation generations = 1; /// Next batch (cached) optional CachedBatch batch = 2; /// Forward elapsed time in nanoseconds uint64 forward_ns = 3; /// Decode elapsed time in nanoseconds uint64 decode_ns = 4; /// Total elapsed time in nanoseconds uint64 total_ns = 5; } message DecodeRequest { /// Cached batches repeated CachedBatch batches = 1; } message DecodeResponse { /// Decodes repeated Generation generations = 1; /// Next batch (cached) optional CachedBatch batch = 2; /// Forward elapsed time in nanoseconds uint64 forward_ns = 3; /// Decode elapsed time in nanoseconds uint64 decode_ns = 4; /// Total elapsed time in nanoseconds uint64 total_ns = 5; /// Concatenate elapsed time in nanoseconds optional uint64 concat_ns = 6; } message WarmupRequest { /// Batch to warmup on Batch batch = 1; uint32 max_input_length = 2; uint32 max_prefill_tokens = 3; uint32 max_total_tokens = 4; } /// Empty response message WarmupResponse { /// Maximum number of tokens supported by the model optional uint32 max_supported_total_tokens = 1; }
text-generation-inference/proto/generate.proto/0
{ "file_path": "text-generation-inference/proto/generate.proto", "repo_id": "text-generation-inference", "token_count": 1964 }
188
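A hedged sketch of using the schema above from Python. It assumes stubs were generated with grpcio-tools, and that the generated module is named `generate_pb2` following protoc's default `<file>_pb2` convention; field names come directly from the messages shown.

```python
# Assumed generation step, e.g.:
#   python -m grpc_tools.protoc -I proto --python_out=. --grpc_python_out=. proto/generate.proto
import generate_pb2  # module name assumed from protoc's default naming

params = generate_pb2.NextTokenChooserParameters(
    temperature=0.7, top_k=50, top_p=0.95, typical_p=1.0,
    do_sample=True, seed=42, repetition_penalty=1.1, watermark=False,
)
stopping = generate_pb2.StoppingCriteriaParameters(max_new_tokens=32)

request = generate_pb2.Request(
    id=0, inputs="Hello", truncate=128,
    parameters=params, stopping_parameters=stopping,
)
batch = generate_pb2.Batch(id=0, requests=[request], size=1, max_tokens=160)
print(batch)
```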
use crate::infer::InferError; use crate::infer::InferStreamResponse; use crate::validation::ValidGenerateRequest; use nohash_hasher::{BuildNoHashHasher, IntMap}; use std::cmp::min; use std::collections::VecDeque; use text_generation_client::{Batch, Request}; use tokio::sync::{mpsc, oneshot}; use tokio::time::Instant; use tracing::{info_span, instrument, Span}; /// Queue entry #[derive(Debug)] pub(crate) struct Entry { /// Request pub request: ValidGenerateRequest, /// Response sender to communicate between the Infer struct and the batching_task pub response_tx: mpsc::UnboundedSender<Result<InferStreamResponse, InferError>>, /// Span that will live as long as entry pub span: Span, /// Temporary span used as a guard when logging inference, wait times... pub temp_span: Option<Span>, /// Instant when this entry was queued pub queue_time: Instant, /// Instant when this entry was added to a batch pub batch_time: Option<Instant>, } /// Request Queue #[derive(Debug, Clone)] pub(crate) struct Queue { /// Channel to communicate with the background queue task queue_sender: mpsc::UnboundedSender<QueueCommand>, } impl Queue { pub(crate) fn new( requires_padding: bool, block_size: u32, window_size: Option<u32>, speculate: u32, ) -> Self { // Create channel let (queue_sender, queue_receiver) = mpsc::unbounded_channel(); // Launch background queue task tokio::spawn(queue_task( requires_padding, block_size, window_size, speculate, queue_receiver, )); Self { queue_sender } } /// Append an entry to the queue #[instrument(skip_all)] pub(crate) fn append(&self, entry: Entry) { // Send append command to the background task managing the state // Unwrap is safe here self.queue_sender .send(QueueCommand::Append(Box::new(entry), Span::current())) .unwrap(); } // Get the next batch #[instrument(skip(self))] pub(crate) async fn next_batch( &self, min_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, ) -> Option<NextBatch> { // Create response channel let (response_sender, response_receiver) = oneshot::channel(); // Send next batch command to the background task managing the state // Unwrap is safe here self.queue_sender .send(QueueCommand::NextBatch { min_size, prefill_token_budget, token_budget, response_sender, span: Span::current(), }) .unwrap(); // Await on response channel // Unwrap is safe here response_receiver.await.unwrap() } } // Background task responsible of the queue state async fn queue_task( requires_padding: bool, block_size: u32, window_size: Option<u32>, speculate: u32, mut receiver: mpsc::UnboundedReceiver<QueueCommand>, ) { let mut state = State::new(requires_padding, block_size, window_size, speculate); while let Some(cmd) = receiver.recv().await { match cmd { QueueCommand::Append(entry, span) => { span.in_scope(|| state.append(*entry)); metrics::increment_gauge!("tgi_queue_size", 1.0); } QueueCommand::NextBatch { min_size, prefill_token_budget, token_budget, response_sender, span, } => span.in_scope(|| { let next_batch = state.next_batch(min_size, prefill_token_budget, token_budget); response_sender.send(next_batch).unwrap(); metrics::gauge!("tgi_queue_size", state.entries.len() as f64); }), } } } /// Queue State #[derive(Debug)] struct State { /// Queue entries organized in a Vec entries: VecDeque<(u64, Entry)>, /// Id of the next entry next_id: u64, /// Id of the next batch next_batch_id: u64, /// Whether the model is using padding requires_padding: bool, /// Paged Attention block size block_size: u32, /// Sliding window window_size: Option<u32>, /// Speculation amount 
speculate: u32, } impl State { fn new( requires_padding: bool, block_size: u32, window_size: Option<u32>, speculate: u32, ) -> Self { Self { entries: VecDeque::with_capacity(128), next_id: 0, next_batch_id: 0, requires_padding, block_size, window_size, speculate, } } /// Append an entry to the queue fn append(&mut self, mut entry: Entry) { // Create a span that will live as long as the entry is in the queue waiting to be batched let queue_span = info_span!(parent: &entry.span, "queued"); entry.temp_span = Some(queue_span); // Push entry in the queue self.entries.push_back((self.next_id, entry)); self.next_id += 1; } // Get the next batch fn next_batch( &mut self, min_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, ) -> Option<NextBatch> { if self.entries.is_empty() { return None; } // Check if we have enough entries if let Some(min_size) = min_size { if self.entries.len() < min_size { return None; } } // Create span for this batch to add context to inference calls let next_batch_span = info_span!(parent: None, "batch", batch_size = tracing::field::Empty); next_batch_span.follows_from(&Span::current()); let mut batch_requests = Vec::with_capacity(self.entries.len()); let mut batch_entries = IntMap::with_capacity_and_hasher(self.entries.len(), BuildNoHashHasher::default()); let mut max_input_length = 0; let mut prefill_tokens: u32 = 0; let mut decode_tokens: u32 = 0; // Pop entries starting from the front of the queue while let Some((id, mut entry)) = self.entries.pop_front() { // Filter entries where the response receiver was dropped (== entries where the request // was dropped by the client) if entry.response_tx.is_closed() { metrics::increment_counter!("tgi_request_failure", "err" => "dropped"); continue; } if self.requires_padding { // We pad to max input length in the Python shards // We need to take these padding tokens into the equation max_input_length = max_input_length.max(entry.request.input_length); prefill_tokens = (batch_requests.len() + 1) as u32 * max_input_length } else { // pad to block size prefill_tokens += ((entry.request.input_length + self.block_size - 1) / self.block_size) * self.block_size; } if self.requires_padding { decode_tokens += entry.request.stopping_parameters.max_new_tokens; } else { let max_new_tokens = match self.window_size { None => entry.request.stopping_parameters.max_new_tokens, Some(window_size) => min( window_size.saturating_sub(entry.request.input_length), entry.request.stopping_parameters.max_new_tokens, ), }; // pad to block size decode_tokens += ((max_new_tokens + self.block_size - 1) / self.block_size) * self.block_size; } if prefill_tokens > prefill_token_budget || (prefill_tokens + decode_tokens + self.speculate) > token_budget { // Entry is over budget // Add it back to the front self.entries.push_front((id, entry)); break; } // Create a new span to link the batch back to this entry let entry_batch_span = info_span!(parent: &entry.span, "infer"); // Add relationships next_batch_span.follows_from(&entry_batch_span); entry_batch_span.follows_from(&next_batch_span); // Update entry entry.temp_span = Some(entry_batch_span); batch_requests.push(Request { id, prefill_logprobs: entry.request.decoder_input_details, inputs: entry.request.inputs.clone(), truncate: entry.request.truncate, parameters: Some(entry.request.parameters.clone()), stopping_parameters: Some(entry.request.stopping_parameters.clone()), top_n_tokens: entry.request.top_n_tokens, }); // Set batch_time entry.batch_time = Some(Instant::now()); // Insert in 
batch_entries IntMap batch_entries.insert(id, entry); } // Empty batch if batch_requests.is_empty() { return None; } // Check if our batch is big enough if let Some(min_size) = min_size { // Batch is too small if batch_requests.len() < min_size { // Add back entries to the queue in the correct order for r in batch_requests.into_iter().rev() { let id = r.id; let entry = batch_entries.remove(&id).unwrap(); self.entries.push_front((id, entry)); } return None; } } // Final batch size let size = batch_requests.len() as u32; next_batch_span.record("batch_size", size); let batch = Batch { id: self.next_batch_id, requests: batch_requests, size, max_tokens: (prefill_tokens + decode_tokens), }; // Increment batch id self.next_batch_id += 1; metrics::histogram!("tgi_batch_next_size", batch.size as f64); Some((batch_entries, batch, next_batch_span)) } } type NextBatch = (IntMap<u64, Entry>, Batch, Span); #[derive(Debug)] enum QueueCommand { Append(Box<Entry>, Span), NextBatch { min_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, response_sender: oneshot::Sender<Option<NextBatch>>, span: Span, }, } #[cfg(test)] mod tests { use super::*; use text_generation_client::{NextTokenChooserParameters, StoppingCriteriaParameters}; use tracing::info_span; fn default_entry() -> ( Entry, mpsc::UnboundedReceiver<Result<InferStreamResponse, InferError>>, ) { let (response_tx, receiver_tx) = mpsc::unbounded_channel(); let entry = Entry { request: ValidGenerateRequest { inputs: "".to_string(), input_length: 0, truncate: 0, decoder_input_details: false, parameters: NextTokenChooserParameters { temperature: 0.0, top_k: 0, top_p: 0.0, typical_p: 0.0, do_sample: false, seed: 0, repetition_penalty: 0.0, watermark: false, }, stopping_parameters: StoppingCriteriaParameters { ignore_eos_token: false, max_new_tokens: 1, stop_sequences: vec![], }, top_n_tokens: 0, }, response_tx, span: info_span!("entry"), temp_span: None, queue_time: Instant::now(), batch_time: None, }; (entry, receiver_tx) } #[test] fn test_append() { let mut state = State::new(false, 1, None, 0); let (entry, _guard) = default_entry(); assert_eq!(state.next_id, 0); assert_eq!(state.entries.len(), 0); state.append(entry); assert_eq!(state.next_id, 1); assert_eq!(state.entries.len(), 1); let (id, _) = state.entries.remove(0).unwrap(); assert_eq!(id, 0); } #[test] fn test_next_batch_empty() { let mut state = State::new(false, 1, None, 0); assert!(state.next_batch(None, 1, 1).is_none()); assert!(state.next_batch(Some(1), 1, 1).is_none()); } #[test] fn test_next_batch_min_size() { let mut state = State::new(false, 1, None, 0); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); state.append(entry1); state.append(entry2); let (entries, batch, _) = state.next_batch(None, 2, 2).unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert!(entries.get(&1).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); assert_eq!(state.next_id, 2); assert_eq!(state.entries.len(), 0); assert_eq!(state.next_batch_id, 1); let (entry3, _guard3) = default_entry(); state.append(entry3); assert!(state.next_batch(Some(2), 2, 2).is_none()); assert_eq!(state.next_id, 3); assert_eq!(state.entries.len(), 1); let (id, _) = state.entries.remove(0).unwrap(); assert_eq!(id, 2); } #[test] fn test_next_batch_token_budget() { let mut state = State::new(false, 1, None, 0); let (entry1, _guard1) = 
default_entry(); let (entry2, _guard2) = default_entry(); state.append(entry1); state.append(entry2); let (entries, batch, _) = state.next_batch(None, 1, 1).unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); assert_eq!(state.next_id, 2); assert_eq!(state.entries.len(), 1); assert_eq!(state.next_batch_id, 1); let (entry3, _guard3) = default_entry(); state.append(entry3); let (entries, batch, _) = state.next_batch(None, 3, 3).unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&1)); assert!(entries.contains_key(&2)); assert_eq!(batch.id, 1); assert_eq!(batch.size, 2); assert_eq!(state.next_id, 3); assert_eq!(state.entries.len(), 0); assert_eq!(state.next_batch_id, 2); } #[tokio::test] async fn test_queue_append() { let queue = Queue::new(false, 1, None, 0); let (entry, _guard) = default_entry(); queue.append(entry); } #[tokio::test] async fn test_queue_next_batch_empty() { let queue = Queue::new(false, 1, None, 0); assert!(queue.next_batch(None, 1, 1).await.is_none()); assert!(queue.next_batch(Some(1), 1, 1).await.is_none()); } #[tokio::test] async fn test_queue_next_batch_min_size() { let queue = Queue::new(false, 1, None, 0); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, 2, 2).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert!(entries.get(&1).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); let (entry3, _guard3) = default_entry(); queue.append(entry3); // Not enough requests pending assert!(queue.next_batch(Some(2), 2, 2).await.is_none()); // Not enough token budget assert!(queue.next_batch(Some(1), 0, 0).await.is_none()); // Ok let (entries2, batch2, _) = queue.next_batch(Some(1), 2, 2).await.unwrap(); assert_eq!(entries2.len(), 1); assert!(entries2.contains_key(&2)); assert!(entries2.get(&2).unwrap().batch_time.is_some()); assert_eq!(batch2.id, 1); assert_eq!(batch2.size, 1); } #[tokio::test] async fn test_queue_next_batch_token_budget() { let queue = Queue::new(false, 1, None, 0); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, 1, 1).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); let (entry3, _guard3) = default_entry(); queue.append(entry3); let (entries, batch, _) = queue.next_batch(None, 3, 3).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&1)); assert!(entries.contains_key(&2)); assert_eq!(batch.id, 1); assert_eq!(batch.size, 2); } #[tokio::test] async fn test_queue_next_batch_token_speculate() { let queue = Queue::new(false, 1, None, 2); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); // Budget of 1 is not enough assert!(queue.next_batch(None, 1, 1).await.is_none()); let (entries, batch, _) = queue.next_batch(None, 6, 6).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); } #[tokio::test] async fn test_queue_next_batch_dropped_receiver() { let queue = Queue::new(false, 1, None, 
0); let (entry, _) = default_entry(); queue.append(entry); assert!(queue.next_batch(None, 1, 1).await.is_none()); } }
text-generation-inference/router/src/queue.rs/0
{ "file_path": "text-generation-inference/router/src/queue.rs", "repo_id": "text-generation-inference", "token_count": 8994 }
189
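The batching logic above (non-padding branch, no sliding window) rounds prefill and decode token counts up to the paged-attention block size and adds the speculation amount before comparing against the budgets. A small Python sketch of that arithmetic, with arbitrary block size, budgets, and request lengths:

```python
def pad_to_block(n, block_size):
    # Same rounding as the Rust code: ((n + block_size - 1) / block_size) * block_size
    return ((n + block_size - 1) // block_size) * block_size

block_size = 16
speculate = 2
prefill_token_budget, token_budget = 4096, 8192

prefill_tokens = 0
decode_tokens = 0
# (input_length, max_new_tokens) for queued requests, front of the queue first
for input_length, max_new_tokens in [(10, 20), (100, 64), (500, 512)]:
    prefill_tokens += pad_to_block(input_length, block_size)
    decode_tokens += pad_to_block(max_new_tokens, block_size)
    over_budget = (
        prefill_tokens > prefill_token_budget
        or prefill_tokens + decode_tokens + speculate > token_budget
    )
    print(prefill_tokens, decode_tokens, "over budget" if over_budget else "ok")
```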
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #ifndef _cuda_compat_cuh #define _cuda_compat_cuh // atomicAdd for half types, to support CC < 7.x __device__ __forceinline__ void atomicAdd_half(half* address, half val) { unsigned int * address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2)); unsigned int old = *address_as_ui; unsigned int assumed; do { assumed = old; __half_raw hsum; hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); half tmpres = __hadd(hsum, val); hsum = __half_raw(tmpres); old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x; old = atomicCAS(address_as_ui, assumed, old); } while (assumed != old); } // atomicAdd for half2 types __device__ __forceinline__ void atomicAdd_half2(half2* address, half2 val) { unsigned int* address_as_ui = (unsigned int*)address; unsigned int old = *address_as_ui; unsigned int assumed; do { assumed = old; half2 old_val = *((half2*)&old); half2 new_val = __hadd2(old_val, val); old = atomicCAS(address_as_ui, assumed, *((unsigned int*)&new_val)); } while (assumed != old); } // #if defined(__CUDA_ARCH__) || defined(USE_ROCM) #if __CUDA_ARCH__ < 700 || defined(USE_ROCM) __device__ __forceinline__ void atomicAdd(half* address, half val) { atomicAdd_half(address, val); } #if __CUDA_ARCH__ < 600 || defined(USE_ROCM) __device__ __forceinline__ void atomicAdd(half2* address, half2 val) { atomicAdd_half2(address, val); } #endif #endif #endif #endif
text-generation-inference/server/exllama_kernels/exllama_kernels/cu_compat.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cu_compat.cuh", "repo_id": "text-generation-inference", "token_count": 692 }
190
#ifndef _util_h #define _util_h #define DBGS(__x) printf("%s\n", __x) #define DBGI(__x) printf("%s: %i\n", #__x, __x) #define DBGI2(__x, __y) printf("%s, %s: %i, %i\n", #__x, #__y, __x, __y) #define DBGI3(__x, __y, __z) printf("%s, %s, %s: %i, %i, %i\n", #__x, #__y, #__z, __x, __y, __z) #define DBGF(__x) printf("%s: %f\n", #__x, __x) #define DBGF2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __x, __y) #define DBGF3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __x, __y, __z) #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cpp/util.h/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cpp/util.h", "repo_id": "text-generation-inference", "token_count": 296 }
191
#ifndef _util_cuh #define _util_cuh #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cstdint> #include <cstdio> #include <ATen/cuda/CUDAContext.h> #define DIVIDE(x, size) (((x) + (size) - 1) / (size)) #define DBGS(__x) printf("%s\n", __x) #define DBGI(__x) printf("%s: %i\n", #__x, __x) #define DBGI2(__x, __y) printf("%s, %s: %i, %i\n", #__x, #__y, __x, __y) #define DBGI3(__x, __y, __z) printf("%s, %s, %s: %i, %i, %i\n", #__x, #__y, #__z, __x, __y, __z) #define DBGX(__x) printf("%s: %x\n", #__x, __x) #define DBGX2(__x, __y) printf("%s, %s: %x, %x\n", #__x, #__y, __x, __y) #define DBGX3(__x, __y, __z) printf("%s, %s, %s: %x, %x, %x\n", #__x, #__y, #__z, __x, __y, __z) #define DBGF(__x) printf("%s: %f\n", #__x, __x) #define DBGF2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __x, __y) #define DBGF3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __x, __y, __z) #define DBGH(__x) printf("%s: %f\n", #__x, __half2float(__x)) #define DBGH2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __half2float(__x), __half2float(__y)) #define DBGH3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __half2float(__x), __half2float(__y), __half2float(__z)) #define DBGIH(__x, __y) printf("%s, %s: %i, %f\n", #__x, #__y, __x, __half2float(__y)) #define DBGIH2(__x, __y, __z) printf("%s, %s, %s: %i, %f, %f\n", #__x, #__y, #__z, __x, __half2float(__y), __half2float(__z)) __forceinline__ __device__ half dq_scale_(const int qs, const half max_scale) { half qs_h = __hmul(__int2half_rn(qs + 1), __float2half_rn(1.0f / 16.0f)); qs_h = __hmul(qs_h, qs_h); qs_h = __hmul(qs_h, max_scale); return qs_h; } __forceinline__ __device__ float clamp(float x, float a, float b) { return fmaxf(a, fminf(b, x)); } #define cuda_check(ans) { gpu_assert((ans), __FILE__, __LINE__); } inline void gpu_assert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"CUDA error: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void print_global_mem(const half* ptr, int rows, int columns, int stride); #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh", "repo_id": "text-generation-inference", "token_count": 1114 }
192
import torch from text_generation_server.utils.layers import ( TensorParallelEmbedding, ) class ProcessGroup: def __init__(self, rank: int, world_size: int): self._rank = rank self.world_size = world_size def size(self) -> int: return self.world_size def rank(self) -> int: return self._rank class Weights: def __init__(self, rank: int, world_size: int, vocab_size: int, hidden_dim: int): self.weight = ( torch.arange(vocab_size * hidden_dim).float().view(vocab_size, hidden_dim) ) self.process_group = ProcessGroup(rank, world_size) def get_partial_sharded(self, name: str, dim: int): assert dim == 0 rank = self.process_group.rank() world_size = self.process_group.size() size = self.weight.shape[dim] block_size = (size + world_size - 1) // world_size start = rank * block_size stop = (rank + 1) * block_size return self.weight[start:stop] def get_shape(self, name: str): return self.weight.shape def test_weight_hub_files_offline_error(): vocab_size = 17 weights = Weights(rank=0, world_size=1, vocab_size=vocab_size, hidden_dim=256) embeddings = TensorParallelEmbedding("", weights) input_ids = torch.arange(vocab_size) output = embeddings.forward(input_ids) assert embeddings.min_id == 0 assert embeddings.max_id == 17 torch.testing.assert_close(output, torch.arange(256 * 17).float().view(17, 256)) weights_0_2 = Weights(rank=0, world_size=2, vocab_size=vocab_size, hidden_dim=256) weights_1_2 = Weights(rank=1, world_size=2, vocab_size=vocab_size, hidden_dim=256) embeddings_0_2 = TensorParallelEmbedding("", weights_0_2, reduce=False) assert embeddings_0_2.min_id == 0 assert embeddings_0_2.max_id == 9 torch.testing.assert_close( embeddings_0_2.weight, torch.cat([torch.arange(9 * 256), torch.zeros(256)], dim=0) .view(10, 256) .float(), ) embeddings_1_2 = TensorParallelEmbedding("", weights_1_2, reduce=False) assert embeddings_1_2.min_id == 9 assert embeddings_1_2.max_id == 17 torch.testing.assert_close( embeddings_1_2.weight, torch.cat([torch.arange(8 * 256) + 9 * 256, torch.zeros(256)], dim=0) .view(9, 256) .float(), ) output_tp_0 = embeddings_0_2.forward(input_ids) output_tp_1 = embeddings_1_2.forward(input_ids) torch.testing.assert_close(output, output_tp_0 + output_tp_1)
text-generation-inference/server/tests/utils/test_layers.py/0
{ "file_path": "text-generation-inference/server/tests/utils/test_layers.py", "repo_id": "text-generation-inference", "token_count": 1111 }
193
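The sharding checked by the test above splits the vocabulary into per-rank blocks using ceiling division; a quick sketch of that partition arithmetic for vocab_size=17 and world_size=2, matching the `min_id`/`max_id` values asserted in the test (the `min` clamp reflects the effective range, since slicing stops at the end of the weight):

```python
def shard_range(vocab_size, rank, world_size):
    # Same arithmetic as Weights.get_partial_sharded above
    block_size = (vocab_size + world_size - 1) // world_size
    start = rank * block_size
    stop = min((rank + 1) * block_size, vocab_size)
    return start, stop

vocab_size, world_size = 17, 2
for rank in range(world_size):
    print(rank, shard_range(vocab_size, rank, world_size))
# rank 0 owns ids [0, 9), rank 1 owns ids [9, 17)
```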
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.modeling_utils import PreTrainedModel from transformers.models.gpt_neox import GPTNeoXConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.flash_attn import attention from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelHead, FastLayerNorm, PositionRotaryEmbedding, get_linear, ) def load_row(config, prefix: str, weights, bias: bool): weight = weights.get_multi_weights_row(prefix, quantize=config.quantize) if bias and weights.process_group.rank() == 0: # Rank is only on the first rank process bias = weights.get_tensor(f"{prefix}.bias") else: bias = None linear = get_linear(weight, bias, config.quantize) if config.use_parallel_residual: return linear else: return TensorParallelRowLinear(linear, process_group=weights.process_group) def load_qkv(config, prefix: str, weights, num_heads, head_size, hidden_size): weight = weights.get_multi_weights_col([prefix], quantize=config.quantize, dim=0) if isinstance(weight, torch.Tensor): # Only on non quantized versions weight = ( weight.view( num_heads, 3, head_size, hidden_size, ) .permute(1, 0, 2, 3) .reshape(-1, hidden_size) ) bias = weights.get_sharded(f"{prefix}.bias", dim=0) bias = bias.view(num_heads, 3, head_size).permute(1, 0, 2).reshape(-1) linear = get_linear(weight, bias, config.quantize) if config.use_parallel_residual: return linear else: return TensorParallelColumnLinear(linear) class FlashNeoxAttention(torch.nn.Module): def __init__(self, config, prefix, weights): super().__init__() num_heads = config.num_attention_heads hidden_size = config.hidden_size self.num_heads = num_heads self.hidden_size = hidden_size self.head_size = hidden_size // num_heads self.rotary_dim = int(config.rotary_pct * self.head_size) if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.rotary_emb = PositionRotaryEmbedding.static( config=config, dim=self.rotary_dim, base=config.rotary_emb_base, device=weights.device, ) self.softmax_scale = self.head_size ** (-0.5) self.query_key_value = load_qkv( config, prefix=f"{prefix}.query_key_value", weights=weights, num_heads=self.num_heads, head_size=self.head_size, 
hidden_size=self.hidden_size, ) self.dense = load_row( config, prefix=f"{prefix}.dense", weights=weights, bias=True ) self.kv_head_mapping = torch.arange( 0, self.num_heads, dtype=torch.int32, device=weights.device ) def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ): qkv = self.query_key_value(hidden_states) qkv = qkv.view(-1, 3, self.num_heads, self.head_size) # Inplace rotary self.rotary_emb(qkv[:, 0], qkv[:, 1], cos, sin) paged_attention.reshape_and_cache( qkv[:, 1], qkv[:, 2], kv_cache[0], kv_cache[1], slots ) # output tensor attn_output = torch.empty_like(qkv[:, 0]) # Prefill if cu_seqlen_prefill is not None: # flash attention flash_attn.attention( qkv[:, 0], qkv[:, 1], qkv[:, 2], attn_output, cu_seqlen_prefill, max_s, self.softmax_scale, ) # Decode else: paged_attention.attention( attn_output, qkv[:, 0], kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, input_lengths, max_s, ) return self.dense(attn_output.view(-1, self.num_heads * self.head_size)) class FlashMLP(nn.Module): def __init__(self, config, prefix, weights): super().__init__() act = config.hidden_act self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, approximate="tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none", ) ) self.dense_h_to_4h = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.dense_h_to_4h", weights=weights, bias=True ) self.dense_4h_to_h = load_row( config, prefix=f"{prefix}.dense_4h_to_h", weights=weights, bias=True ) def forward(self, hidden_states): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dense_4h_to_h(hidden_states) return hidden_states class FlashNeoXLayer(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() layer_norm_eps = config.layer_norm_eps prefix = f"gpt_neox.layers.{layer_id}" self.use_parallel_residual = config.use_parallel_residual self.input_layernorm = FastLayerNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=layer_norm_eps ) self.post_attention_layernorm = FastLayerNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=layer_norm_eps, ) self.attention = FlashNeoxAttention( config, prefix=f"{prefix}.attention", weights=weights ) self.mlp = FlashMLP(config, prefix=f"{prefix}.mlp", weights=weights) self.process_group = weights.process_group def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ): if self.use_parallel_residual: ln1_hidden_states, _ = self.input_layernorm(hidden_states) attn_output = self.attention( ln1_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ) ln2_hidden_states, _ = self.post_attention_layernorm(hidden_states) mlp_output = self.mlp(ln2_hidden_states) intermediate = mlp_output + attn_output if self.process_group.size() > 1: torch.distributed.all_reduce(intermediate, group=self.process_group) return intermediate + hidden_states, None else: hidden_states, residual = self.input_layernorm(hidden_states, residual) hidden_states = self.attention( hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ) hidden_states, residual = self.post_attention_layernorm( hidden_states, residual ) mlp_output = self.mlp(hidden_states) return mlp_output, residual class FlashGPTNeoXPreTrainedModel(PreTrainedModel): config_class = GPTNeoXConfig 
base_model_prefix = "gpt_neox" supports_gradient_checkpointing = False _no_split_modules = None class FlashGPTNeoXModel(FlashGPTNeoXPreTrainedModel): def __init__(self, config, weights): super().__init__(config) self.config = config self.embed_in = TensorParallelEmbedding( prefix="gpt_neox.embed_in", weights=weights ) self.layers = nn.ModuleList( [ FlashNeoXLayer(layer_id, config, weights) for layer_id in range(config.num_hidden_layers) ] ) self.final_layer_norm = FastLayerNorm.load( prefix="gpt_neox.final_layer_norm", weights=weights, eps=config.layer_norm_eps, ) self.gradient_checkpointing = False self.head_size = self.layers[0].attention.head_size self.num_heads = self.layers[0].attention.num_heads def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, ) -> torch.Tensor: hidden_states = self.embed_in(input_ids) # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].attention.rotary_emb.get_cos_sin( position_ids, max_s, hidden_states.dtype ) residual = None for i, layer in enumerate(self.layers): hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, input_lengths, max_s, ) hidden_states, _ = self.final_layer_norm(hidden_states, residual) return hidden_states class FlashGPTNeoXForCausalLM(FlashGPTNeoXPreTrainedModel): def __init__(self, config, weights): super().__init__(config) self.gpt_neox = FlashGPTNeoXModel(config, weights) self.embed_out = TensorParallelHead.load( config, prefix="embed_out", weights=weights ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, lm_head_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = self.gpt_neox( input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.embed_out(hidden_states) return logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py", "repo_id": "text-generation-inference", "token_count": 6181 }
194
import torch
import torch.distributed

from opentelemetry import trace
from transformers import AutoConfig, AutoTokenizer
from transformers.models.llama import LlamaTokenizer
from typing import Optional

from text_generation_server.models import FlashCausalLM
from text_generation_server.models.custom_modeling.flash_llama_modeling import (
    FlashLlamaForCausalLM,
    LlamaConfig,
)
from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)

tracer = trace.get_tracer(__name__)


class FlashLlama(FlashCausalLM):
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
        use_medusa: Optional[str] = None,
    ):
        self.process_group, rank, world_size = initialize_torch_distributed()
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        else:
            raise NotImplementedError("FlashLlama is only available on GPU")

        try:
            tokenizer = LlamaTokenizer.from_pretrained(
                model_id,
                revision=revision,
                padding_side="left",
                truncation_side="left",
                trust_remote_code=trust_remote_code,
            )
        except Exception:
            tokenizer = AutoTokenizer.from_pretrained(
                model_id,
                revision=revision,
                padding_side="left",
                truncation_side="left",
                trust_remote_code=trust_remote_code,
            )

        config = LlamaConfig.from_pretrained(
            model_id, revision=revision, trust_remote_code=trust_remote_code
        )
        config.quantize = quantize

        torch.distributed.barrier(group=self.process_group)

        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(filenames, device, dtype, process_group=self.process_group)
        if config.quantize in ["gptq", "awq"]:
            weights._set_gptq_params(model_id, revision)

        model = FlashLlamaForCausalLM(config, weights)
        if use_medusa:
            from text_generation_server.utils.medusa import MedusaModel
            from huggingface_hub import hf_hub_download
            import json
            import os
            from pathlib import Path

            is_local_model = (
                Path(use_medusa).exists() and Path(use_medusa).is_dir()
            ) or os.getenv("WEIGHTS_CACHE_OVERRIDE", None) is not None

            if not is_local_model:
                medusa_config = hf_hub_download(
                    use_medusa, revision=revision, filename="config.json"
                )
                medusa_head = hf_hub_download(
                    use_medusa, revision=revision, filename="medusa_lm_head.pt"
                )
            else:
                medusa_config = str(Path(use_medusa) / "config.json")
                medusa_head = str(Path(use_medusa) / "medusa_lm_head.pt")

            with open(medusa_config, "r") as f:
                config = json.load(f)
            medusa_sf = medusa_head[: -len(".pt")] + ".safetensors"
            weights = Weights(
                [medusa_sf], device, dtype, process_group=self.process_group
            )
            lm_head = model.lm_head
            model.lm_head = MedusaModel(config, weights, lm_head)

        torch.distributed.barrier(group=self.process_group)
        super(FlashLlama, self).__init__(
            model=model,
            tokenizer=tokenizer,
            num_layers=len(model.model.layers),
            num_kv_heads=model.model.num_key_value_heads,
            head_size=model.model.head_size,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
        )
text-generation-inference/server/text_generation_server/models/flash_llama.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/flash_llama.py", "repo_id": "text-generation-inference", "token_count": 1942 }
195
import torch
import torch.distributed

from typing import Optional, List

from transformers import AutoTokenizer, AutoModelForCausalLM

from text_generation_server.models import CausalLM

FIM_PREFIX = "<fim-prefix>"
FIM_MIDDLE = "<fim-middle>"
FIM_SUFFIX = "<fim-suffix>"
FIM_PAD = "<fim-pad>"
EOD = "<|endoftext|>"


class SantaCoder(CausalLM):
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
        if torch.cuda.is_available():
            device = torch.device("cuda")
            dtype = torch.float16 if dtype is None else dtype
        else:
            if quantize:
                raise ValueError("quantization is not available on CPU")

            device = torch.device("cpu")
            dtype = torch.float32 if dtype is None else dtype

        tokenizer = AutoTokenizer.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )
        tokenizer.add_special_tokens(
            {
                "additional_special_tokens": [
                    EOD,
                    FIM_PREFIX,
                    FIM_MIDDLE,
                    FIM_SUFFIX,
                    FIM_PAD,
                ],
                "pad_token": EOD,
            }
        )

        with device:
            model = AutoModelForCausalLM.from_pretrained(
                model_id,
                revision=revision,
                torch_dtype=dtype,
                load_in_8bit=quantize == "bitsandbytes",
                trust_remote_code=trust_remote_code,
            )

        super(CausalLM, self).__init__(
            model=model,
            tokenizer=tokenizer,
            requires_padding=True,
            dtype=dtype,
            device=device,
        )

    def decode(self, generated_ids: List[int]) -> str:
        # Do not skip special tokens as they are used for custom parsing rules of the generated text
        return self.tokenizer.decode(
            generated_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False
        )
text-generation-inference/server/text_generation_server/models/santacoder.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/santacoder.py", "repo_id": "text-generation-inference", "token_count": 1176 }
196
import time import torch.nn as nn import math import json import os import torch import transformers from texttable import Texttable from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer from huggingface_hub import HfApi from accelerate import init_empty_weights from text_generation_server.utils import initialize_torch_distributed, Weights from text_generation_server.utils.hub import weight_files from text_generation_server.utils.gptq.quant_linear import QuantLinear from loguru import logger from typing import Optional DEV = torch.device("cuda:0") class Quantizer(nn.Module): def __init__(self, shape=1): super(Quantizer, self).__init__() self.register_buffer("maxq", torch.tensor(0)) self.register_buffer("scale", torch.zeros(shape)) self.register_buffer("zero", torch.zeros(shape)) def configure( self, bits, perchannel=False, sym=True, mse=False, norm=2.4, grid=100, maxshrink=0.8, trits=False, ): self.maxq = torch.tensor(2**bits - 1) self.perchannel = perchannel self.sym = sym self.mse = mse self.norm = norm self.grid = grid self.maxshrink = maxshrink if trits: self.maxq = torch.tensor(-1) self.scale = torch.zeros_like(self.scale) def _quantize(self, x, scale, zero, maxq): if maxq < 0: return (x > scale / 2).float() * scale + (x < zero / 2).float() * zero q = torch.clamp(torch.round(x / scale) + zero, 0, maxq) return scale * (q - zero) def find_params(self, x, weight=False): dev = x.device self.maxq = self.maxq.to(dev) shape = x.shape if self.perchannel: if weight: x = x.flatten(1) else: if len(shape) == 4: x = x.permute([1, 0, 2, 3]) x = x.flatten(1) if len(shape) == 3: x = x.reshape((-1, shape[-1])).t() if len(shape) == 2: x = x.t() else: x = x.flatten().unsqueeze(0) tmp = torch.zeros(x.shape[0], device=dev) xmin = torch.minimum(x.min(1)[0], tmp) xmax = torch.maximum(x.max(1)[0], tmp) if self.sym: xmax = torch.maximum(torch.abs(xmin), xmax) tmp = xmin < 0 if torch.any(tmp): xmin[tmp] = -xmax[tmp] tmp = (xmin == 0) & (xmax == 0) xmin[tmp] = -1 xmax[tmp] = +1 if self.maxq < 0: self.scale = xmax self.zero = xmin else: self.scale = (xmax - xmin) / self.maxq if self.sym: self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2) else: self.zero = torch.round(-xmin / self.scale) if self.mse: best = torch.full([x.shape[0]], float("inf"), device=dev) for i in range(int(self.maxshrink * self.grid)): p = 1 - i / self.grid xmin1 = p * xmin xmax1 = p * xmax scale1 = (xmax1 - xmin1) / self.maxq zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero q = self._quantize( x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq ) q -= x q.abs_() q.pow_(self.norm) err = torch.sum(q, 1) tmp = err < best if torch.any(tmp): best[tmp] = err[tmp] self.scale[tmp] = scale1[tmp] self.zero[tmp] = zero1[tmp] if not self.perchannel: if weight: tmp = shape[0] else: tmp = shape[1] if len(shape) != 3 else shape[2] self.scale = self.scale.repeat(tmp) self.zero = self.zero.repeat(tmp) if weight: shape = [-1] + [1] * (len(shape) - 1) self.scale = self.scale.reshape(shape) self.zero = self.zero.reshape(shape) return if len(shape) == 4: self.scale = self.scale.reshape((1, -1, 1, 1)) self.zero = self.zero.reshape((1, -1, 1, 1)) if len(shape) == 3: self.scale = self.scale.reshape((1, 1, -1)) self.zero = self.zero.reshape((1, 1, -1)) if len(shape) == 2: self.scale = self.scale.unsqueeze(0) self.zero = self.zero.unsqueeze(0) def quantize(self, x): if self.ready(): return self._quantize(x, self.scale, self.zero, self.maxq) return x def enabled(self): return self.maxq > 0 def ready(self): return 
torch.all(self.scale != 0) class GPTQ: def __init__(self, layer, observe=False): self.layer = layer self.dev = self.layer.weight.device W = layer.weight.data.clone() if isinstance(self.layer, nn.Conv2d): W = W.flatten(1) if isinstance(self.layer, transformers.Conv1D): W = W.t() self.rows = W.shape[0] self.columns = W.shape[1] self.H = torch.zeros((self.columns, self.columns), device=self.dev) self.nsamples = 0 self.quantizer = Quantizer() self.observe = observe def add_batch(self, inp, out): # Hessian H = 2 X XT + λ I if self.observe: self.inp1 = inp self.out1 = out else: self.inp1 = None self.out1 = None if len(inp.shape) == 2: inp = inp.unsqueeze(0) tmp = inp.shape[0] if isinstance(self.layer, nn.Linear) or isinstance( self.layer, transformers.Conv1D ): if len(inp.shape) == 3: inp = inp.reshape((-1, inp.shape[-1])) inp = inp.t() if isinstance(self.layer, nn.Conv2d): unfold = nn.Unfold( self.layer.kernel_size, dilation=self.layer.dilation, padding=self.layer.padding, stride=self.layer.stride, ) inp = unfold(inp) inp = inp.permute([1, 0, 2]) inp = inp.flatten(1) self.H *= self.nsamples / (self.nsamples + tmp) self.nsamples += tmp # inp = inp.float() inp = math.sqrt(2 / self.nsamples) * inp.float() # self.H += 2 / self.nsamples * inp.matmul(inp.t()) self.H += inp.matmul(inp.t()) def print_loss(self, name, q_weight, weight_error, timecost): table = Texttable() length = 28 name = ( (name + " " * (length - len(name))) if len(name) <= length else name[:length] ) table.header(["name", "weight_error", "fp_inp_SNR", "q_inp_SNR", "time"]) # assign weight self.layer.weight.data = q_weight.reshape(self.layer.weight.shape).to( self.layer.weight.data.dtype ) if self.inp1 is not None: # quantize input to int8 quantizer = Quantizer() quantizer.configure(8, perchannel=False, sym=True, mse=False) quantizer.find_params(self.inp1) q_in = quantizer.quantize(self.inp1).type(torch.float16) q_out = self.layer(q_in) # get kinds of SNR q_SNR = torch_snr_error(q_out, self.out1).item() fp_SNR = torch_snr_error(self.layer(self.inp1), self.out1).item() else: q_SNR = "-" fp_SNR = "-" table.add_row([name, weight_error, fp_SNR, q_SNR, timecost]) print(table.draw().split("\n")[-2]) def fasterquant( self, blocksize=128, percdamp=0.01, groupsize=-1, act_order=False, name="" ): self.layer.to(self.dev) W = self.layer.weight.data.clone() if isinstance(self.layer, nn.Conv2d): W = W.flatten(1) if isinstance(self.layer, transformers.Conv1D): W = W.t() W = W.float() tick = time.time() if not self.quantizer.ready(): self.quantizer.find_params(W, weight=True) H = self.H if not self.observe: del self.H dead = torch.diag(H) == 0 H[dead, dead] = 1 W[:, dead] = 0 if act_order: perm = torch.argsort(torch.diag(H), descending=True) W = W[:, perm] H = H[perm][:, perm] Losses = torch.zeros_like(W) Q = torch.zeros_like(W) damp = percdamp * torch.mean(torch.diag(H)) diag = torch.arange(self.columns, device=self.dev) H[diag, diag] += damp H = torch.linalg.cholesky(H) H = torch.cholesky_inverse(H) try: H = torch.linalg.cholesky(H, upper=True) except Exception: # Addition because Falcon fails on h_to_4h H = torch.linalg.cholesky( H + 1e-5 * torch.eye(H.shape[0]).to(H.device), upper=True ) Hinv = H g_idx = [] scale = [] zero = [] now_idx = 1 for i1 in range(0, self.columns, blocksize): i2 = min(i1 + blocksize, self.columns) count = i2 - i1 W1 = W[:, i1:i2].clone() Q1 = torch.zeros_like(W1) Err1 = torch.zeros_like(W1) Losses1 = torch.zeros_like(W1) Hinv1 = Hinv[i1:i2, i1:i2] for i in range(count): w = W1[:, i] d = Hinv1[i, i] if groupsize != -1: if 
(i1 + i) % groupsize == 0: self.quantizer.find_params( W[:, (i1 + i) : (i1 + i + groupsize)], weight=True ) if ((i1 + i) // groupsize) - now_idx == -1: scale.append(self.quantizer.scale) zero.append(self.quantizer.zero) now_idx += 1 q = self.quantizer.quantize(w.unsqueeze(1)).flatten() Q1[:, i] = q Losses1[:, i] = (w - q) ** 2 / d**2 err1 = (w - q) / d W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0)) Err1[:, i] = err1 Q[:, i1:i2] = Q1 Losses[:, i1:i2] = Losses1 / 2 W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:]) torch.cuda.synchronize() error = torch.sum(Losses).item() groupsize = groupsize if groupsize != -1 else self.columns g_idx = [i // groupsize for i in range(self.columns)] g_idx = torch.tensor(g_idx, dtype=torch.int32, device=Q.device) if act_order: invperm = torch.argsort(perm) Q = Q[:, invperm] g_idx = g_idx[invperm] if isinstance(self.layer, transformers.Conv1D): Q = Q.t() self.print_loss( name=name, q_weight=Q, weight_error=error, timecost=(time.time() - tick) ) if scale == []: scale.append(self.quantizer.scale) zero.append(self.quantizer.zero) scale = torch.cat(scale, dim=1) zero = torch.cat(zero, dim=1) return scale, zero, g_idx, error def free(self): self.inp1 = None self.out1 = None self.H = None self.Losses = None self.Trace = None torch.cuda.empty_cache() def get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset("wikitext", "wikitext-2-raw-v1", split="train") testdata = load_dataset("wikitext", "wikitext-2-raw-v1", split="test") try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) trainenc = tokenizer("\n\n".join(traindata["text"]), return_tensors="pt") testenc = tokenizer("\n\n".join(testdata["text"]), return_tensors="pt") import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return trainloader, testenc def get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset("ptb_text_only", "penn_treebank", split="train") valdata = load_dataset("ptb_text_only", "penn_treebank", split="validation") try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) trainenc = tokenizer("\n\n".join(traindata["sentence"]), return_tensors="pt") testenc = tokenizer("\n\n".join(valdata["sentence"]), return_tensors="pt") import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return trainloader, testenc def get_c4(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset( "allenai/c4", "allenai--c4", data_files={"train": "en/c4-train.00000-of-01024.json.gz"}, split="train", use_auth_token=False, ) valdata = load_dataset( "allenai/c4", "allenai--c4", data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"}, split="validation", use_auth_token=False, ) try: 
tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) import random random.seed(seed) trainloader = [] for _ in range(nsamples): while True: i = random.randint(0, len(traindata) - 1) trainenc = tokenizer(traindata[i]["text"], return_tensors="pt") if trainenc.input_ids.shape[1] >= seqlen: break i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) import random random.seed(0) valenc = [] for _ in range(256): while True: i = random.randint(0, len(valdata) - 1) tmp = tokenizer(valdata[i]["text"], return_tensors="pt") if tmp.input_ids.shape[1] >= seqlen: break i = random.randint(0, tmp.input_ids.shape[1] - seqlen - 1) j = i + seqlen valenc.append(tmp.input_ids[:, i:j]) valenc = torch.hstack(valenc) class TokenizerWrapper: def __init__(self, input_ids): self.input_ids = input_ids valenc = TokenizerWrapper(valenc) return trainloader, valenc def get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset("ptb_text_only", "penn_treebank", split="train") testdata = load_dataset("ptb_text_only", "penn_treebank", split="test") try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) trainenc = tokenizer(" ".join(traindata["sentence"]), return_tensors="pt") testenc = tokenizer(" ".join(testdata["sentence"]), return_tensors="pt") import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return trainloader, testenc def get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset( "allenai/c4", "allenai--c4", data_files={"train": "en/c4-train.00000-of-01024.json.gz"}, split="train", ) valdata = load_dataset( "allenai/c4", "allenai--c4", data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"}, split="validation", ) try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) import random random.seed(seed) trainloader = [] for _ in range(nsamples): while True: i = random.randint(0, len(traindata) - 1) trainenc = tokenizer(traindata[i]["text"], return_tensors="pt") if trainenc.input_ids.shape[1] >= seqlen: break i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) valenc = tokenizer(" ".join(valdata[:1100]["text"]), return_tensors="pt") valenc = valenc.input_ids[:, : (256 * seqlen)] class TokenizerWrapper: def __init__(self, input_ids): self.input_ids = input_ids valenc = TokenizerWrapper(valenc) return trainloader, valenc def get_loaders( name, nsamples=128, seed=0, seqlen=2048, model_id="", trust_remote_code=False ): if "wikitext2" in name: return get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code) if "ptb" in name: if 
"new" in name: return get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code) return get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code) if "c4" in name: if "new" in name: return get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code) return get_c4(nsamples, seed, seqlen, model_id, trust_remote_code) def find_layers(module, layers=(nn.Conv2d, nn.Linear), name=""): # Skip last lm_head linear # Need isintance Falcon is inheriting Linear. if isinstance(module, layers) and "lm_head" not in name: return {name: module} res = {} for name1, child in module.named_children(): res.update( find_layers( child, layers=layers, name=name + "." + name1 if name != "" else name1 ) ) return res @torch.no_grad() def sequential( model, dataloader, dev, nsamples, bits, groupsize, *, hooks, percdamp=0.01, sym: bool = False, act_order: bool = False, ): print("Starting ...") use_cache = model.config.use_cache model.config.use_cache = False try: layers = model.model.layers prefix = "model.layers" except Exception: layers = model.transformer.h prefix = "transformer.h" dtype = next(iter(model.parameters())).dtype inps = torch.zeros( (nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev ) cache = {"i": 0} extra = {} class Catcher(nn.Module): def __init__(self, module): super().__init__() self.module = module def forward(self, inp, **kwargs): inps[cache["i"]] = inp cache["i"] += 1 extra.update(kwargs.copy()) raise ValueError layers[0] = Catcher(layers[0]) for batch in dataloader: try: model(batch[0].cuda()) except ValueError: pass layers[0] = layers[0].module # layers[0] = layers[0].cpu() # model.model.embed_tokens = model.model.embed_tokens.cpu() # model.model.norm = model.model.norm.cpu() torch.cuda.empty_cache() for hook in hooks: hook.remove() outs = torch.zeros_like(inps) extra = { k: v.to(dev) if isinstance(v, torch.Tensor) else v for k, v in extra.items() } print("Ready.") quantizers = {} for i in range(len(layers)): print(f"Quantizing layer {i+1}/{len(layers)}..") print("+------------------+--------------+------------+-----------+-------+") print("| name | weight_error | fp_inp_SNR | q_inp_SNR | time |") print("+==================+==============+============+===========+=======+") layer = layers[i] layer.load() full = find_layers(layer) sequential = [list(full.keys())] for names in sequential: subset = {n: full[n] for n in names} gptq = {} for name in subset: gptq[name] = GPTQ(subset[name]) gptq[name].quantizer.configure( bits, perchannel=True, sym=sym, mse=False ) pass def add_batch(name): def tmp(_, inp, out): gptq[name].add_batch(inp[0].data, out.data) return tmp handles = [] for name in subset: handles.append(subset[name].register_forward_hook(add_batch(name))) for j in range(nsamples): outs[j] = layer(inps[j].unsqueeze(0), **extra)[0] for h in handles: h.remove() for name in subset: scale, zero, g_idx, error = gptq[name].fasterquant( percdamp=percdamp, groupsize=groupsize, act_order=act_order, name=name, ) quantizers[f"{prefix}.{i}.{name}"] = ( gptq[name].quantizer.cpu(), scale.cpu(), zero.cpu(), g_idx.cpu(), bits, groupsize, ) gptq[name].free() for j in range(nsamples): outs[j] = layer(inps[j].unsqueeze(0), **extra)[0] layer.unload() del layer del gptq torch.cuda.empty_cache() inps, outs = outs, inps print("+------------------+--------------+------------+-----------+-------+") print("\n") model.config.use_cache = use_cache return quantizers def make_quant_linear(module, names, bits, groupsize, name=""): if isinstance(module, QuantLinear): return for attr in 
dir(module): tmp = getattr(module, attr) name1 = name + "." + attr if name != "" else attr if name1 in names: delattr(module, attr) setattr( module, attr, QuantLinear.new( bits, groupsize, tmp.in_features, tmp.out_features, tmp.bias is not None, ), ) for name1, child in module.named_children(): make_quant_linear( child, names, bits, groupsize, name + "." + name1 if name != "" else name1 ) # TODO: perform packing on GPU def pack(model, quantizers, bits, groupsize): layers = find_layers(model) layers = {n: layers[n] for n in quantizers} make_quant_linear(model, quantizers, bits, groupsize) qlayers = find_layers(model, (QuantLinear,)) print("Packing ...") for name in qlayers: print(name) quantizers[name], scale, zero, g_idx, _, _ = quantizers[name] qlayers[name].pack(layers[name], scale, zero, g_idx) print("Done.") return model def setdeepattr(module, full_name, tensor): current = module tokens = full_name.split(".") for token in tokens[:-1]: current = getattr(current, token) setattr(current, tokens[-1], tensor) def getdeepattr(module, full_name): current = module tokens = full_name.split(".") for token in tokens: current = getattr(current, token) return current def load_weights_pre_hook(module_name, weights, recursive=False): def inner(module, args): print(f"Pre hook {module_name}") local_params = {} for k, v in module.named_parameters(): if not recursive and k.count(".") != 1: continue local_params[k] = v for k, v in module.named_buffers(): if not recursive and k.count(".") != 1: continue local_params[k] = v for local_param in local_params: current_tensor = getdeepattr(module, local_param) if current_tensor.device == torch.device("meta"): # print(f"Loading {local_param}") if module_name: tensor_name = f"{module_name}.{local_param}" else: tensor_name = local_param tensor = weights.get_tensor(tensor_name) setdeepattr(module, local_param, nn.Parameter(tensor)) else: tensor = current_tensor.to(device=torch.device("cuda:0")) if current_tensor.requires_grad: tensor = nn.Parameter(tensor) setdeepattr(module, local_param, tensor) return inner def load_weights_post_hook(module_name, weights, recursive=False): def inner(module, args, output): print(f"Post hook {module_name}") local_params = {} for k, v in module.named_parameters(): if not recursive and k.count(".") != 1: continue local_params[k] = v for k, v in module.named_buffers(): if not recursive and k.count(".") != 1: continue local_params[k] = v for local_param in local_params: # print(f"Unloading {local_param}") current_tensor = getdeepattr(module, local_param) setdeepattr( module, local_param, nn.Parameter(current_tensor.to(device=torch.device("cpu"))), ) return output return inner def quantize( model_id: str, bits: int, groupsize: int, output_dir: str, revision: str, trust_remote_code: bool, upload_to_model_id: Optional[str], percdamp: float, act_order: bool, ): print("loading model") config = AutoConfig.from_pretrained( model_id, trust_remote_code=trust_remote_code, ) with init_empty_weights(): model = AutoModelForCausalLM.from_config( config, torch_dtype=torch.float16, trust_remote_code=trust_remote_code ) model = model.eval() print("LOADED model") files = weight_files(model_id, revision, extension=".safetensors") process_group, _, _ = initialize_torch_distributed() weights = Weights( files, device=torch.device("cuda:0"), dtype=torch.float16, process_group=process_group, aliases={"embed_tokens.weight": ["lm_head.weight"]}, ) hooks = [] for name, module in model.named_modules(): def load(module, name): def _load(): 
load_weights_pre_hook(name, weights, recursive=True)(module, None) return _load def unload(module, name): def _unload(): load_weights_post_hook(name, weights, recursive=True)( module, None, None ) return _unload module.load = load(module, name) module.unload = unload(module, name) hooks.append( module.register_forward_pre_hook(load_weights_pre_hook(name, weights)) ) hooks.append( module.register_forward_hook(load_weights_post_hook(name, weights)) ) model.seqlen = 2048 dataset = "wikitext2" nsamples = 128 seed = None dataloader, testloader = get_loaders( dataset, nsamples=nsamples, seed=seed, model_id=model_id, seqlen=model.seqlen, trust_remote_code=trust_remote_code, ) tick = time.time() quantizers = sequential( model, dataloader, DEV, nsamples, bits, groupsize, percdamp=percdamp, act_order=act_order, hooks=hooks, ) print(time.time() - tick) pack(model, quantizers, bits, groupsize) from safetensors.torch import save_file from transformers.modeling_utils import shard_checkpoint state_dict = model.state_dict() state_dict = {k: v.cpu().contiguous() for k, v in state_dict.items()} state_dict["gptq_bits"] = torch.LongTensor([bits]) state_dict["gptq_groupsize"] = torch.LongTensor([groupsize]) max_shard_size = "10GB" shards, index = shard_checkpoint( state_dict, max_shard_size=max_shard_size, weights_name="model.safetensors" ) os.makedirs(output_dir, exist_ok=True) for shard_file, shard in shards.items(): save_file( shard, os.path.join(output_dir, shard_file), metadata={ "format": "pt", "quantized": "gptq", "origin": "text-generation-inference", }, ) if index is None: path_to_weights = os.path.join(output_dir, "model.safetensors") logger.info(f"Model weights saved in {path_to_weights}") else: save_index_file = "model.safetensors.index.json" save_index_file = os.path.join(output_dir, save_index_file) with open(save_index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) logger.info( f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) config = AutoConfig.from_pretrained(model_id, trust_remote_code=trust_remote_code) config.save_pretrained(output_dir) logger.info("Saved config") logger.info("Saving tokenizer") tokenizer = AutoTokenizer.from_pretrained( model_id, trust_remote_code=trust_remote_code ) tokenizer.save_pretrained(output_dir) logger.info("Saved tokenizer") if upload_to_model_id: api = HfApi() api.upload_folder( folder_path=output_dir, repo_id=upload_to_model_id, repo_type="model" )
text-generation-inference/server/text_generation_server/utils/gptq/quantize.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/gptq/quantize.py", "repo_id": "text-generation-inference", "token_count": 15970 }
197
parser: '@typescript-eslint/parser' parserOptions: ecmaFeatures: jsx: true ecmaVersion: latest sourceType: module project: ./tsconfig.json env: browser: true es6: true node: true jest: true ignorePatterns: ['index.js', 'target/'] plugins: - import - '@typescript-eslint' extends: - eslint:recommended - plugin:prettier/recommended rules: # 0 = off, 1 = warn, 2 = error 'space-before-function-paren': 0 'no-useless-constructor': 0 'no-undef': 2 'no-console': [2, { allow: ['error', 'warn', 'info', 'assert'] }] 'comma-dangle': ['error', 'only-multiline'] 'no-unused-vars': 0 'no-var': 2 'one-var-declaration-per-line': 2 'prefer-const': 2 'no-const-assign': 2 'no-duplicate-imports': 2 'no-use-before-define': [2, { 'functions': false, 'classes': false }] 'eqeqeq': [2, 'always', { 'null': 'ignore' }] 'no-case-declarations': 0 'no-restricted-syntax': [ 2, { 'selector': 'BinaryExpression[operator=/(==|===|!=|!==)/][left.raw=true], BinaryExpression[operator=/(==|===|!=|!==)/][right.raw=true]', 'message': Don't compare for equality against boolean literals, }, ] # https://github.com/benmosher/eslint-plugin-import/pull/334 'import/no-duplicates': 2 'import/first': 2 'import/newline-after-import': 2 'import/order': [ 2, { 'newlines-between': 'always', 'alphabetize': { 'order': 'asc' }, 'groups': ['builtin', 'external', 'internal', 'parent', 'sibling', 'index'], }, ] overrides: - files: - ./**/*{.ts,.tsx} rules: 'no-unused-vars': [2, { varsIgnorePattern: '^_', argsIgnorePattern: '^_', ignoreRestSiblings: true }] 'no-undef': 0 # TypeScript declare merge 'no-redeclare': 0 'no-useless-constructor': 0 'no-dupe-class-members': 0 'no-case-declarations': 0 'no-duplicate-imports': 0 # TypeScript Interface and Type 'no-use-before-define': 0 '@typescript-eslint/adjacent-overload-signatures': 2 '@typescript-eslint/await-thenable': 2 '@typescript-eslint/consistent-type-assertions': 2 '@typescript-eslint/ban-types': [ 'error', { 'types': { 'String': { 'message': 'Use string instead', 'fixWith': 'string' }, 'Number': { 'message': 'Use number instead', 'fixWith': 'number' }, 'Boolean': { 'message': 'Use boolean instead', 'fixWith': 'boolean' }, 'Function': { 'message': 'Use explicit type instead' }, }, }, ] '@typescript-eslint/explicit-member-accessibility': [ 'error', { accessibility: 'explicit', overrides: { accessors: 'no-public', constructors: 'no-public', methods: 'no-public', properties: 'no-public', parameterProperties: 'explicit', }, }, ] '@typescript-eslint/method-signature-style': 2 '@typescript-eslint/no-floating-promises': 2 '@typescript-eslint/no-implied-eval': 2 '@typescript-eslint/no-for-in-array': 2 '@typescript-eslint/no-inferrable-types': 2 '@typescript-eslint/no-invalid-void-type': 2 '@typescript-eslint/no-misused-new': 2 '@typescript-eslint/no-misused-promises': 2 '@typescript-eslint/no-namespace': 2 '@typescript-eslint/no-non-null-asserted-optional-chain': 2 '@typescript-eslint/no-throw-literal': 2 '@typescript-eslint/no-unnecessary-boolean-literal-compare': 2 '@typescript-eslint/prefer-for-of': 2 '@typescript-eslint/prefer-nullish-coalescing': 2 '@typescript-eslint/switch-exhaustiveness-check': 2 '@typescript-eslint/prefer-optional-chain': 2 '@typescript-eslint/prefer-readonly': 2 '@typescript-eslint/prefer-string-starts-ends-with': 0 '@typescript-eslint/no-array-constructor': 2 '@typescript-eslint/require-await': 2 '@typescript-eslint/return-await': 2 '@typescript-eslint/ban-ts-comment': [2, { 'ts-expect-error': false, 'ts-ignore': true, 'ts-nocheck': true, 'ts-check': false }] 
'@typescript-eslint/naming-convention': [ 2, { selector: 'memberLike', format: ['camelCase', 'PascalCase'], modifiers: ['private'], leadingUnderscore: 'forbid', }, ] '@typescript-eslint/no-unused-vars': [2, { varsIgnorePattern: '^_', argsIgnorePattern: '^_', ignoreRestSiblings: true }] '@typescript-eslint/member-ordering': [ 2, { default: [ 'public-static-field', 'protected-static-field', 'private-static-field', 'public-static-method', 'protected-static-method', 'private-static-method', 'public-instance-field', 'protected-instance-field', 'private-instance-field', 'public-constructor', 'protected-constructor', 'private-constructor', 'public-instance-method', 'protected-instance-method', 'private-instance-method', ], }, ]
tokenizers/bindings/node/.eslintrc.yml/0
{ "file_path": "tokenizers/bindings/node/.eslintrc.yml", "repo_id": "tokenizers", "token_count": 2733 }
198
/* eslint-disable prettier/prettier */ // For a detailed explanation regarding each configuration property, visit: // https://jestjs.io/docs/en/configuration.html module.exports = { // All imported modules in your tests should be mocked automatically // automock: false, // Stop running tests after `n` failures // bail: 0, // Respect "browser" field in package.json when resolving modules // browser: false, // The directory where Jest should store its cached dependency information // cacheDirectory: "/private/var/folders/y_/n6h0fkqn3m57bg_ktk25j7rm0000gn/T/jest_dx", // Automatically clear mock calls and instances between every test // clearMocks: false, // Indicates whether the coverage information should be collected while executing the test // collectCoverage: false, // An array of glob patterns indicating a set of files for which coverage information should be collected // collectCoverageFrom: null, // The directory where Jest should output its coverage files // coverageDirectory: null, // An array of regexp pattern strings used to skip coverage collection // coveragePathIgnorePatterns: [ // "/node_modules/" // ], // A list of reporter names that Jest uses when writing coverage reports // coverageReporters: [ // "json", // "text", // "lcov", // "clover" // ], // An object that configures minimum threshold enforcement for coverage results // coverageThreshold: null, // A path to a custom dependency extractor // dependencyExtractor: null, // Make calling deprecated APIs throw helpful error messages // errorOnDeprecated: false, // Force coverage collection from ignored files using an array of glob patterns // forceCoverageMatch: [], // A path to a module which exports an async function that is triggered once before all test suites // globalSetup: null, // A path to a module which exports an async function that is triggered once after all test suites // globalTeardown: null, // A set of global variables that need to be available in all test environments // globals: {}, // The maximum amount of workers used to run your tests. Can be specified as % or a number. E.g. maxWorkers: 10% will use 10% of your CPU amount + 1 as the maximum worker number. maxWorkers: 2 will use a maximum of 2 workers. // maxWorkers: "50%", // An array of directory names to be searched recursively up from the requiring module's location // moduleDirectories: [ // "node_modules" // ], // An array of file extensions your modules use // moduleFileExtensions: [ // "js", // "json", // "jsx", // "ts", // "tsx", // "node" // ], // A map from regular expressions to module names that allow to stub out resources with a single module // moduleNameMapper: {}, // An array of regexp pattern strings, matched against all module paths before considered 'visible' to the module loader // modulePathIgnorePatterns: [], // Activates notifications for test results // notify: false, // An enum that specifies notification mode. 
Requires { notify: true } // notifyMode: "failure-change", // A preset that is used as a base for Jest's configuration preset: 'ts-jest', // Run tests from one or more projects // projects: null, // Use this configuration option to add custom reporters to Jest // reporters: undefined, // Automatically reset mock state between every test // resetMocks: false, // Reset the module registry before running each individual test // resetModules: false, // A path to a custom resolver // resolver: null, // Automatically restore mock state between every test // restoreMocks: false, // The root directory that Jest should scan for tests and modules within // rootDir: null, // A list of paths to directories that Jest should use to search for files in // roots: [ // "<rootDir>" // ], // Allows you to use a custom runner instead of Jest's default test runner // runner: "jest-runner", // The paths to modules that run some code to configure or set up the testing environment before each test // setupFiles: [], // A list of paths to modules that run some code to configure or set up the testing framework before each test // setupFilesAfterEnv: [], // A list of paths to snapshot serializer modules Jest should use for snapshot testing // snapshotSerializers: [], // The test environment that will be used for testing testEnvironment: 'node', // Options that will be passed to the testEnvironment // testEnvironmentOptions: {}, // Adds a location field to test results // testLocationInResults: false, // The glob patterns Jest uses to detect test files // testMatch: [ // "**/__tests__/**/*.[jt]s?(x)", // "**/?(*.)+(spec|test).[tj]s?(x)" // ], // An array of regexp pattern strings that are matched against all test paths, matched tests are skipped testPathIgnorePatterns: ['/node_modules/', '/dist/'], // The regexp pattern or array of patterns that Jest uses to detect test files // testRegex: [], // This option allows the use of a custom results processor // testResultsProcessor: null, // This option allows use of a custom test runner // testRunner: "jasmine2", // This option sets the URL for the jsdom environment. It is reflected in properties such as location.href // testURL: "http://localhost", // Setting this value to "fake" allows the use of fake timers for functions such as "setTimeout" // timers: "real", // A map from regular expressions to paths to transformers // transform: null, // An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation // transformIgnorePatterns: [ // "/node_modules/" // ], // An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them // unmockedModulePathPatterns: undefined, // Indicates whether each individual test should be reported during the run // verbose: null, // An array of regexp patterns that are matched against all source file paths before re-running tests in watch mode watchPathIgnorePatterns: ['<rootDir>/node_modules/', '<rootDir>/native/', '<rootDir>/dist/', '<rootDir>/build/'], // Whether to use watchman for file crawling // watchman: true, }
tokenizers/bindings/node/jest.config.js/0
{ "file_path": "tokenizers/bindings/node/jest.config.js", "repo_id": "tokenizers", "token_count": 1715 }
199
# `tokenizers-darwin-arm64`

This is the **aarch64-apple-darwin** binary for `tokenizers`
tokenizers/bindings/node/npm/darwin-arm64/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/darwin-arm64/README.md", "repo_id": "tokenizers", "token_count": 33 }
200
# `tokenizers-win32-arm64-msvc`

This is the **aarch64-pc-windows-msvc** binary for `tokenizers`
tokenizers/bindings/node/npm/win32-arm64-msvc/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/win32-arm64-msvc/README.md", "repo_id": "tokenizers", "token_count": 38 }
201
pub mod models;
pub mod tokenizer;
tokenizers/bindings/node/src/tasks/mod.rs/0
{ "file_path": "tokenizers/bindings/node/src/tasks/mod.rs", "repo_id": "tokenizers", "token_count": 11 }
202
import pytest


def pytest_addoption(parser):
    parser.addoption("--runslow", action="store_true", default=False, help="run slow tests")


def pytest_configure(config):
    config.addinivalue_line("markers", "slow: mark test as slow to run")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--runslow"):
        # --runslow given in cli: do not skip slow tests
        return
    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)
tokenizers/bindings/python/conftest.py/0
{ "file_path": "tokenizers/bindings/python/conftest.py", "repo_id": "tokenizers", "token_count": 217 }
203
from typing import Dict, Iterator, List, Optional, Tuple, Union from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers from tokenizers.models import BPE from tokenizers.normalizers import NFKC from .base_tokenizer import BaseTokenizer class SentencePieceBPETokenizer(BaseTokenizer): """SentencePiece BPE Tokenizer Represents the BPE algorithm, with the pretokenization used by SentencePiece """ def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None, unk_token: Union[str, AddedToken] = "<unk>", replacement: str = "▁", add_prefix_space: bool = True, dropout: Optional[float] = None, fuse_unk: Optional[bool] = False, ): if vocab is not None and merges is not None: tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) else: tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) tokenizer.normalizer = NFKC() tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) parameters = { "model": "SentencePieceBPE", "unk_token": unk_token, "replacement": replacement, "add_prefix_space": add_prefix_space, "dropout": dropout, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab_filename: str, merges_filename: str, **kwargs): vocab, merges = BPE.read_file(vocab_filename, merges_filename) return SentencePieceBPETokenizer(vocab, merges, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], show_progress: bool = True, ): """Train the model using the given files""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], show_progress: bool = True, length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py", "repo_id": "tokenizers", "token_count": 1655 }
204
stable
tokenizers/bindings/python/rust-toolchain/0
{ "file_path": "tokenizers/bindings/python/rust-toolchain", "repo_id": "tokenizers", "token_count": 2 }
205
use pyo3::prelude::*; use std::collections::VecDeque; /// An simple iterator that can be instantiated with a specified length. /// We use this with iterators that don't have a size_hint but we might /// know its size. This is useful with progress bars for example. pub struct MaybeSizedIterator<I> { length: Option<usize>, iter: I, } impl<I> MaybeSizedIterator<I> where I: Iterator, { pub fn new(iter: I, length: Option<usize>) -> Self { Self { length, iter } } } impl<I> Iterator for MaybeSizedIterator<I> where I: Iterator, { type Item = I::Item; fn next(&mut self) -> Option<Self::Item> { self.iter.next() } fn size_hint(&self) -> (usize, Option<usize>) { (self.length.unwrap_or(0), None) } } /// A buffered iterator that takes care of locking the GIL only when needed. /// The `PyIterator` provided by PyO3 keeps a Python GIL token all along /// and thus doesn't allow us to release the GIL to allow having other threads. /// /// This iterator serves two purposes: /// - First, as opposed to the `pyo3::PyIterator`, it is Send and can easily be parallelized /// - Second, this let us release the GIL between two refills of the buffer, allowing other /// Python threads to work pub struct PyBufferedIterator<T, F> { iter: Option<Py<PyAny>>, converter: F, buffer: VecDeque<PyResult<T>>, size: usize, } impl<T, F, I> PyBufferedIterator<T, F> where F: Fn(&PyAny) -> I, I: IntoIterator<Item = PyResult<T>>, { /// Create a new PyBufferedIterator using the provided Python object. /// This object must implement the Python Iterator Protocol, and an error will /// be return if the contract is not respected. /// /// The `converter` provides a way to convert each item in the iterator into /// something that doesn't embed a 'py token and thus allows the GIL to be released /// /// The `buffer_size` represents the number of items that we buffer before we /// need to acquire the GIL again. pub fn new(iter: &PyAny, converter: F, buffer_size: usize) -> PyResult<Self> { let py = iter.py(); let iter: Py<PyAny> = unsafe { py.from_borrowed_ptr_or_err::<PyAny>(pyo3::ffi::PyObject_GetIter(iter.as_ptr()))? .to_object(py) }; Ok(Self { iter: Some(iter), converter, buffer: VecDeque::with_capacity(buffer_size), size: buffer_size, }) } /// Refill the buffer, and set `self.iter` as `None` if nothing more to get fn refill(&mut self) -> PyResult<()> { if self.iter.is_none() { return Ok(()); } Python::with_gil(|py| loop { if self.buffer.len() >= self.size { return Ok(()); } match unsafe { py.from_owned_ptr_or_opt::<PyAny>(pyo3::ffi::PyIter_Next( self.iter.as_ref().unwrap().as_ref(py).as_ptr(), )) } { Some(obj) => self.buffer.extend((self.converter)(obj)), None => { if PyErr::occurred(py) { return Err(PyErr::fetch(py)); } else { self.iter = None; } } }; if self.iter.is_none() { return Ok(()); } }) } } impl<T, F, I> Iterator for PyBufferedIterator<T, F> where F: Fn(&PyAny) -> I, I: IntoIterator<Item = PyResult<T>>, { type Item = PyResult<T>; fn next(&mut self) -> Option<Self::Item> { if !self.buffer.is_empty() { self.buffer.pop_front() } else if self.iter.is_some() { if let Err(e) = self.refill() { return Some(Err(e)); } self.next() } else { None } } }
tokenizers/bindings/python/src/utils/iterators.rs/0
{ "file_path": "tokenizers/bindings/python/src/utils/iterators.rs", "repo_id": "tokenizers", "token_count": 1797 }
206
import copy import os import pickle import pytest from tokenizers import ( AddedToken, SentencePieceUnigramTokenizer, Tokenizer, models, normalizers, pre_tokenizers, trainers, ) from ..utils import data_dir, train_files class TestBpeTrainer: def test_can_modify(self): trainer = trainers.BpeTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"], limit_alphabet=13, initial_alphabet=["a", "b", "c"], continuing_subword_prefix="pref", end_of_word_suffix="suf", ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] assert trainer.limit_alphabet == 13 assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] assert trainer.continuing_subword_prefix == "pref" assert trainer.end_of_word_suffix == "suf" # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.limit_alphabet = None assert trainer.limit_alphabet == None trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] trainer.continuing_subword_prefix = None assert trainer.continuing_subword_prefix == None trainer.end_of_word_suffix = None assert trainer.continuing_subword_prefix == None def test_can_pickle(self): assert ( trainers.BpeTrainer(min_frequency=12).__getstate__() == b"""{"BpeTrainer":{"min_frequency":12,"vocab_size":30000,"show_progress":true,"special_tokens":[],"limit_alphabet":null,"initial_alphabet":[],"continuing_subword_prefix":null,"end_of_word_suffix":null,"max_token_length":null,"words":{}}}""" ) assert isinstance(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12))), trainers.BpeTrainer) assert isinstance(copy.deepcopy(trainers.BpeTrainer(min_frequency=12)), trainers.BpeTrainer) # Make sure everything is correct assert pickle.dumps(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12)))) == pickle.dumps( trainers.BpeTrainer(min_frequency=12) ) class TestWordPieceTrainer: def test_can_modify(self): trainer = trainers.WordPieceTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"], limit_alphabet=13, initial_alphabet=["a", "b", "c"], continuing_subword_prefix="pref", end_of_word_suffix="suf", ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] assert trainer.limit_alphabet == 13 assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] assert trainer.continuing_subword_prefix == "pref" assert trainer.end_of_word_suffix == "suf" # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.limit_alphabet = None assert trainer.limit_alphabet == None trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] trainer.continuing_subword_prefix = None assert trainer.continuing_subword_prefix == None trainer.end_of_word_suffix = None assert trainer.continuing_subword_prefix == None def test_can_pickle(self): assert 
isinstance(pickle.loads(pickle.dumps(trainers.WordPieceTrainer())), trainers.WordPieceTrainer) class TestWordLevelTrainer: def test_can_modify(self): trainer = trainers.WordLevelTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"] ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] def test_can_pickle(self): assert isinstance(pickle.loads(pickle.dumps(trainers.WordLevelTrainer())), trainers.WordLevelTrainer) class TestUnigram: def test_train(self, train_files): tokenizer = SentencePieceUnigramTokenizer() tokenizer.train(train_files["small"], show_progress=False) filename = "tests/data/unigram_trained.json" tokenizer.save(filename) os.remove(filename) def test_train_parallelism_with_custom_pretokenizer(self, train_files): class GoodCustomPretok: def split(self, n, normalized): # Here we just test that we can return a List[NormalizedString], it # does not really make sense to return twice the same otherwise return [normalized, normalized] def pre_tokenize(self, pretok): pretok.split(self.split) custom = pre_tokenizers.PreTokenizer.custom(GoodCustomPretok()) bpe_tokenizer = Tokenizer(models.BPE()) bpe_tokenizer.normalizer = normalizers.Lowercase() bpe_tokenizer.pre_tokenizer = custom if "TOKENIZERS_PARALLELISM" in os.environ: del os.environ["TOKENIZERS_PARALLELISM"] trainer = trainers.BpeTrainer(special_tokens=["<unk>"], show_progress=False) bpe_tokenizer.train([train_files["small"]], trainer=trainer) def test_can_pickle(self): assert isinstance(pickle.loads(pickle.dumps(trainers.UnigramTrainer())), trainers.UnigramTrainer) def test_train_with_special_tokens(self): filename = "tests/data/dummy-unigram-special_tokens-train.txt" with open(filename, "w") as f: f.write( """ [CLS] The Zen of Python, by Tim Peters [SEP] [CLS] Beautiful is better than ugly. [SEP] [CLS] Explicit is better than implicit. [SEP] [CLS] Simple is better than complex. [SEP] [CLS] Complex is better than complicated. [SEP] [CLS] Flat is better than nested. [SEP] [CLS] Sparse is better than dense. [SEP] [CLS] Readability counts. [SEP] [CLS] Special cases aren't special enough to break the rules. [SEP] [CLS] Although practicality beats purity. [SEP] [CLS] Errors should never pass silently. [SEP] [CLS] Unless explicitly silenced. [SEP] [CLS] In the face of ambiguity, refuse the temptation to guess. [SEP] [CLS] There should be one-- and preferably only one --obvious way to do it. [SEP] [CLS] Although that way may not be obvious at first unless you're Dutch. [SEP] [CLS] Now is better than never. [SEP] [CLS] Although never is often better than *right* now. [SEP] [CLS] If the implementation is hard to explain, it's a bad idea. [SEP] [CLS] If the implementation is easy to explain, it may be a good idea. [SEP] [CLS] Namespaces are one honking great idea -- let's do more of those! 
[SEP] """ ) tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]"], unk_token="[UNK]" ) tokenizer.train([filename], trainer=trainer) assert tokenizer.encode("[CLS] This is a test [SEP]").tokens == [ "[CLS]", " T", "h", "i", "s", " is ", "a", " ", "te", "s", "t ", "[SEP]", ] tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]"], unk_token="[UNK]", vocab_size=100, ) tokenizer.train([filename], trainer=trainer) assert tokenizer.get_vocab_size() == 100 tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]", "[UNK]"], unk_token="[UNK]", vocab_size=100, ) tokenizer.train([filename], trainer=trainer) assert tokenizer.get_vocab_size() == 100 def test_cannot_train_different_model(self): tokenizer = Tokenizer(models.BPE()) trainer = trainers.UnigramTrainer(show_progress=False) with pytest.raises(Exception, match="UnigramTrainer can only train a Unigram"): tokenizer.train([], trainer) def test_can_modify(self): trainer = trainers.UnigramTrainer( vocab_size=12345, show_progress=False, special_tokens=["1", AddedToken("2", lstrip=True)], initial_alphabet=["a", "b", "c"], ) assert trainer.vocab_size == 12345 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", normalized=False, special=True), AddedToken("2", lstrip=True, normalized=False, special=True), ] assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] def test_continuing_prefix_trainer_mistmatch(self): UNK = "[UNK]" special_tokens = [UNK] tokenizer = Tokenizer(models.BPE(unk_token=UNK, continuing_subword_prefix="##")) trainer = trainers.BpeTrainer(special_tokens=special_tokens) tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [pre_tokenizers.Whitespace(), pre_tokenizers.Digits(individual_digits=True)] ) tokenizer.train(files=["data/big.txt"], trainer=trainer) tokenizer.save("data/tokenizer.json") tokenizer.from_file("data/tokenizer.json")
tokenizers/bindings/python/tests/bindings/test_trainers.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_trainers.py", "repo_id": "tokenizers", "token_count": 4957 }
207
# Added Tokens <tokenizerslangcontent> <python> ## AddedToken [[autodoc]] tokenizers.AddedToken - content - lstrip - normalized - rstrip - single_word </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
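For illustration, here is a small Python sketch of how an `AddedToken` can be configured and registered on a tokenizer; the token contents below are only examples, not tokens defined by the library:

```python
from tokenizers import AddedToken, Tokenizer
from tokenizers.models import BPE

tokenizer = Tokenizer(BPE())

# A token that may appear glued to other characters and that should swallow
# the whitespace on its left when matched
emoji = AddedToken("<emoji>", single_word=False, lstrip=True, rstrip=False)
tokenizer.add_tokens([emoji])

# Special tokens are not normalized by default and are skipped when decoding
tokenizer.add_special_tokens([AddedToken("<pad>", normalized=False)])
```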
tokenizers/docs/source-doc-builder/api/added-tokens.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/added-tokens.mdx", "repo_id": "tokenizers", "token_count": 134 }
208
# Quicktour Let's have a quick look at the 🤗 Tokenizers library features. The library provides an implementation of today's most used tokenizers that is both easy to use and blazing fast. ## Build a tokenizer from scratch To illustrate how fast the 🤗 Tokenizers library is, let's train a new tokenizer on [wikitext-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) (516M of text) in just a few seconds. First things first, you will need to download this dataset and unzip it with: ``` bash wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip unzip wikitext-103-raw-v1.zip ``` ### Training the tokenizer In this tour, we will build and train a Byte-Pair Encoding (BPE) tokenizer. For more information about the different type of tokenizers, check out this [guide](https://huggingface.co/transformers/tokenizer_summary.html) in the 🤗 Transformers documentation. Here, training the tokenizer means it will learn merge rules by: - Start with all the characters present in the training corpus as tokens. - Identify the most common pair of tokens and merge it into one token. - Repeat until the vocabulary (e.g., the number of tokens) has reached the size we want. The main API of the library is the `class` `Tokenizer`, here is how we instantiate one with a BPE model: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_tokenizer", "end-before": "END init_tokenizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_tokenizer", "end-before": "END quicktour_init_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_tokenizer", "end-before": "END init_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> To train our tokenizer on the wikitext files, we will need to instantiate a [trainer]{.title-ref}, in this case a `BpeTrainer` <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_trainer", "end-before": "END init_trainer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_trainer", "end-before": "END quicktour_init_trainer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_trainer", "end-before": "END init_trainer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> We can set the training arguments like `vocab_size` or `min_frequency` (here left at their default values of 30,000 and 0) but the most important part is to give the `special_tokens` we plan to use later on (they are not used at all during training) so that they get inserted in the vocabulary. <Tip> The order in which you write the special tokens list matters: here `"[UNK]"` will get the ID 0, `"[CLS]"` will get the ID 1 and so forth. </Tip> We could train our tokenizer right now, but it wouldn't be optimal. 
Without a pre-tokenizer that will split our inputs into words, we might get tokens that overlap several words: for instance we could get an `"it is"` token since those two words often appear next to each other. Using a pre-tokenizer will ensure no token is bigger than a word returned by the pre-tokenizer. Here we want to train a subword BPE tokenizer, and we will use the easiest pre-tokenizer possible by splitting on whitespace. <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_pretok", "end-before": "END init_pretok", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_pretok", "end-before": "END quicktour_init_pretok", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_pretok", "end-before": "END init_pretok", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Now, we can just call the `Tokenizer.train` method with any list of files we want to use: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START train", "end-before": "END train", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_train", "end-before": "END quicktour_train", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START train", "end-before": "END train", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> This should only take a few seconds to train our tokenizer on the full wikitext dataset! 
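For reference, here is what the whole training pipeline looks like when written out in one place in Python. This is a sketch that mirrors the snippets above; the file paths assume the unzipped wikitext-103 layout from the download step:

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

# Build the tokenizer around a BPE model with an unknown token
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

# Special tokens are inserted in the vocabulary in the given order
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])

files = [f"wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
tokenizer.train(files, trainer)
```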
To save the tokenizer in one file that contains all its configuration and vocabulary, just use the `Tokenizer.save` method: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START save", "end-before": "END save", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_save", "end-before": "END quicktour_save", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START save", "end-before": "END save", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> and you can reload your tokenizer from that file with the `Tokenizer.from_file` `classmethod`: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START reload_tokenizer", "end-before": "END reload_tokenizer", "dedent": 12} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_reload_tokenizer", "end-before": "END quicktour_reload_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START reload_tokenizer", "end-before": "END reload_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> ### Using the tokenizer Now that we have trained a tokenizer, we can use it on any text we want with the `Tokenizer.encode` method: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START encode", "end-before": "END encode", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_encode", "end-before": "END quicktour_encode", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START encode", "end-before": "END encode", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> This applied the full pipeline of the tokenizer on the text, returning an `Encoding` object. To learn more about this pipeline, and how to apply (or customize) parts of it, check out [this page](pipeline). This `Encoding` object then has all the attributes you need for your deep learning model (or other). 
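As a small sketch of what this looks like in Python (reusing the tokenizer trained above), you can encode a sentence and inspect a few of these attributes directly:

```python
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")

print(output.tokens)   # the tokens as strings
print(output.ids)      # the corresponding ids in the vocabulary
print(output.offsets)  # (start, end) offsets of each token in the original text
```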
The `tokens` attribute contains the segmentation of your text in tokens: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_tokens", "end-before": "END print_tokens", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_tokens", "end-before": "END quicktour_print_tokens", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_tokens", "end-before": "END print_tokens", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Similarly, the `ids` attribute will contain the index of each of those tokens in the tokenizer's vocabulary: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_ids", "end-before": "END print_ids", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_ids", "end-before": "END quicktour_print_ids", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_ids", "end-before": "END print_ids", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> An important feature of the 🤗 Tokenizers library is that it comes with full alignment tracking, meaning you can always get the part of your original sentence that corresponds to a given token. Those are stored in the `offsets` attribute of our `Encoding` object. 
For instance, let's assume we want to find out what caused the `"[UNK]"` token to appear. Since it is the token at index 9 in the list, we can just ask for the offset at that index: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_offsets", "end-before": "END print_offsets", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_offsets", "end-before": "END quicktour_print_offsets", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_offsets", "end-before": "END print_offsets", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> and those are the indices that correspond to the emoji in the original sentence: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START use_offsets", "end-before": "END use_offsets", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_use_offsets", "end-before": "END quicktour_use_offsets", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START use_offsets", "end-before": "END use_offsets", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> ### Post-processing We might want our tokenizer to automatically add special tokens, like `"[CLS]"` or `"[SEP]"`. To do this, we use a post-processor. `TemplateProcessing` is the most commonly used; you just have to specify a template for the processing of single sentences and pairs of sentences, along with the special tokens and their IDs. When we built our tokenizer, we set `"[CLS]"` and `"[SEP]"` in positions 1 and 2 of our list of special tokens, so this should be their IDs. 
To double-check, we can use the `Tokenizer.token_to_id` method: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START check_sep", "end-before": "END check_sep", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_check_sep", "end-before": "END quicktour_check_sep", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START check_sep", "end-before": "END check_sep", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Here is how we can set the post-processing to give us the traditional BERT inputs: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_template_processing", "end-before": "END init_template_processing", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_template_processing", "end-before": "END quicktour_init_template_processing", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_template_processing", "end-before": "END init_template_processing", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Let's go over this snippet of code in more detail. First, we specify the template for single sentences: those should have the form `"[CLS] $A [SEP]"` where `$A` represents our sentence. Then, we specify the template for sentence pairs, which should have the form `"[CLS] $A [SEP] $B [SEP]"` where `$A` represents the first sentence and `$B` the second one. The `:1` added in the template represents the `type IDs` we want for each part of our input: it defaults to 0 for everything (which is why we don't have `$A:0`) and here we set it to 1 for the tokens of the second sentence and the last `"[SEP]"` token. Lastly, we specify the special tokens we used and their IDs in our tokenizer's vocabulary. 
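Written out in Python, the post-processor described above looks roughly like this (reusing the tokenizer trained in this tour, where `token_to_id` should return 1 and 2 for the two special tokens):

```python
from tokenizers.processors import TemplateProcessing

tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[
        ("[CLS]", tokenizer.token_to_id("[CLS]")),
        ("[SEP]", tokenizer.token_to_id("[SEP]")),
    ],
)
```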
To check that this worked properly, let's try to encode the same sentence as before: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_special_tokens", "end-before": "END print_special_tokens", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_special_tokens", "end-before": "END quicktour_print_special_tokens", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_special_tokens", "end-before": "END print_special_tokens", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> To check the results on a pair of sentences, we just pass the two sentences to `Tokenizer.encode`: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_special_tokens_pair", "end-before": "END print_special_tokens_pair", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_special_tokens_pair", "end-before": "END quicktour_print_special_tokens_pair", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_special_tokens_pair", "end-before": "END print_special_tokens_pair", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> You can then check that the type IDs attributed to each token are correct with <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_type_ids", "end-before": "END print_type_ids", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_type_ids", "end-before": "END quicktour_print_type_ids", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_type_ids", "end-before": "END print_type_ids", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> If you save your tokenizer with `Tokenizer.save`, the post-processor will be saved along with it. 
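To make the expected result of the checks above concrete, the pair encoding produced by the tokenizer trained in this tour should look roughly like this (exact tokens depend on the training run):

```python
output = tokenizer.encode("Hello, y'all!", "How are you 😁 ?")

print(output.tokens)
# ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"]
print(output.type_ids)
# [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
```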
### Encoding multiple sentences in a batch To get the full speed of the 🤗 Tokenizers library, it's best to process your texts in batches, using the `Tokenizer.encode_batch` method: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START encode_batch", "end-before": "END encode_batch", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_encode_batch", "end-before": "END quicktour_encode_batch", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START encode_batch", "end-before": "END encode_batch", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> The output is then a list of `Encoding` objects like the ones we saw before. You can process as many texts together as you like, as long as they fit in memory. To process a batch of sentence pairs, pass two lists to the `Tokenizer.encode_batch` method: the list of sentences A and the list of sentences B: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START encode_batch_pair", "end-before": "END encode_batch_pair", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_encode_batch_pair", "end-before": "END quicktour_encode_batch_pair", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START encode_batch_pair", "end-before": "END encode_batch_pair", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> When encoding multiple sentences, you can automatically pad the outputs to the longest sentence present by using `Tokenizer.enable_padding`, with the `pad_token` and its ID (which we can double-check with `Tokenizer.token_to_id` like before): <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START enable_padding", "end-before": "END enable_padding", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_enable_padding", "end-before": "END quicktour_enable_padding", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START enable_padding", "end-before": "END enable_padding", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> We can set the `direction` of the padding (defaults to the right) or a given `length` if we want to pad every sample to that specific number (here we leave it unset to pad to the size of the longest text). 
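In code, enabling padding might look like the following sketch (the `pad_id` of 3 assumes `"[PAD]"` was the fourth special token given to the trainer, as above):

```python
# Pad to the longest sequence in the batch, on the right, using "[PAD]"
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")

# Or pad every sample to a fixed length of 128, on the left:
# tokenizer.enable_padding(pad_id=3, pad_token="[PAD]", length=128, direction="left")

output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
```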
<tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_batch_tokens", "end-before": "END print_batch_tokens", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_batch_tokens", "end-before": "END quicktour_print_batch_tokens", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_batch_tokens", "end-before": "END print_batch_tokens", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> In this case, the `attention mask` generated by the tokenizer takes the padding into account: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_attention_mask", "end-before": "END print_attention_mask", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_attention_mask", "end-before": "END quicktour_print_attention_mask", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_attention_mask", "end-before": "END print_attention_mask", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> ## Pretrained <tokenizerslangcontent> <python> ### Using a pretrained tokenizer You can load any tokenizer from the Hugging Face Hub as long as a `tokenizer.json` file is available in the repository. ```python from tokenizers import Tokenizer tokenizer = Tokenizer.from_pretrained("bert-base-uncased") ``` ### Importing a pretrained tokenizer from legacy vocabulary files You can also import a pretrained tokenizer directly in, as long as you have its vocabulary file. For instance, here is how to import the classic pretrained BERT tokenizer: ```python from tokenizers import BertWordPieceTokenizer tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True) ``` as long as you have downloaded the file `bert-base-uncased-vocab.txt` with ```bash wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt ``` </python> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/quicktour.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/quicktour.mdx", "repo_id": "tokenizers", "token_count": 7936 }
209
Components ==================================================================================================== When building a Tokenizer, you can attach various types of components to this Tokenizer in order to customize its behavior. This page lists most provided components. .. _normalizers: .. entities:: python BertNormalizer.clean_text clean_text BertNormalizer.handle_chinese_chars handle_chinese_chars BertNormalizer.strip_accents strip_accents BertNormalizer.lowercase lowercase Normalizer.Sequence ``Sequence([NFKC(), Lowercase()])`` PreTokenizer.Sequence ``Sequence([Punctuation(), WhitespaceSplit()])`` SplitDelimiterBehavior.removed :obj:`removed` SplitDelimiterBehavior.isolated :obj:`isolated` SplitDelimiterBehavior.merged_with_previous :obj:`merged_with_previous` SplitDelimiterBehavior.merged_with_next :obj:`merged_with_next` SplitDelimiterBehavior.contiguous :obj:`contiguous` .. entities:: rust BertNormalizer.clean_text clean_text BertNormalizer.handle_chinese_chars handle_chinese_chars BertNormalizer.strip_accents strip_accents BertNormalizer.lowercase lowercase Normalizer.Sequence ``Sequence::new(vec![NFKC, Lowercase])`` PreTokenizer.Sequence ``Sequence::new(vec![Punctuation, WhitespaceSplit])`` SplitDelimiterBehavior.removed :obj:`Removed` SplitDelimiterBehavior.isolated :obj:`Isolated` SplitDelimiterBehavior.merged_with_previous :obj:`MergedWithPrevious` SplitDelimiterBehavior.merged_with_next :obj:`MergedWithNext` SplitDelimiterBehavior.contiguous :obj:`Contiguous` .. entities:: node BertNormalizer.clean_text cleanText BertNormalizer.handle_chinese_chars handleChineseChars BertNormalizer.strip_accents stripAccents BertNormalizer.lowercase lowercase Normalizer.Sequence .. PreTokenizer.Sequence .. SplitDelimiterBehavior.removed :obj:`removed` SplitDelimiterBehavior.isolated :obj:`isolated` SplitDelimiterBehavior.merged_with_previous :obj:`mergedWithPrevious` SplitDelimiterBehavior.merged_with_next :obj:`mergedWithNext` SplitDelimiterBehavior.contiguous :obj:`contiguous` Normalizers ---------------------------------------------------------------------------------------------------- A ``Normalizer`` is in charge of pre-processing the input string in order to normalize it as relevant for a given use case. Some common examples of normalization are the Unicode normalization algorithms (NFD, NFKD, NFC & NFKC), lowercasing, etc. The specificity of ``tokenizers`` is that we keep track of the alignment while normalizing. This is essential to allow mapping from the generated tokens back to the input text. The ``Normalizer`` is optional. .. list-table:: :header-rows: 1 * - Name - Description - Example * - NFD - NFD unicode normalization - * - NFKD - NFKD unicode normalization - * - NFC - NFC unicode normalization - * - NFKC - NFKC unicode normalization - * - Lowercase - Replaces all uppercase characters with lowercase ones - Input: ``HELLO ὈΔΥΣΣΕΎΣ`` Output: ``hello ὀδυσσεύς`` * - Strip - Removes all whitespace characters on the specified sides (left, right or both) of the input - Input: ``" hi "`` Output: ``"hi"`` * - StripAccents - Removes all accent symbols in Unicode (to be used with NFD for consistency) - Input: ``é`` Output: ``e`` * - Replace - Replaces a custom string or regexp with the given content - ``Replace("a", "e")`` will behave like this: Input: ``"banana"`` Output: ``"benene"`` * - BertNormalizer - Provides an implementation of the Normalizer used in the original BERT. 
Options that can be set are: - :entity:`BertNormalizer.clean_text` - :entity:`BertNormalizer.handle_chinese_chars` - :entity:`BertNormalizer.strip_accents` - :entity:`BertNormalizer.lowercase` - * - Sequence - Composes multiple normalizers that will run in the provided order - :entity:`Normalizer.Sequence` .. _pre-tokenizers: Pre tokenizers ---------------------------------------------------------------------------------------------------- The ``PreTokenizer`` takes care of splitting the input according to a set of rules. This pre-processing lets you ensure that the underlying ``Model`` does not build tokens across multiple "splits". For example, if you don't want to have whitespaces inside a token, then you can have a ``PreTokenizer`` that splits on these whitespaces. You can easily combine multiple ``PreTokenizer`` together using a ``Sequence`` (see below). The ``PreTokenizer`` is also allowed to modify the string, just like a ``Normalizer`` does. This is necessary to allow some complicated algorithms that require splitting before normalizing (e.g. the ByteLevel). .. list-table:: :header-rows: 1 * - Name - Description - Example * - ByteLevel - Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: - Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters. - A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉) - For non-ASCII characters, it gets completely unreadable, but it works nonetheless! - Input: ``"Hello my friend, how are you?"`` Output: ``"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"`` * - Whitespace - Splits on word boundaries (using the following regular expression: ``\w+|[^\w\s]+``) - Input: ``"Hello there!"`` Output: ``"Hello", "there", "!"`` * - WhitespaceSplit - Splits on any whitespace character - Input: ``"Hello there!"`` Output: ``"Hello", "there!"`` * - Punctuation - Will isolate all punctuation characters - Input: ``"Hello?"`` Output: ``"Hello", "?"`` * - Metaspace - Splits on whitespaces and replaces them with a special char "▁" (U+2581) - Input: ``"Hello there"`` Output: ``"Hello", "▁there"`` * - CharDelimiterSplit - Splits on a given character - Example with ``x``: Input: ``"Helloxthere"`` Output: ``"Hello", "there"`` * - Digits - Splits the numbers from any other characters. - Input: ``"Hello123there"`` Output: ```"Hello", "123", "there"``` * - Split - Versatile pre-tokenizer that splits on the provided pattern and according to the provided behavior. The pattern can be inverted if necessary. - pattern should be either a custom string or regexp. - behavior should be one of: * :entity:`SplitDelimiterBehavior.removed` * :entity:`SplitDelimiterBehavior.isolated` * :entity:`SplitDelimiterBehavior.merged_with_previous` * :entity:`SplitDelimiterBehavior.merged_with_next` * :entity:`SplitDelimiterBehavior.contiguous` - invert should be a boolean flag. - Example with `pattern` = :obj:`" "`, `behavior` = :obj:`"isolated"`, `invert` = :obj:`False`: Input: ``"Hello, how are you?"`` Output: ```"Hello,", " ", "how", " ", "are", " ", "you?"``` * - Sequence - Lets you compose multiple ``PreTokenizer`` that will be run in the given order - :entity:`PreTokenizer.Sequence` .. 
_models: Models ---------------------------------------------------------------------------------------------------- Models are the core algorithms used to actually tokenize, and therefore, they are the only mandatory component of a Tokenizer. .. list-table:: :header-rows: 1 * - Name - Description * - WordLevel - This is the "classic" tokenization algorithm. It lets you simply map words to IDs without anything fancy. This has the advantage of being really simple to use and understand, but it requires extremely large vocabularies for good coverage. *Using this* ``Model`` *requires the use of a* ``PreTokenizer``. *No choice will be made by this model directly, it simply maps input tokens to IDs* * - BPE - One of the most popular subword tokenization algorithms. The Byte-Pair-Encoding works by starting with characters, while merging those that are the most frequently seen together, thus creating new tokens. It then works iteratively to build new tokens out of the most frequent pairs it sees in a corpus. BPE is able to build words it has never seen by using multiple subword tokens, and thus requires smaller vocabularies, with fewer chances of having "unk" (unknown) tokens. * - WordPiece - This is a subword tokenization algorithm quite similar to BPE, used mainly by Google in models like BERT. It uses a greedy algorithm that tries to build long words first, splitting into multiple tokens when entire words don't exist in the vocabulary. This is different from BPE, which starts from characters and builds bigger tokens when possible. It uses the famous ``##`` prefix to identify tokens that are part of a word (i.e. not starting a word). * - Unigram - Unigram is also a subword tokenization algorithm, and works by trying to identify the best set of subword tokens to maximize the probability for a given sentence. This is different from BPE in that it is not deterministic, based on a set of rules applied sequentially. Instead, Unigram will be able to compute multiple ways of tokenizing, while choosing the most probable one. .. _post-processors: PostProcessor ---------------------------------------------------------------------------------------------------- After the whole pipeline, we sometimes want to insert some special tokens before feeding a tokenized string into a model, like "[CLS] My horse is amazing [SEP]". The ``PostProcessor`` is the component doing just that. .. list-table:: :header-rows: 1 * - Name - Description - Example * - TemplateProcessing - Lets you easily template the post-processing, adding special tokens, and specifying the ``type_id`` for each sequence/special token. The template is given two strings representing the single sequence and the pair of sequences, as well as a set of special tokens to use. - Example, when specifying a template with these values: - single: ``"[CLS] $A [SEP]"`` - pair: ``"[CLS] $A [SEP] $B [SEP]"`` - special tokens: - ``"[CLS]"`` - ``"[SEP]"`` Input: ``("I like this", "but not this")`` Output: ``"[CLS] I like this [SEP] but not this [SEP]"`` .. _decoders: Decoders ---------------------------------------------------------------------------------------------------- The Decoder knows how to go from the IDs used by the Tokenizer back to a readable piece of text. Some ``Normalizer`` and ``PreTokenizer`` use special characters or identifiers that need to be reverted, for example. .. list-table:: :header-rows: 1 * - Name - Description * - ByteLevel - Reverts the ByteLevel PreTokenizer. 
This PreTokenizer encodes at the byte-level, using a set of visible Unicode characters to represent each byte, so we need a Decoder to revert this process and get something readable again. * - Metaspace - Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier ``▁`` to identify whitespaces, and so this Decoder helps with decoding these. * - WordPiece - Reverts the WordPiece Model. This model uses a special identifier ``##`` for continuing subwords, and so this Decoder helps with decoding these.
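As an illustration of how these components fit together, here is a minimal sketch using the Python bindings; the special token IDs below are only examples and would normally come from the trained vocabulary.

.. code-block:: python

    from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors
    from tokenizers.models import BPE

    tokenizer = Tokenizer(BPE(unk_token="[UNK]"))

    # Normalizer: NFD unicode normalization, strip accents, then lowercase
    tokenizer.normalizer = normalizers.Sequence(
        [normalizers.NFD(), normalizers.StripAccents(), normalizers.Lowercase()]
    )

    # PreTokenizer: split on whitespace and isolate punctuation
    tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
        [pre_tokenizers.Whitespace(), pre_tokenizers.Punctuation()]
    )

    # PostProcessor: wrap single sequences with [CLS] ... [SEP]
    tokenizer.post_processor = processors.TemplateProcessing(
        single="[CLS] $A [SEP]",
        special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
    )

    # Decoder: revert the model-specific markers when going back to text
    tokenizer.decoder = decoders.BPEDecoder()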
tokenizers/docs/source/components.rst/0
{ "file_path": "tokenizers/docs/source/components.rst", "repo_id": "tokenizers", "token_count": 4236 }
210
<p align="center"> <br> <img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/> <br> <p> <p align="center"> <img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg"> <a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE"> <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue"> </a> <a href="https://docs.rs/tokenizers/"> <img alt="Doc" src="https://docs.rs/tokenizers/badge.svg"> </a> </p> <br> {{readme}}
tokenizers/tokenizers/README.tpl/0
{ "file_path": "tokenizers/tokenizers/README.tpl", "repo_id": "tokenizers", "token_count": 259 }
211
use crate::decoders::DecoderWrapper; use crate::tokenizer::{Decoder, Result}; use crate::utils::macro_rules_attribute; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct Sequence { decoders: Vec<DecoderWrapper>, } impl Sequence { pub fn new(decoders: Vec<DecoderWrapper>) -> Self { Self { decoders } } } impl Decoder for Sequence { fn decode_chain(&self, mut tokens: Vec<String>) -> Result<Vec<String>> { for decoder in &self.decoders { tokens = decoder.decode_chain(tokens)?; } Ok(tokens) } } #[cfg(test)] mod tests { use super::*; use crate::decoders::ctc::CTC; use crate::pre_tokenizers::metaspace::Metaspace; #[test] fn sequence_basic() { let decoders = vec![ DecoderWrapper::CTC(CTC::default()), DecoderWrapper::Metaspace(Metaspace::default()), ]; let decoder = Sequence::new(decoders); let tokens: Vec<String> = vec!["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"] .into_iter() .map(|s| s.to_string()) .collect(); let out_tokens = decoder.decode(tokens).unwrap(); assert_eq!(out_tokens, "Hi you"); } }
tokenizers/tokenizers/src/decoders/sequence.rs/0
{ "file_path": "tokenizers/tokenizers/src/decoders/sequence.rs", "repo_id": "tokenizers", "token_count": 600 }
212
use super::OrderedVocabIter; use crate::tokenizer::{Model, Result, Token}; use serde_json::Value; use std::collections::HashMap; use std::fs::File; use std::io::{BufReader, Read, Write}; use std::path::{Path, PathBuf}; mod serialization; mod trainer; // Re-export pub use trainer::*; type Vocab = HashMap<String, u32>; #[derive(thiserror::Error, Debug)] pub enum Error { #[error("WordLevel error: Missing [UNK] token from the vocabulary")] MissingUnkToken, #[error("Bad vocabulary json file")] BadVocabulary, } struct Config { files: Option<String>, vocab: HashMap<String, u32>, unk_token: String, } /// A `WordLevelBuilder` can be used to create a `WordLevel` /// model with a custom configuration. pub struct WordLevelBuilder { config: Config, } impl Default for WordLevelBuilder { fn default() -> Self { Self { config: Config { files: None, vocab: HashMap::new(), unk_token: String::from("<unk>"), }, } } } impl WordLevelBuilder { /// Construct a new `WordLevelBuilder`. pub fn new() -> Self { Self::default() } /// Set the input files. #[must_use] pub fn files(mut self, vocab: String) -> Self { self.config.files = Some(vocab); self } /// Set the vocab (token -> ID) mapping. #[must_use] pub fn vocab(mut self, vocab: HashMap<String, u32>) -> Self { self.config.vocab = vocab; self } /// The the `UNK` token for the vocab. #[must_use] pub fn unk_token(mut self, unk_token: String) -> Self { self.config.unk_token = unk_token; self } /// Contructs a `WordLevel` model that uses the `WordLevelBuilder`'s configuration. pub fn build(mut self) -> Result<WordLevel> { if let Some(vocab) = self.config.files { self.config.vocab = WordLevel::read_file(&vocab)?; } let vocab_r = self .config .vocab .iter() .map(|(key, val)| (*val, key.to_owned())) .collect(); Ok(WordLevel { vocab: self.config.vocab, vocab_r, unk_token: self.config.unk_token, }) } } #[derive(PartialEq, Clone, Eq)] pub struct WordLevel { vocab: HashMap<String, u32>, vocab_r: HashMap<u32, String>, pub unk_token: String, } impl std::fmt::Debug for WordLevel { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("WordLevel") .field("unk_token", &self.unk_token) .field("vocab", &self.vocab.len()) .finish() } } impl WordLevel { pub fn builder() -> WordLevelBuilder { WordLevelBuilder::new() } pub fn read_file(vocab_path: &str) -> Result<Vocab> { let vocab_file = File::open(vocab_path)?; let mut vocab_file = BufReader::new(vocab_file); let mut buffer = String::new(); let mut vocab = HashMap::new(); vocab_file.read_to_string(&mut buffer)?; let json: Value = serde_json::from_str(&buffer)?; match json { Value::Object(m) => { for (token, id) in m { if let Value::Number(id) = id { let id = id.as_u64().ok_or(Error::BadVocabulary)? as u32; vocab.insert(token, id); } } } _ => return Err(Box::new(Error::BadVocabulary)), }; Ok(vocab) } /// Initialize a WordLevel model from vocab and merges file. 
pub fn from_file(vocab_path: &str, unk_token: String) -> Result<WordLevel> { let vocab = WordLevel::read_file(vocab_path)?; Self::builder().vocab(vocab).unk_token(unk_token).build() } } impl Default for WordLevel { fn default() -> Self { Self { vocab: HashMap::new(), vocab_r: HashMap::new(), unk_token: String::from("<unk>"), } } } impl Model for WordLevel { type Trainer = WordLevelTrainer; fn tokenize(&self, token: &str) -> Result<Vec<Token>> { if let Some(&id) = self.vocab.get(token) { Ok(vec![Token { id, value: token.to_owned(), offsets: (0, token.len()), }]) } else if let Some(&unk_id) = self.vocab.get(&self.unk_token) { Ok(vec![Token { id: unk_id, value: self.unk_token.to_owned(), offsets: (0, token.len()), }]) } else { Err(Box::new(Error::MissingUnkToken)) } } fn token_to_id(&self, token: &str) -> Option<u32> { self.vocab.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab_r.get(&id).cloned() } fn get_vocab(&self) -> HashMap<String, u32> { self.vocab.clone() } fn get_vocab_size(&self) -> usize { self.vocab.keys().len() } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { let vocab_file_name = match name { Some(name) => format!("{}-vocab.json", name), None => "vocab.json".to_string(), }; // Write vocab.json let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())] .iter() .collect(); let mut vocab_file = File::create(&vocab_path)?; let order_vocab_iter = OrderedVocabIter::new(&self.vocab_r); let serialized = serde_json::to_string(&order_vocab_iter)?; vocab_file.write_all(serialized.as_bytes())?; Ok(vec![vocab_path]) } fn get_trainer(&self) -> Self::Trainer { WordLevelTrainer::default() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_tokenize_unk() { let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)] .iter() .cloned() .collect(); let wordlevel = WordLevelBuilder::default() .vocab(vocab) .unk_token("<unk>".to_string()) .build() .unwrap(); let tokens = wordlevel.tokenize("c").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]); let tokens = wordlevel.tokenize("a").unwrap(); assert_eq!(tokens, vec![Token::new(1u32, "a".into(), (0, 1)),]); } #[test] fn test_tokenize_missing_unk_token() { let vocab: Vocab = [("a".into(), 0), ("b".into(), 1)].iter().cloned().collect(); let wordlevel = WordLevelBuilder::default().vocab(vocab).build().unwrap(); let tokens = wordlevel.tokenize("a").unwrap(); assert_eq!(tokens, vec![Token::new(0u32, "a".into(), (0, 1)),]); let error = wordlevel.tokenize("c").err().unwrap(); assert!(error.is::<Error>()); } }
tokenizers/tokenizers/src/models/wordlevel/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/wordlevel/mod.rs", "repo_id": "tokenizers", "token_count": 3383 }
213
use serde::{Deserialize, Serialize}; use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[non_exhaustive] #[macro_rules_attribute(impl_serde_type!)] pub struct CharDelimiterSplit { pub delimiter: char, } impl CharDelimiterSplit { pub fn new(delimiter: char) -> Self { Self { delimiter } } } impl PreTokenizer for CharDelimiterSplit { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { // TODO: Maybe add the option to specify the behavior pretokenized.split(|_, normalized| { normalized.split(self.delimiter, SplitDelimiterBehavior::Removed) }) } }
tokenizers/tokenizers/src/pre_tokenizers/delimiter.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/delimiter.rs", "repo_id": "tokenizers", "token_count": 296 }
214
use super::{ normalizer::Range, Model, NormalizedString, Normalizer, Offsets, PreTokenizedString, Token, }; use aho_corasick::{AhoCorasick, AhoCorasickBuilder, MatchKind}; use regex::Regex; use serde::{ser::SerializeSeq, Deserialize, Serialize, Serializer}; use std::collections::{HashMap, HashSet}; /// Represent a token added by the user on top of the existing Model vocabulary. /// AddedToken can be configured to specify the behavior they should have in various situations /// like: /// - Whether they should only match single words /// - Whether to include any whitespace on its left or right #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct AddedToken { /// The content of the added token pub content: String, /// Whether this token must be a single word or can break words pub single_word: bool, /// Whether this token should strip whitespaces on its left pub lstrip: bool, /// Whether this token should strip whitespaces on its right pub rstrip: bool, /// Whether this token should be normalized pub normalized: bool, /// Whether this token is special pub special: bool, } impl AddedToken { /// Build this token from the given content, specifying if it is intented to be a /// special token. Special tokens are not normalized by default. pub fn from<S: Into<String>>(content: S, special: bool) -> Self { Self { content: content.into(), normalized: !special, special, ..Default::default() } } /// Specify whether this token should only match on whole single words, and never /// part of a word. #[must_use] pub fn single_word(mut self, single_word: bool) -> Self { self.single_word = single_word; self } /// Specify whether this token should include all the whitespaces on its left, in /// order to strip them out. #[must_use] pub fn lstrip(mut self, lstrip: bool) -> Self { self.lstrip = lstrip; self } /// Specify whether this token should include all the whitespaces on its right, in /// order to strip them out. #[must_use] pub fn rstrip(mut self, rstrip: bool) -> Self { self.rstrip = rstrip; self } /// Specify whether this token should be normalized and match against its normalized /// version in the input text. #[must_use] pub fn normalized(mut self, normalized: bool) -> Self { self.normalized = normalized; self } /// Specify whether this token is special, meaning if it should be skipped when decoding #[must_use] pub fn special(mut self, special: bool) -> Self { self.special = special; self } } impl Default for AddedToken { fn default() -> Self { Self { content: String::new(), single_word: false, lstrip: false, rstrip: false, normalized: true, special: false, } } } // AddedTokens can be updated if value changed impl std::hash::Hash for AddedToken { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.content.hash(state); } } type MatchingSet = (AhoCorasick, Vec<u32>); lazy_static! 
{ static ref STARTS_WITH_WORD: Regex = Regex::new(r"^\w").unwrap(); static ref ENDS_WITH_WORD: Regex = Regex::new(r"\w$").unwrap(); static ref RIGHTMOST_SPACE_AT_START: Regex = Regex::new(r"^\s*").unwrap(); static ref LEFTMOST_SPACE_AT_END: Regex = Regex::new(r"\s*$").unwrap(); } fn ends_with_word(sentence: &str) -> bool { ENDS_WITH_WORD.is_match(sentence) } fn starts_with_word(sentence: &str) -> bool { STARTS_WITH_WORD.is_match(sentence) } fn space_leftmost_at_end(sentence: &str) -> usize { if let Some(match_) = LEFTMOST_SPACE_AT_END.find(sentence) { match_.start() } else { sentence.len() } } fn space_rightmost_at_start(sentence: &str) -> usize { if let Some(match_) = RIGHTMOST_SPACE_AT_START.find(sentence) { match_.end() } else { 0 } } /// /// A vocabulary built on top of the Model /// /// This provides a way to add new vocabulary to a Tokenizer that has already been trained, /// in a previous process, maybe by someone else. This is especially interesting in the case /// of fine-tunings, where we want to finetune a model while adding some new functionalities /// using some new special tokens, or maybe add some tokens in the case of unknown tokens, etc. /// /// One of the reasons we need to handle these tokens outside of the model is simply that /// for many models, it is not possible to add new tokens after the training process. For example, /// using BPE, the training process generates merges pairs along the vocabulary, and any token /// in the vocabulary can be decomposed in other tokens, down to the original alphabet. If we /// were to add new tokens after this training process, we couldn't make sure the merges pairs /// exist as required. /// #[derive(Clone, Debug)] pub(super) struct AddedVocabulary { /// Contains the mapping from String (token content) to ID. This map contains both special /// tokens and classic added tokens that were added to the this vocabulary. added_tokens_map: HashMap<String, u32>, /// Contains the mapping from ID to AddedToken for all the added tokens, both special /// and classic. added_tokens_map_r: HashMap<u32, AddedToken>, /// Contains only the classic AddedToken, in the specific order the user gave them. added_tokens: Vec<AddedToken>, /// Contains only the special AddedToken, in the specific order the user gave them. special_tokens: Vec<AddedToken>, /// A Set, containing all the special token for easy access while decoding. This let's /// us remove them easily with an O(1) complexity. special_tokens_set: HashSet<String>, /// A RegexSet containing all the non-normalized patterns used to split on AddedTokens split_trie: MatchingSet, /// A RegexSet containing all the normalized patterns used to split on AddedTokens split_normalized_trie: MatchingSet, /// Whether or not special tokens should be splitted when encoding. 
This is equivalent to ignoring them encode_special_tokens: bool, } impl AddedVocabulary { pub fn new() -> Self { let trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build::<_, &&[u8]>([]) .expect("The trie should build correctly"); let normalized_trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build::<_, &&[u8]>([]) .expect("The normalized trie should build correctly"); Self { added_tokens_map: HashMap::new(), added_tokens_map_r: HashMap::new(), added_tokens: vec![], special_tokens: vec![], special_tokens_set: HashSet::new(), split_trie: (trie, vec![]), split_normalized_trie: (normalized_trie, vec![]), encode_special_tokens: false, } } /// Size of the additional vocabulary #[allow(dead_code)] // Suppress the "method is never used" warning pub fn len(&self) -> usize { self.added_tokens_map.len() } /// Get the additional vocabulary pub fn get_vocab(&self) -> &HashMap<String, u32> { &self.added_tokens_map } /// Get the additional vocabulary with the AddedTokens pub fn get_added_tokens_decoder(&self) -> &HashMap<u32, AddedToken> { &self.added_tokens_map_r } /// Get the id matching one of our token if it exists pub fn token_to_id(&self, token: &str, model: &impl Model) -> Option<u32> { self.added_tokens_map .get(token) .copied() .or_else(|| model.token_to_id(token)) } /// Get the token matching the given id if it exists pub fn id_to_token(&self, id: u32, model: &impl Model) -> Option<String> { self.added_tokens_map_r .get(&id) .map(|t| t.content.clone()) .or_else(|| model.id_to_token(id)) } // pub fn set_encode_special_tokens(&mut self, value: bool) { self.encode_special_tokens = value; } pub fn get_encode_special_tokens(&self) -> bool { self.encode_special_tokens } /// Check if a token is a special token pub fn is_special_token(&self, token: &str) -> bool { self.special_tokens_set.contains(token) } /// Add some special tokens to the vocabulary pub fn add_special_tokens<N: Normalizer>( &mut self, tokens: &[AddedToken], model: &impl Model, normalizer: Option<&N>, ) -> usize { self.add_tokens(tokens, model, normalizer) } /// Add some tokens to the vocabulary pub fn add_tokens<N: Normalizer>( &mut self, tokens: &[AddedToken], model: &impl Model, normalizer: Option<&N>, ) -> usize { // Handle special tokens (if any) for token in tokens { if token.special && !token.content.is_empty() && !self.special_tokens_set.contains(&token.content) { self.special_tokens.push(token.to_owned()); self.special_tokens_set.insert(token.content.clone()); } } // Then we delegate to `add_tokens`, that will take care of refreshing added tokens too. 
let mut ignored = 0; for token in tokens { if token.content.is_empty() || self.added_tokens_map_r.values().any(|val| val == token) { ignored += 1; continue; } // If a token is already part of the vocabulary, we mark it as added let new_id = if let Some(new_id) = self.token_to_id(&token.content, model) { new_id } else { self.added_tokens_map.values().cloned().max().map_or( model.get_vocab_size() as u32, |max| { if (max >= model.get_vocab_size() as u32) || model.get_vocab_size() == 0 { max + 1 } else { model.get_vocab_size() as u32 } }, ) }; // Make sure we modify the previous entry self.added_tokens_map .entry(token.content.clone()) .and_modify(|old_id| *old_id = new_id) .or_insert_with(|| new_id); // Update the current revert operation self.added_tokens_map_r .entry(new_id) .and_modify(|t| *t = token.clone()) .or_insert_with(|| token.clone()); // Make sure to remove previous entry (if the token gets a new id) // Finally add the token to the classic set if special if !self.special_tokens_set.contains(&token.content) { self.added_tokens.push(token.clone()); } } self.refresh_added_tokens(model, normalizer); // Return the number of added tokens tokens.len() - ignored } /// Reconstruct our internal RegexSet when new tokens are added to the vocabulary. /// /// We keep two different RegexSet, one that will take care of matching against the /// non-normalized string, and one matching against the normalized one. fn refresh_added_tokens<N: Normalizer>(&mut self, model: &impl Model, normalizer: Option<&N>) { type TupleTokenId<'a> = (&'a AddedToken, u32); let (normalized, non_normalized): (Vec<TupleTokenId>, Vec<TupleTokenId>) = self .special_tokens .iter() .chain(self.added_tokens.iter()) .map(|token| { ( token, self.token_to_id(&token.content, model) .expect("Missing additional token"), ) }) .partition(|(token, _)| token.normalized); let (tokens, ids): (Vec<&AddedToken>, Vec<u32>) = non_normalized.into_iter().unzip(); let trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build(tokens.iter().map(|token| &token.content)) .expect("Failed to build tried when refreshing tokens"); self.split_trie = (trie, ids); let (ntokens, nids): (Vec<&AddedToken>, Vec<u32>) = normalized.into_iter().unzip(); let patterns: Vec<_> = ntokens .iter() .map(|token| { let mut content = NormalizedString::from(token.content.as_ref()); if let Some(n) = normalizer { n.normalize(&mut content).unwrap(); } content }) .collect(); let normalized_trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build(patterns.iter().map(|content| content.get())) .expect("Failed to build tried when refreshing tokens (normalized)"); self.split_normalized_trie = (normalized_trie, nids); } /// Find any AddedToken in the given sentence, using the provided MatchingSet. /// This method returns a list "splits", each of them being a pair of Offsets /// and an optional ID if it is an AddedToken. /// The list of splits cover the entire input string. 
fn find_matches(&self, sentence: &str, split_re: &MatchingSet) -> Vec<(Option<u32>, Offsets)> { if sentence.is_empty() { return vec![(None, (0, 0))]; } let mut start_offset = 0; let mut splits = vec![]; for mat in split_re.0.find_iter(sentence) { let mut start = mat.start(); let mut stop = mat.end(); let aho_id = mat.pattern(); let id = split_re.1[aho_id]; let added_token = &self.added_tokens_map_r.get(&id).unwrap(); if self.encode_special_tokens && self.special_tokens_set.contains(&added_token.content) { continue; } if added_token.single_word { let start_space = start == 0 || !ends_with_word(&sentence[..start]); let stop_space = stop == sentence.len() || !starts_with_word(&sentence[stop..]); if !stop_space || !start_space { // Discard not single word continue; } } if added_token.lstrip { // This will be strictly inferior to start and in correct sentence offset let newstart = space_leftmost_at_end(&sentence[..start]); // The previous match could have already matched those spaces // Ignore them if it's already matched start = std::cmp::max(newstart, start_offset); } if added_token.rstrip { // This will starting a the stop+1 character, so we need // to add the previous stop value stop += space_rightmost_at_start(&sentence[stop..]) } if start_offset < start { splits.push((None, (start_offset, start))); } splits.push((Some(id), (start, stop))); start_offset = stop; } let total_byte_len = sentence.len(); if start_offset != total_byte_len { splits.push((None, (start_offset, total_byte_len))); } splits } /// Split the input sentence to extract anything we found from the `MatchingSet`, as well as /// the list of corresponding IDs /// The list of IDs have the exact same number of elements than the Iterator. fn split_with_indices( &self, sentence: NormalizedString, split_re: &MatchingSet, ) -> Vec<(NormalizedString, Option<Vec<Token>>)> { self.find_matches(sentence.get(), split_re) .into_iter() .map(|(id, byte_offsets)| { let slice = sentence .slice(Range::Normalized(byte_offsets.0..byte_offsets.1)) .expect("AddedVocabulary bad split"); if let Some(id) = id { let value = slice.get().to_owned(); let len = value.len(); (slice, Some(vec![Token::new(id, value, (0, len))])) } else { (slice, None) } }) .collect() } /// Extract the additional vocabulary from the given sentence, normalizing it along the way. /// /// Some tokens should match against their normalized representation, as well as the /// non-normalized one. For example, when we expect to extract the token `yesterday` in the /// input sentence `I read a book Yesterday`, if the normalizer is supposed to lowercase /// everything, we expect a match. pub fn extract_and_normalize<N: Normalizer>( &self, normalizer: Option<&N>, sequence: &str, ) -> PreTokenizedString { let mut pretokenized: PreTokenizedString = sequence.into(); // 1. We extract all the non-normalized tokens from the non-normalized string pretokenized .split(|_, sequence| Ok(self.split_with_indices(sequence, &self.split_trie))) .expect("AddedVocabulary bad split"); // <s> normalized = False // "I read a book <s>Hey" -> "I read a book", " <s>", "Hey" // </s> normalized = True -> "▁</s>" // "I read a book</s>Hey" -> "I read a book</s>Hey" // Day normalized = True -> "Day" // "I read a book monday" -> "I read a book monday" // [DAY] normalized = False -> "Day" // "I read a [DAY] monday" -> "I read a " "[DAY]", "book monday" // 320055 // 2. 
Then extract the normalized tokens from the normalized pieces of the string pretokenized .split(|_, mut sequence| { normalizer.map(|n| n.normalize(&mut sequence)); Ok(self.split_with_indices(sequence, &self.split_normalized_trie)) }) .expect("AddedVocabulary bad split"); // ["I read a book", " <s>", "Hey"] -> ["▁I read a book", "▁ <s>", "▁Hey"] // ["▁I read a book", "▁ <s>", "▁Hey"] -> [.., "▁ ", "<s>", "▁Hey"] // </s> normalized = True -> "▁</s>" // "I read a book</s>Hey" -> ["▁I read a book", "<","/","s",">", "Hey"] // "I read a " "[DAY]", "book monday" -> "i read a " "[day]", "book monday" pretokenized } } #[derive(Debug, Serialize, Deserialize)] pub(super) struct AddedTokenWithId { /// The id assigned to this token pub id: u32, #[serde(flatten)] /// The target AddedToken pub token: AddedToken, } impl Serialize for AddedVocabulary { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut added_tokens = self .added_tokens_map_r .iter() .map(|(id, token)| AddedTokenWithId { id: *id, token: token.clone(), }) .collect::<Vec<_>>(); // We need to have these added tokens ordered by ascending ID added_tokens.sort_unstable_by_key(|o| o.id); let mut vocabulary = serializer.serialize_seq(Some(added_tokens.len()))?; for token in added_tokens { vocabulary.serialize_element(&token)?; } vocabulary.end() } } #[cfg(test)] mod tests { use super::*; use crate::normalizers::utils::Lowercase; use crate::normalizers::NormalizerWrapper; use crate::{OffsetReferential, OffsetType, Result, Token, Trainer}; use std::path::{Path, PathBuf}; #[derive(Serialize, Deserialize)] struct ModelMock { vocab: HashMap<String, u32>, vocab_r: HashMap<u32, String>, } impl ModelMock { pub fn new<I>(iter: I) -> Self where I: IntoIterator<Item = &'static (&'static str, u32)>, { let vocab: HashMap<String, u32> = iter .into_iter() .map(|&(tok, id)| (tok.to_string(), id)) .collect(); Self { vocab_r: vocab .iter() .map(|(tok, id)| (*id, tok.to_owned())) .collect(), vocab, } } } fn simplify_output(result: &'_ PreTokenizedString) -> Vec<(&'_ str, Option<Vec<u32>>)> { result .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, _, tokens)| { ( s, tokens .as_ref() .map(|t| t.iter().map(|t| t.id).collect::<Vec<_>>()), ) }) .collect::<Vec<_>>() } struct TrainerMock; impl Trainer for TrainerMock { type Model = ModelMock; fn should_show_progress(&self) -> bool { true } fn train(&self, _model: &mut ModelMock) -> Result<Vec<AddedToken>> { unimplemented!() } fn feed<I, S, F>(&mut self, _iterator: I, _process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { unimplemented!() } } impl Model for ModelMock { type Trainer = TrainerMock; fn tokenize(&self, _sequence: &str) -> Result<Vec<Token>> { unimplemented!() } fn token_to_id(&self, token: &str) -> Option<u32> { self.vocab.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab_r.get(&id).cloned() } fn get_vocab(&self) -> HashMap<String, u32> { self.vocab.clone() } fn get_vocab_size(&self) -> usize { self.vocab.len() } fn save(&self, _folder: &Path, _name: Option<&str>) -> Result<Vec<PathBuf>> { unimplemented!() } fn get_trainer(&self) -> Self::Trainer { TrainerMock } } #[test] fn can_add_tokens() { let model = ModelMock::new(&[("test", 0), ("tost", 1)]); let mut vocab = AddedVocabulary::new(); let normalizer: Option<&NormalizerWrapper> = None; // Add tokens normally assert_eq!( vocab.add_tokens( &[AddedToken::from("added_token_1", 
false)], &model, normalizer ), 1 ); let vocab_len: usize = vocab.len(); assert_eq!(vocab_len, 1); // Does not add multiple time the same token assert_eq!( vocab.add_tokens( &[ AddedToken::from("added_token_2", false), AddedToken::from("added_token_2", false) ], &model, normalizer ), 1 ); assert_eq!(vocab.len(), 2); // Also adds tokens already covered by the model let added_token = AddedToken::from("test", false); assert_eq!( vocab.add_tokens(&[added_token.clone()], &model, normalizer), 1 ); assert_eq!(vocab.len(), 3); assert_eq!(vocab.get_added_tokens_decoder()[&0], added_token); } #[test] fn can_add_special_tokens() { let model = ModelMock::new(&[("test", 0), ("tost", 1)]); let mut vocab = AddedVocabulary::new(); let normalizer: Option<&NormalizerWrapper> = None; // Add tokens normally assert_eq!( vocab.add_special_tokens( &[AddedToken::from("added_token_1", true)], &model, normalizer ), 1 ); assert_eq!(vocab.len(), 1); // Does not add multiple time the same token assert_eq!( vocab.add_special_tokens( &[ AddedToken::from("added_token_2", true), AddedToken::from("added_token_2", true) ], &model, normalizer ), 1 ); assert_eq!(vocab.len(), 2); // Can add tokens already covered by the model assert_eq!( vocab.add_special_tokens(&[AddedToken::from("test", true)], &model, normalizer), 1 ); assert_eq!(vocab.len(), 3); // New token was added assert!(vocab.is_special_token("test")); assert_eq!( *vocab.get_added_tokens_decoder(), HashMap::from([ (0, AddedToken::from("test", true)), (2, AddedToken::from("added_token_1", true)), (3, AddedToken::from("added_token_2", true)), ]) ); assert!(vocab.added_tokens_map.contains_key("test")); assert!(vocab.added_tokens_map_r.contains_key(&0)); vocab.add_tokens( &[ AddedToken::from("tost", true), AddedToken::from("another_two", false), ], &model, normalizer, ); assert_eq!(vocab.len(), 5); // New token was added assert_eq!(vocab.get_vocab()["another_two"], 4); // New token was added, but the index is not the length of the vocab // Let's add an already added token again assert_eq!( vocab.add_special_tokens(&[AddedToken::from("another_two", true)], &model, normalizer), 1 ); assert_eq!(vocab.len(), 5); // Token was already there assert_eq!(vocab.get_vocab()["another_two"], 4); // Token idx not changed // Just checking that we can set the content of the string in rust let mut token: AddedToken = AddedToken::from("Hey", false); token.content = "hey".to_string(); assert_eq!(token.content, "hey"); // Token was already there token.special = true; assert!(token.special); // Token was already there } #[test] fn can_extract_added_tokens() { // Is able to extract both normal and special tokens let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer: Option<&NormalizerWrapper> = None; vocab.add_tokens( &[ AddedToken::from("my", false), AddedToken::from("name", false), ], &model, normalizer, ); vocab.add_special_tokens( &[ AddedToken::from("[CLS]", true), AddedToken::from("[SEP]", true), ], &model, normalizer, ); let result = vocab.extract_and_normalize(normalizer, "[CLS] My name is Anthony [SEP]"); assert_eq!( result .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, _, tokens)| ( s, tokens .as_ref() .map(|t| t.iter().map(|t| t.id).collect::<Vec<_>>()) )) .collect::<Vec<_>>(), vec![ ("[CLS]", Some(vec![2])), (" My ", None), ("name", Some(vec![1])), (" is Anthony ", None), ("[SEP]", Some(vec![3])) ] ); } #[test] fn options_use_cases() { // Is able to extract both normal and special tokens, with various options 
(lstrip, rstrip, // single_word, normalized) let model = ModelMock::new(&[]); let normalizer = Lowercase; let mut vocab = AddedVocabulary::new(); vocab.add_tokens( &[ AddedToken::from("my", false).lstrip(true).rstrip(true), AddedToken::from("name", false), AddedToken::from("ony", false).single_word(true), ], &model, Some(&normalizer), ); vocab.add_special_tokens( &[ AddedToken::from("[CLS]", true), AddedToken::from("[SEP]", true), ], &model, Some(&normalizer), ); let result = vocab.extract_and_normalize(Some(&normalizer), "[CLS] My name is Anthony [SEP]"); assert_eq!( simplify_output(&result), vec![ ("[CLS]", Some(vec![3])), // This one includes both spaces because of the lstrip & rstrip // And it matches because normalized == true (" my ", Some(vec![0])), ("name", Some(vec![1])), // `ony` is not extracted here thanks to single_word (" is anthony ", None), ("[SEP]", Some(vec![4])), ] ); } #[test] fn empty_matches() { let vocab = AddedVocabulary::new(); let matches = vocab.find_matches("", &vocab.split_trie); assert_eq!(matches, vec![(None, (0, 0))]); } #[test] fn test_single_word_is_correct() { // Is able to extract both normal and special tokens, with various options (lstrip, rstrip, // single_word, normalized) let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer = Lowercase; vocab.add_tokens( &[AddedToken::from("<mask>", false).single_word(true)], &model, Some(&normalizer), ); // Left, in the middle, non single world left, non single word right, end of sentence valid let result = vocab.extract_and_normalize( Some(&normalizer), "<mask> My name <mask> A<mask> <mask>ony <mask>", ); assert_eq!( simplify_output(&result), vec![ ("<mask>", Some(vec![0])), (" my name ", None), ("<mask>", Some(vec![0])), (" a<mask> <mask>ony ", None), ("<mask>", Some(vec![0])) ] ); } #[test] fn test_single_word_is_unicode_correct() { let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer = Lowercase; assert_eq!(vocab.len(), 0); vocab.add_tokens( &[AddedToken::from("<mask>", false).single_word(true)], &model, Some(&normalizer), ); let result = vocab.extract_and_normalize(Some(&normalizer), "<mask>, <mask>- ◌̰<mask>"); assert_eq!( simplify_output(&result), vec![ // Punctuation is not word ("<mask>", Some(vec![0])), (", ", None), // dash is not word ("<mask>", Some(vec![0])), // This is unicode combining mark character and is word: https://en.wikipedia.org/wiki/Combining_Diacritical_Marks ("- ◌̰<mask>", None), ] ); } #[test] fn test_lstrip_unicode_space() { let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer = Lowercase; vocab.add_tokens( &[AddedToken::from("<mask>", false) .lstrip(true) .rstrip(true) .single_word(true)], &model, Some(&normalizer), ); let result = vocab .extract_and_normalize(Some(&normalizer), "Hi <mask> there\t<mask>\t<mask>\u{2000}"); assert_eq!( simplify_output(&result), vec![ ("hi", None), // Regular space (" <mask> ", Some(vec![0])), ("there", None), // \t is a spacing character ("\t<mask>\t", Some(vec![0])), // Non overlapping // \u{2000} is mongolian vowel separator: https://jkorpela.fi/chars/spaces.html ("<mask>\u{2000}", Some(vec![0])), ] ); } #[test] fn test_encode_special_tokens() { let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer = Lowercase; vocab.add_tokens( &[ AddedToken::from("<mask>", true) .lstrip(true) .rstrip(true) .single_word(true), AddedToken::from("ask>", false), AddedToken::from("<pad>", true), ], &model, Some(&normalizer), ); 
vocab.set_encode_special_tokens(true); let result = vocab.extract_and_normalize( Some(&normalizer), "Hi <mask> there\t<mask>\t<mask>\u{2000} <pad> <mask><pad><pad>", ); assert_eq!( simplify_output(&result), vec![ ("hi <m", None), ("ask>", Some(vec![1])), (" there\t<m", None), ("ask>", Some(vec![1])), ("\t<m", None), ("ask>", Some(vec![1])), ("\u{2000} <pad> <m", None), ("ask>", Some(vec![1])), ("<pad><pad>", None) ] ); vocab.set_encode_special_tokens(false); let result = vocab.extract_and_normalize( Some(&normalizer), "Hi <mask> there\t<mask>\t<mask>\u{2000} <pad> <mask><pad><pad>", ); assert_eq!( simplify_output(&result), vec![ ("hi", None), (" <mask> ", Some(vec![0])), ("there", None), ("\t<mask>\t", Some(vec![0])), ("<mask>\u{2000} ", Some(vec![0])), ("<pad>", Some(vec![2])), (" <mask>", Some(vec![0])), ("<pad>", Some(vec![2])), ("<pad>", Some(vec![2])) ] ); } }
use crate::tokenizer::{Encoding, Result}; use serde::{Deserialize, Serialize}; use std::cmp; use std::mem; #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, Default)] pub enum TruncationDirection { Left, #[default] Right, } impl std::convert::AsRef<str> for TruncationDirection { fn as_ref(&self) -> &str { match self { TruncationDirection::Left => "left", TruncationDirection::Right => "right", } } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct TruncationParams { #[serde(default)] pub direction: TruncationDirection, pub max_length: usize, pub strategy: TruncationStrategy, pub stride: usize, } impl Default for TruncationParams { fn default() -> Self { Self { max_length: 512, strategy: TruncationStrategy::default(), stride: 0, direction: TruncationDirection::default(), } } } #[derive(thiserror::Error, Debug)] pub enum TruncationError { /// We are supposed to truncate the pair sequence, but it has not been provided. #[error("Truncation error: Second sequence not provided")] SecondSequenceNotProvided, /// We cannot truncate the target sequence enough to respect the provided max length. #[error("Truncation error: Sequence to truncate too short to respect the provided max_length")] SequenceTooShort, } #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq)] pub enum TruncationStrategy { LongestFirst, OnlyFirst, OnlySecond, } impl Default for TruncationStrategy { fn default() -> Self { Self::LongestFirst } } impl std::convert::AsRef<str> for TruncationStrategy { fn as_ref(&self) -> &str { match self { Self::LongestFirst => "longest_first", Self::OnlyFirst => "only_first", Self::OnlySecond => "only_second", } } } pub fn truncate_encodings( mut encoding: Encoding, mut pair_encoding: Option<Encoding>, params: &TruncationParams, ) -> Result<(Encoding, Option<Encoding>)> { if params.max_length == 0 { encoding.truncate(0, params.stride, params.direction); if let Some(other_encoding) = pair_encoding.as_mut() { other_encoding.truncate(0, params.stride, params.direction); } return Ok((encoding, pair_encoding)); } let total_length = encoding.get_ids().len() + pair_encoding .as_ref() .map(|e| e.get_ids().len()) .unwrap_or(0); let to_remove = if total_length > params.max_length { total_length - params.max_length } else { return Ok((encoding, pair_encoding)); }; match params.strategy { TruncationStrategy::LongestFirst => { if let Some(other_encoding) = pair_encoding.as_mut() { // Assuming n1 <= n2, there are 3 cases // Case 1: // No truncation needs to be performed. // This scenario is handled before the match. // Case 2: // Only the longer input needs to be truncated. // n1 = n1 // n2 = max_length - n1 // Case 3: // Both inputs must be truncated. 
// n1 = max_length / 2 // n2 = n1 + max_length % 2 let mut n1 = encoding.get_ids().len(); let mut n2 = other_encoding.get_ids().len(); let mut swap = false; // Ensure n1 is the length of the shortest input if n1 > n2 { swap = true; mem::swap(&mut n1, &mut n2); } if n1 > params.max_length { // This needs to be a special case // to avoid max_length - n1 < 0 // since n1 and n2 are unsigned n2 = n1; } else { n2 = cmp::max(n1, params.max_length - n1); } if n1 + n2 > params.max_length { n1 = params.max_length / 2; n2 = n1 + params.max_length % 2; } // Swap lengths if we swapped previosuly if swap { mem::swap(&mut n1, &mut n2); } encoding.truncate(n1, params.stride, params.direction); other_encoding.truncate(n2, params.stride, params.direction); } else { encoding.truncate(total_length - to_remove, params.stride, params.direction); } } TruncationStrategy::OnlyFirst | TruncationStrategy::OnlySecond => { let target = if params.strategy == TruncationStrategy::OnlyFirst { Ok(&mut encoding) } else if let Some(encoding) = pair_encoding.as_mut() { Ok(encoding) } else { Err(Box::new(TruncationError::SecondSequenceNotProvided)) }?; let target_len = target.get_ids().len(); if target_len > to_remove { target.truncate(target_len - to_remove, params.stride, params.direction); } else { return Err(Box::new(TruncationError::SequenceTooShort)); } } } Ok((encoding, pair_encoding)) } #[cfg(test)] mod tests { use super::*; use crate::tokenizer::Encoding; use std::collections::HashMap; fn get_empty() -> Encoding { Encoding::new( vec![], vec![], vec![], vec![], vec![], vec![], vec![], vec![], HashMap::new(), ) } fn get_short() -> Encoding { Encoding::new( vec![1, 2], vec![0, 0], vec![String::from("a"), String::from("b")], vec![Some(0), Some(1)], vec![(0, 1), (1, 2)], vec![0, 0], vec![1, 1], vec![], HashMap::new(), ) } fn get_medium() -> Encoding { Encoding::new( vec![3, 4, 5, 6], vec![0, 0, 0, 0], vec![ String::from("d"), String::from("e"), String::from("f"), String::from("g"), ], vec![Some(0), Some(1), Some(2), Some(3)], vec![(0, 1), (1, 2), (2, 3), (3, 4)], vec![0, 0, 0, 0], vec![1, 1, 1, 1], vec![], HashMap::new(), ) } fn get_long() -> Encoding { Encoding::new( vec![7, 8, 9, 10, 11, 12, 13, 14], vec![0, 0, 0, 0, 0, 0, 0, 0], vec![ String::from("h"), String::from("i"), String::from("j"), String::from("k"), String::from("l"), String::from("m"), String::from("n"), String::from("o"), ], vec![ Some(0), Some(1), Some(2), Some(3), Some(4), Some(5), Some(6), Some(7), ], vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8), ], vec![0, 0, 0, 0, 0, 0, 0, 0], vec![1, 1, 1, 1, 1, 1, 1, 1], vec![], HashMap::new(), ) } fn truncate_and_assert( encoding1: Encoding, encoding2: Encoding, params: &TruncationParams, n1: usize, n2: usize, ) { match truncate_encodings(encoding1, Some(encoding2), params) { Ok((e1, Some(e2))) => { assert!(e1.get_ids().len() == n1); assert!(e2.get_ids().len() == n2); } _ => panic!(), }; } #[test] fn truncate_encodings_longest_first() { let params = TruncationParams { max_length: 7, strategy: TruncationStrategy::LongestFirst, stride: 0, direction: TruncationDirection::Right, }; truncate_and_assert(get_empty(), get_empty(), &params, 0, 0); truncate_and_assert(get_empty(), get_short(), &params, 0, 2); truncate_and_assert(get_empty(), get_medium(), &params, 0, 4); truncate_and_assert(get_empty(), get_long(), &params, 0, 7); truncate_and_assert(get_short(), get_empty(), &params, 2, 0); truncate_and_assert(get_short(), get_short(), &params, 2, 2); truncate_and_assert(get_short(), get_medium(), 
&params, 2, 4); truncate_and_assert(get_short(), get_long(), &params, 2, 5); truncate_and_assert(get_medium(), get_empty(), &params, 4, 0); truncate_and_assert(get_medium(), get_short(), &params, 4, 2); truncate_and_assert(get_medium(), get_medium(), &params, 3, 4); truncate_and_assert(get_medium(), get_long(), &params, 3, 4); truncate_and_assert(get_long(), get_empty(), &params, 7, 0); truncate_and_assert(get_long(), get_short(), &params, 5, 2); truncate_and_assert(get_long(), get_medium(), &params, 4, 3); truncate_and_assert(get_long(), get_long(), &params, 3, 4); } #[test] fn truncate_encodings_empty() { let params = TruncationParams { max_length: 0, strategy: TruncationStrategy::LongestFirst, stride: 0, direction: TruncationDirection::Right, }; truncate_and_assert(get_empty(), get_short(), &params, 0, 0); truncate_and_assert(get_medium(), get_medium(), &params, 0, 0); truncate_and_assert(get_long(), get_long(), &params, 0, 0); } #[test] fn test_deserialize_defaults() { let old_truncation_params = r#"{"max_length":256,"strategy":"LongestFirst","stride":0}"#; let params: TruncationParams = serde_json::from_str(old_truncation_params).unwrap(); assert_eq!(params.direction, TruncationDirection::Right); } }
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # How To Request Support This is an Open Source Project so please be mindful that like in any other project of this kind there is no obligation to answer all requests for help. However, we want to encourage you to ask for help whenever you think it's needed! We are happy about every question we get because it allows us to better understand your needs, possible misunderstandings, and most importantly a way for you to help us make this library better. That being said, this document's main purpose is to provide guidelines at how you can formulate your requests to increase your chances to be understood and to get support. There are two main venues to receive support: [the forums](https://discuss.huggingface.co/) and [the GitHub issues](https://github.com/huggingface/transformers/issues). ## The Forums [The user forums](https://discuss.huggingface.co/) are supported by the wide community of the library users and backed up by developers when needed. If you have a difficulty with deploying this library or some questions, or you'd like to discuss a new feature, please first consider discussing those things at the forums. Only when you feel your subject matter has been crystalized and you still need support from the library developers do proceed to file an [issue](https://github.com/huggingface/transformers/issues). In particular all "Please explain" questions or objectively very user-specific feature requests belong to the forums. Here are some example of such questions: * "I would like to use a BertModel within a RL-Agent for a customer support service. How can I use a BertForMaskedLM in my ChatBotModel?" * "Could you please explain why T5 has no positional embedding matrix under T5Model?" * "How should I set my generation parameters for translation?" * "How to train T5 on De->En translation?" ## The GitHub Issues Everything which hints at a bug should be opened as an [issue](https://github.com/huggingface/transformers/issues). You are not required to read the following guidelines before opening an issue. However, if you notice that your issue doesn't get any replies, chances are that the developers have one or several difficulties with its quality. In this case, reading the following points and adjusting your issue accordingly could help. 1. Before posting an issue, first search for already posted issues, since chances are someone has already asked a similar question before you. If you use Google your search query should be: ``` "huggingface" "transformers" your query ``` The first two quoted words tell Google to limit the search to the context of the Huggingface Transformers. The remainder is your query - most commonly this would be the error message the software fails with. We will go deeper into details shortly. The results of such a query will typically match GitHub issues, Hugging Face forums, StackExchange, and blogs. 
If you find relevant hints, you may choose to continue the discussion there if you have follow up questions. If what you found is similar but doesn't quite answer your problem, please, post a new issue and do include links to similar issues or forum discussions you may have found. Let's look at some examples: The error message, often referred to as an assertion, tells us what went wrong. Here is an example of an assertion: ```python Traceback (most recent call last): File "<string>", line 1, in <module> File "/transformers/src/transformers/__init__.py", line 34, in <module> from . import dependency_versions_check File "/transformers/src/transformers/dependency_versions_check.py", line 34, in <module> from .utils import is_tokenizers_available File "/transformers/src/transformers/utils/import_utils.py", line 40, in <module> from tqdm.auto import tqdm ModuleNotFoundError: No module named 'tqdm.auto' ``` and it typically includes a traceback, so that we can see the full stack of calls the program made before it fails. This gives us the context to know why the program failed. Going back to the above example. If you received this error search, look at the very last line of the error which is: ```python ModuleNotFoundError: No module named 'tqdm.auto' ``` And now we can use it to do the searching on your favorite search engine: 1. first for `"huggingface" "transformers" "ModuleNotFoundError: No module named 'tqdm.auto'"` 2. if you don't find relevant results, then search for just `"ModuleNotFoundError: No module named 'tqdm.auto'"` 3. and finally if nothing still comes up, then remove the outside quotes: `ModuleNotFoundError: No module named 'tqdm.auto'` If the error includes any messages that include bits unique to your filesystem, always remove those in the search query since other users will not have the same filesystem as yours. For example: ```bash python -c 'open("/tmp/wrong_path.txt", "r")' Traceback (most recent call last): File "<string>", line 1, in <module> FileNotFoundError: [Errno 2] No such file or directory: '/tmp/wrong_path.txt' ``` Here you'd search for just: `"FileNotFoundError: [Errno 2] No such file or directory"` If the local information that you removed were inside the error message and you removed them you may need to remove double quotes since your query is no longer exact. So if the error message was something like: ```bash ValueError: '/tmp/wrong_path.txt' cannot be found ``` then you'd search for `"ValueError" "cannot be found"` As you search you will notice that when you don't use quotes often the search engines will return a variety of unrelated hits, which may or may not be what you want. Experiment with different ways and find which approach gives the most satisfactory results. 2. Keep the issue short, providing the information that you think will aid the developers to understand your situation. Put yourself in the shoes of the person who has never seen your code or knows anything about your custom setup. This mental exercise will help to develop an intuition to what/what not to share" 3. If there is a software failure, always provide the full traceback, for example: ```python $ python -c 'import transformers' Traceback (most recent call last): File "<string>", line 1, in <module> File "/transformers/src/transformers/__init__.py", line 34, in <module> from . 
import dependency_versions_check File "/transformers/src/transformers/dependency_versions_check.py", line 34, in <module> from .utils import is_tokenizers_available File "/transformers/src/transformers/utils/import_utils.py", line 40, in <module> from tqdm.auto import tqdm ModuleNotFoundError: No module named 'tqdm.auto' ``` As compared to providing just the last line of the error message, e.g.: ```python ModuleNotFoundError: No module named 'tqdm.auto' ``` which is not sufficient. If your application is running on more than one GPU (e.g. under `DistributedDataParallel`) and typically getting every log and traceback printed multiple times, please make sure that you paste only one copy of it. At times the traceback from parallel processes may get interleaved - so either disentangle these or change the loggers to log only for `local_rank==0` so that only one process logs things. 4. When quoting a traceback, command line instructions and any type of code always enclose it in triple backticks inside the editor window, that is: ```` ``` git clone https://github.com/huggingface/transformers cd transformers pip install . ``` ```` If it's a command line with a long argument list, please consider breaking it down using backslashes and new lines. Here is an example of a good command line quote: ```bash cd examples/seq2seq torchrun --nproc_per_node=2 ./finetune_trainer.py \ --model_name_or_path sshleifer/distill-mbart-en-ro-12-4 --data_dir wmt_en_ro \ --output_dir output_dir --overwrite_output_dir \ --do_train --n_train 500 --num_train_epochs 1 \ --per_device_train_batch_size 1 --freeze_embeds \ --src_lang en_XX --tgt_lang ro_RO --task translation \ --fp16 ``` If you don't break it up, one has to scroll horizontally which often makes it quite difficult to quickly see what's happening. The backslashes allow us to copy the command directly into the console to run it, without needing to edit it. 5. Include only the important information that you think will help the developer to quickly identify the problem. For example applications often create huge amounts of logs. Ask yourself whether providing all or parts of the log is useful. Pasting a 100-1000 lines of log into the issue is an immediate turn off, since it will take a lot of time to figure out where the pertinent parts of the log are. Attaching a full log can be helpful if it's done as an attachment, if it's enclosed in the following html code in the comment editor window: ``` <details> <summary>Full log</summary> <pre> many lines go here </pre> </details> ``` which would result in the following entry, which can be opened if desired, but otherwise takes little space. <details> <summary>Full log</summary> <pre> many lines go here </pre> </details> You could also provide a link to a pastebin service, but this is less beneficial since those links tend to expire quickly and future readers of your issue might not be able to access that log file anymore and may lack some context. 6. If this is an issue in your code, do try to reduce that code to a minimal example that still demonstrates the problem. Please ask at the forums if you have a hard time figuring how to do that. Please realize that we don't have the luxury of having time to try and understand all of your custom code. If you really tried to make a short reproducible code but couldn't figure it out, it might be that having a traceback will give the developer enough information to know what's going on. But if it is not enough and we can't reproduce the problem, we can't really solve it. 
Do not despair if you can't figure it out from the beginning, just share what you can and perhaps someone else will be able to help you at the forums. If your setup involves any custom datasets, the best way to help us reproduce the problem is to create a [Google Colab notebook](https://colab.research.google.com/) that demonstrates the issue and once you verify that the issue still exists, include a link to that notebook in the Issue. Just make sure that you don't copy and paste the location bar url of the open notebook - as this is private and we won't be able to open it. Instead, you need to click on `Share` in the right upper corner of the notebook, select `Get Link` and then copy and paste the public link it will give to you. 7. If you forked off some of this project's code or example applications, please, do not ask us to go into your code repository and figure out what you may have done. The code is already very complex and unless there is an easy way to do a diff and it's a small diff, it won't be possible to find someone with time on their hands to make a lengthy investigation. Albeit, you might find someone at the forums who will be generous to do this for you. 8. Before reporting an issue, first, always try to update your environment to the latest official version of this library. We have no resources to go and debug older revisions, which could easily have bugs that have been fixed in the latest released version. We understand that this is not always possible, especially when APIs change, in which case file an issue against the highest library version your environment can support. Of course, if you upgrade the library, always retest that the problem is still there. 9. Please do not ask us to reproduce an issue with your custom data, since we don't have it. So, either you should use some existing dataset supported by HF datasets or you need to supply a code that generates a small sample on the fly, or some another quick and simple way to get it. Please do not send us any non-public domain data that may require a license or a permission to be used. 10. Do not tag multiple developers on the issue unless you know this is expected, either because you asked them and they gave you an explicit permission to tag them or the issue template instructs you to do so. The "who to tag for what domain" part of the issue template is there to help users direct their questions to the right developers who are designated maintainers of project's specific domains. They can then decide at their own discretion to tag other developers if they feel it'd help move the issue forward. We currently don't have a triage service and we trust your capacity to identify the right domain and thus the persons to tag in your issue. If you are not sure, please use the forums to ask for guidance. When in doubt, err on the side of not tagging a given person. If you tag multiple people out of context or permission don't be surprised if you get no response at all. Please remember that every time you tag someone, they get a notification and you're taking their time without their permission. Please be sensitive to that. If you got helped by one of the developers in the past please don't tag them in future issues, unless they are listed in the issue template for the domain you are asking about or that developer gave you an explicit permission to tag them in future issues. 
If you see a certain developer doing multiple and/or recent commits into a specific area of the project that you feel is relevant to your issue, it is not a good reason to tag them. Various developers may be fixing things that prevent them from moving forward, but often their work is focused on a totally different domain. And while they may or may not know how to help you with the problem at hand, it would benefit the whole community much more if they focus on the domain of their unique expertise. 11. Use the Edit button. Take your time, and re-read and improve the wording and formatting to make your posts and comments as easy to understand as possible. Avoid posting multiple comments in a row, as each comment generates a notification for the developers tagged in that issue. If you happened to post multiple comments in a row, and nobody followed up yet - consider merging those into one or a few comments while editing the combined content to be coherent. If you choose to edit your older comments after others posted follow up comments you need to be aware that your modifications might not be noticed, so if it's not a typo fixing, try to write a new comment flagging that something has been changed in the previous comments. For example, the very first comment is the most important one. If while the thread unfolds you realize that things aren't as they seemed to you originally you may want to edit the first post to reflect the up-to-date understanding of the issue at hand so that it helps those who read your issue in the future quickly understand what's going on and not need to sift through dozens of comments. It also helps to indicate that the post was edited. So, those reading the thread later can understand why there might be certain discontinuity in the information flow. Use bullets and items if you have lists of items and the outcome improves overall readability. Use backticks to refer to class and function names, e.g. `BartModel` and `generate` as these stand out and improve the speed of a reader's comprehension. Try not use italics and bold text too much as these often make the text more difficult to read. 12. If you are cross-referencing a specific comment in a given thread or another issue, always link to that specific comment, rather than using the issue link. If you do the latter it could be quite impossible to find which specific comment you're referring to. To get the link to the specific comment do not copy the url from the location bar of your browser, but instead, click the `...` icon in the upper right corner of the comment and then select "Copy Link". For example the first link is a link to an issue, and the second to a specific comment in the same issue: 1. https://github.com/huggingface/transformers/issues/9257 2. https://github.com/huggingface/transformers/issues/9257#issuecomment-749945162 13. If you are replying to a last comment, it's totally fine to make your reply with just your comment in it. The readers can follow the information flow here. But if you're replying to a comment that happened some comments back it's always a good practice to quote just the relevant lines you're replying it. The `>` is used for quoting, or you can always use the menu to do so. For example your editor box will look like: ``` > How big is your gpu cluster? Our cluster is made of 256 gpus. ``` If you are addressing multiple comments, quote the relevant parts of each before your answer. Some people use the same comment to do multiple replies, others separate them into separate comments. 
Either way works. The latter approach helps with linking to a specific comment.

In general, the best way to figure out what works best is to learn from issues posted by other people - see which issues get great responses and which get little to no response - and observe what the posters who received great responses did differently from those who did not.

Thank you for reading this somewhat lengthy document. We would like to conclude that these are not absolute rules, but friendly advice that will help maximize the chances for us to understand what you are trying to communicate, reproduce the problem, and then resolve it to your satisfaction and the benefit of the whole community.

If after reading this document there are remaining questions on how and why, or there is a need for further elucidation, please don't hesitate to ask your question in [this thread](https://discuss.huggingface.co/t/how-to-request-support/3128).
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # `pip install -e '.[dev]'` when switching between checkouts and running tests. git_repo_path = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def pytest_configure(config): config.addinivalue_line( "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" ) config.addinivalue_line( "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" ) config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested") config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment") config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate") config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule") def pytest_addoption(parser): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(parser) def pytest_terminal_summary(terminalreporter): from transformers.testing_utils import pytest_terminal_summary_main make_reports = terminalreporter.config.getoption("--make-reports") if make_reports: pytest_terminal_summary_main(terminalreporter, id=make_reports) def pytest_sessionfinish(session, exitstatus): # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: session.exitstatus = 0 # Doctest custom flag to ignore output. IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT") OutputChecker = doctest.OutputChecker class CustomOutputChecker(OutputChecker): def check_output(self, want, got, optionflags): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self, want, got, optionflags) doctest.OutputChecker = CustomOutputChecker _pytest.doctest.DoctestModule = HfDoctestModule doctest.DocTestParser = HfDocTestParser
### Translating the Transformers documentation into your language As part of our mission to democratize machine learning, we'd love to make the Transformers library available in many more languages! Follow the steps below if you want to help translate the documentation into your language 🙏. **🗞️ Open an issue** To get started, navigate to the [Issues](https://github.com/huggingface/transformers/issues) page of this repo and check if anyone else has opened an issue for your language. If not, open a new issue by selecting the "Translation template" from the "New issue" button. Once an issue exists, post a comment to indicate which chapters you'd like to work on, and we'll add your name to the list. **🍴 Fork the repository** First, you'll need to [fork the Transformers repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). You can do this by clicking on the **Fork** button on the top-right corner of this repo's page. Once you've forked the repo, you'll want to get the files on your local machine for editing. You can do that by cloning the fork with Git as follows: ```bash git clone https://github.com/YOUR-USERNAME/transformers.git ``` **📋 Copy-paste the English version with a new language code** The documentation files are in one leading directory: - [`docs/source`](https://github.com/huggingface/transformers/tree/main/docs/source): All the documentation materials are organized here by language. You'll only need to copy the files in the [`docs/source/en`](https://github.com/huggingface/transformers/tree/main/docs/source/en) directory, so first navigate to your fork of the repo and run the following: ```bash cd ~/path/to/transformers/docs cp -r source/en source/LANG-ID ``` Here, `LANG-ID` should be one of the ISO 639-1 or ISO 639-2 language codes -- see [here](https://www.loc.gov/standards/iso639-2/php/code_list.php) for a handy table. **✍️ Start translating** The fun part comes - translating the text! The first thing we recommend is translating the part of the `_toctree.yml` file that corresponds to your doc chapter. This file is used to render the table of contents on the website. > 🙋 If the `_toctree.yml` file doesn't yet exist for your language, you can create one by copy-pasting from the English version and deleting the sections unrelated to your chapter. Just make sure it exists in the `docs/source/LANG-ID/` directory! The fields you should add are `local` (with the name of the file containing the translation; e.g. `autoclass_tutorial`), and `title` (with the title of the doc in your language; e.g. `Load pretrained instances with an AutoClass`) -- as a reference, here is the `_toctree.yml` for [English](https://github.com/huggingface/transformers/blob/main/docs/source/en/_toctree.yml): ```yaml - sections: - local: pipeline_tutorial # Do not change this! Use the same name for your .md file title: Pipelines for inference # Translate this! ... title: Tutorials # Translate this! ``` Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter. > 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/transformers/issues) and tag @stevhliu and @MKhalusova.
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Preprocess

[[open-in-colab]]

Before you can use your data in a model, the data needs to be processed into a format the model accepts. A model does not understand raw text, images, or audio. These inputs need to be converted into numbers and assembled into tensors. In this guide, you will:

* Preprocess text data with a tokenizer.
* Preprocess image or audio data with a feature extractor.
* Preprocess data for a multimodal task with a processor.

## NLP

<Youtube id="Yffk5aydLzg"/>

The main tool for processing text data is a [tokenizer](main_classes/tokenizer). A tokenizer starts by splitting text into *tokens* according to a set of rules. The tokens are converted into numbers, which are used to build the tensors that serve as input to a model. Any additional inputs a model requires are also added by the tokenizer.

<Tip>

If you plan on using a pretrained model, it's important to use the associated pretrained tokenizer. This ensures the text is split the same way as the pretraining corpus, and uses the same corresponding token-to-index mapping (usually referred to as the *vocab*) as during pretraining.

</Tip>

To get started quickly, load a pretrained tokenizer with the [`AutoTokenizer`] class. This downloads the *vocab* that was used when the model was pretrained.

### Tokenize

Load a pretrained tokenizer with [`AutoTokenizer.from_pretrained`]:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
```

Then pass your sentence to the tokenizer:

```py
>>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.")
>>> print(encoded_input)
{'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102],
 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```

The tokenizer returns a dictionary with three important items:

* [input_ids](glossary#input-ids) are the indices corresponding to each token in the sentence.
* [attention_mask](glossary#attention-mask) indicates whether a token should be attended to or not.
* [token_type_ids](glossary#token-type-ids) identifies which sequence a token belongs to when there is more than one sequence.
You can decode the `input_ids` to return the original input:

```py
>>> tokenizer.decode(encoded_input["input_ids"])
'[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]'
```

As you can see, the tokenizer added two special tokens - `CLS` and `SEP` (classifier and separator) - to the sentence. Not all models need special tokens, but if they do, the tokenizer automatically adds them for you.

If you want to process several sentences, pass them to the tokenizer as a list:

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_inputs = tokenizer(batch_sentences)
>>> print(encoded_inputs)
{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102],
               [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
               [101, 1327, 1164, 5450, 23434, 136, 102]],
 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]],
 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1]]}
```

### Pad

This brings us to an important topic. When you process a batch of sentences, they are not always the same length. This is a problem because tensors, the input to the model, need to have a uniform shape. Padding is a strategy that ensures tensors are rectangular by adding a special *padding token* to sentences with fewer tokens.

Set the `padding` parameter to `True` to pad the shorter sequences in the batch so they match the longest sequence:

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch_sentences, padding=True)
>>> print(encoded_input)
{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0],
               [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
               [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]],
 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]}
```

Notice that the tokenizer padded the first and third sentences with `0`s because they are shorter!

### Truncation

At the other end of the spectrum, a sequence may sometimes be too long for a model. In this case, you need to truncate the sequence to a shorter length.

Set the `truncation` parameter to `True` to truncate a sequence to the maximum length accepted by the model:

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` ### Tensoren erstellen Schließlich möchten Sie, dass der Tokenizer die tatsächlichen Tensoren zurückgibt, die dem Modell zugeführt werden. Setzen Sie den Parameter `return_tensors` entweder auf `pt` für PyTorch, oder `tf` für TensorFlow: <frameworkcontent> <pt> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt") >>> print(encoded_input) {'input_ids': tensor([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]])} ``` </pt> <tf> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf") >>> print(encoded_input) {'input_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'token_type_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>} ``` </tf> </frameworkcontent> ## Audio Audioeingaben werden anders vorverarbeitet als Texteingaben, aber das Endziel bleibt dasselbe: numerische Sequenzen zu erstellen, die das Modell verstehen kann. Ein [feature extractor](main_classes/feature_extractor) dient dem ausdrücklichen Zweck, Merkmale aus Rohbild- oder Audiodaten zu extrahieren und in Tensoren zu konvertieren. 
Before you begin, install 🤗 Datasets so you can load an audio dataset to experiment with:

```bash
pip install datasets
```

Load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset):

```py
>>> from datasets import load_dataset, Audio

>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
```

Access the first element of the `audio` column to take a look at the input. Calling the `audio` column automatically loads and resamples the audio file:

```py
>>> dataset[0]["audio"]
{'array': array([ 0.        ,  0.00024414, -0.00024414, ..., -0.00024414,
         0.        ,  0.        ], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
 'sampling_rate': 8000}
```

This returns three items:

* `array` is the speech signal loaded - and potentially resampled - as a 1D array.
* `path` points to the location of the audio file.
* `sampling_rate` refers to how many data points in the speech signal are measured per second.

### Resample

For this tutorial, you will use the [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) model. As you can see from the model card, the Wav2Vec2 model is pretrained on 16kHz sampled speech audio. It is important that the sampling rate of your audio data matches the sampling rate of the dataset used to pretrain the model. If the sampling rate of your data is not the same, you need to resample your audio data.

For example, the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset has a sampling rate of 8kHz. In order to use the Wav2Vec2 model with this dataset, you need to upsample it to 16kHz:

```py
>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
>>> dataset[0]["audio"]
{'array': array([ 0.        ,  0.00024414, -0.00024414, ..., -0.00024414,
         0.        ,  0.        ], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
 'sampling_rate': 8000}
```

1. Use the [`~datasets.Dataset.cast_column`] method from 🤗 Datasets to upsample the audio to 16kHz:

```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
```

2. Load the audio file:

```py
>>> dataset[0]["audio"]
{'array': array([ 2.3443763e-05,  2.1729663e-04,  2.2145823e-04, ...,
         3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
 'sampling_rate': 16000}
```

As you can see, the `sampling_rate` is now 16kHz!

### Feature extractor

The next step is to load a feature extractor to normalize and pad the input. When padding text data, a `0` is added for shorter sequences. The same idea applies to audio data, and the audio feature extractor adds a `0` - interpreted as silence - to `array`.
Load the feature extractor with [`AutoFeatureExtractor.from_pretrained`]:

```py
>>> from transformers import AutoFeatureExtractor

>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
```

Pass the audio `array` to the feature extractor. We also recommend adding the `sampling_rate` argument to the feature extractor call in order to better debug any silent errors that may occur.

```py
>>> audio_input = [dataset[0]["audio"]["array"]]
>>> feature_extractor(audio_input, sampling_rate=16000)
{'input_values': [array([ 3.8106556e-04,  2.7506407e-03,  2.8015103e-03, ...,
        5.6335266e-04,  4.6588284e-06, -1.7142107e-04], dtype=float32)]}
```

### Pad and truncate

Just like the tokenizer, you can handle variable sequence lengths in a batch by padding or truncating. Take a look at the sequence length of these two audio samples:

```py
>>> dataset[0]["audio"]["array"].shape
(173398,)

>>> dataset[1]["audio"]["array"].shape
(106496,)
```

As you can see, the first sample has a longer sequence than the second one. Let's create a function that preprocesses the dataset. Specify a maximum sample length, and the feature extractor will either pad or truncate the sequences to match it:

```py
>>> def preprocess_function(examples):
...     audio_arrays = [x["array"] for x in examples["audio"]]
...     inputs = feature_extractor(
...         audio_arrays,
...         sampling_rate=16000,
...         padding=True,
...         max_length=100000,
...         truncation=True,
...     )
...     return inputs
```

Apply the function to the first few examples in the dataset:

```py
>>> processed_dataset = preprocess_function(dataset[:5])
```

Now take another look at the processed sample lengths:

```py
>>> processed_dataset["input_values"][0].shape
(100000,)

>>> processed_dataset["input_values"][1].shape
(100000,)
```

The lengths of the first two samples now match the maximum length you specified.

## Computer vision

An image processor is also used to process images for vision tasks. Once again, the goal is to convert the raw image into a batch of tensors as input.

Let's load the [food101](https://huggingface.co/datasets/food101) dataset for this tutorial. Use the 🤗 Datasets `split` parameter to only load a small sample from the training split since the dataset is quite large:

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("food101", split="train[:100]")
```

Next, take a look at the image with the 🤗 Datasets [Image](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image) feature:

```py
>>> dataset[0]["image"]
```

![vision-preprocess-tutorial.png](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png)

### Image processor

Load the image processor with [`AutoImageProcessor.from_pretrained`]:

```py
>>> from transformers import AutoImageProcessor

>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
```

### Data augmentation

For vision tasks, it is common to add some type of data augmentation to the images as part of preprocessing.
You can add augmentations with any library you like, but in this tutorial, you will use torchvision's [`transforms`](https://pytorch.org/vision/stable/transforms.html) module.

1. Normalize the image and use [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html) to chain some transforms - [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html) and [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html) - together:

```py
>>> from torchvision.transforms import Compose, Normalize, RandomResizedCrop, ColorJitter, ToTensor

>>> normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
>>> _transforms = Compose(
...     [RandomResizedCrop(image_processor.size["height"]), ColorJitter(brightness=0.5, hue=0.5), ToTensor(), normalize]
... )
```

2. The model accepts [`pixel_values`](model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values) as its input. This value is normally generated by the image processor. Create a function that generates `pixel_values` from the transforms:

```py
>>> def transforms(examples):
...     examples["pixel_values"] = [_transforms(image.convert("RGB")) for image in examples["image"]]
...     return examples
```

3. Then use 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform) to apply the transforms on the fly:

```py
>>> dataset.set_transform(transforms)
```

4. Now when you access the example, you will notice the `pixel_values` model input has been added:

```py
>>> dataset[0]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F1A7B0630D0>,
 'label': 6,
 'pixel_values': tensor([[[ 0.0353,  0.0745,  0.1216,  ..., -0.9922, -0.9922, -0.9922],
          [-0.0196,  0.0667,  0.1294,  ..., -0.9765, -0.9843, -0.9922],
          [ 0.0196,  0.0824,  0.1137,  ..., -0.9765, -0.9686, -0.8667],
          ...,
          [ 0.0275,  0.0745,  0.0510,  ..., -0.1137, -0.1216, -0.0824],
          [ 0.0667,  0.0824,  0.0667,  ..., -0.0588, -0.0745, -0.0980],
          [ 0.0353,  0.0353,  0.0431,  ..., -0.0039, -0.0039, -0.0588]],

         [[ 0.2078,  0.2471,  0.2863,  ..., -0.9451, -0.9373, -0.9451],
          [ 0.1608,  0.2471,  0.3098,  ..., -0.9373, -0.9451, -0.9373],
          [ 0.2078,  0.2706,  0.3020,  ..., -0.9608, -0.9373, -0.8275],
          ...,
          [-0.0353,  0.0118, -0.0039,  ..., -0.2392, -0.2471, -0.2078],
          [ 0.0196,  0.0353,  0.0196,  ..., -0.1843, -0.2000, -0.2235],
          [-0.0118, -0.0039, -0.0039,  ..., -0.0980, -0.0980, -0.1529]],

         [[ 0.3961,  0.4431,  0.4980,  ..., -0.9216, -0.9137, -0.9216],
          [ 0.3569,  0.4510,  0.5216,  ..., -0.9059, -0.9137, -0.9137],
          [ 0.4118,  0.4745,  0.5216,  ..., -0.9137, -0.8902, -0.7804],
          ...,
          [-0.2314, -0.1922, -0.2078,  ..., -0.4196, -0.4275, -0.3882],
          [-0.1843, -0.1686, -0.2000,  ..., -0.3647, -0.3804, -0.4039],
          [-0.1922, -0.1922, -0.1922,  ..., -0.2941, -0.2863, -0.3412]]])}
```

Here is what the image looks like after preprocessing. Just as you'd expect from the applied transforms, the image has been randomly cropped and its color properties are different.
```py
>>> import numpy as np
>>> import matplotlib.pyplot as plt

>>> img = dataset[0]["pixel_values"]
>>> plt.imshow(img.permute(1, 2, 0))
```

![preprocessed_image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png)

## Multimodal

For multimodal tasks, you will use a combination of everything you've learned so far and apply your skills to an automatic speech recognition (ASR) task. This means you need a:

* Feature extractor to preprocess the audio data.
* Tokenizer to process the text.

Let's return to the [LJ Speech](https://huggingface.co/datasets/lj_speech) dataset:

```py
>>> from datasets import load_dataset

>>> lj_speech = load_dataset("lj_speech", split="train")
```

Since you are mainly interested in the `audio` and `text` columns, remove the other columns:

```py
>>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"])
```

Now take a look at the `audio` and `text` columns:

```py
>>> lj_speech[0]["audio"]
{'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ...,
         7.3242188e-04,  2.1362305e-04,  6.1035156e-05], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav',
 'sampling_rate': 22050}

>>> lj_speech[0]["text"]
'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition'
```

Remember from the earlier section on processing audio data: you should always [resample](preprocessing#audio) your audio data's sampling rate to match the sampling rate of the dataset used to pretrain a model:

```py
>>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000))
```

### Processor

A processor combines a feature extractor and a tokenizer. Load a processor with [`AutoProcessor.from_pretrained`]:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
```

1. Create a function that processes the audio data to `input_values` and tokenizes the text to `labels`. These are your inputs to the model:

```py
>>> def prepare_dataset(example):
...     audio = example["audio"]
...     example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000))
...     return example
```

2. Apply the `prepare_dataset` function to a sample:

```py
>>> prepare_dataset(lj_speech[0])
```

Notice that the processor has added `input_values` and `labels`. The sampling rate has also been correctly downsampled to 16kHz.

Awesome, you should now be able to preprocess data for any modality and even combine different modalities! In the next tutorial, learn how to fine-tune a model on your newly preprocessed data.
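If you want to take this one step further, you could apply `prepare_dataset` to the whole dataset with [`~datasets.Dataset.map`]. This is a sketch of one possible next step rather than part of the guide above; the columns to remove follow the example dataset:

```py
>>> # Process every example and drop the raw columns the model does not need
>>> lj_speech = lj_speech.map(prepare_dataset, remove_columns=["audio", "text"])
```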
transformers/docs/source/de/preprocessing.md/0
{ "file_path": "transformers/docs/source/de/preprocessing.md", "repo_id": "transformers", "token_count": 10554 }
220
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# BERTology

There is a growing field of study concerned with investigating the inner workings of large-scale transformers like BERT (that some call "BERTology"). Some good examples of this field are:

- BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick: https://arxiv.org/abs/1905.05950
- Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650
- What Does BERT Look At? An Analysis of BERT's Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D. Manning: https://arxiv.org/abs/1906.04341
- CAT-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code Structure: https://arxiv.org/abs/2210.04633

In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to help people access the inner representations, mainly adapted from the great work of Paul Michel (https://arxiv.org/abs/1905.10650):

- accessing all the hidden states of BERT/GPT/GPT-2,
- accessing all the attention weights for each head of BERT/GPT/GPT-2,
- retrieving the output values and gradients of each head in order to compute head importance scores and prune heads, as explained in https://arxiv.org/abs/1905.10650.

To help you understand and use these features, we have added a specific example script, [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py), which extracts information from and prunes a model pretrained on GLUE.
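As a brief illustration of the first two bullet points, the snippet below shows one way to request hidden states and attention weights from a BERT checkpoint. It is a minimal sketch, not an excerpt from the bertology script itself:

```py
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased", output_hidden_states=True, output_attentions=True)

inputs = tokenizer("BERTology studies the inner workings of BERT.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# One tensor per layer (plus the embedding output) for the hidden states,
# and one (batch, num_heads, seq_len, seq_len) attention map per layer.
print(len(outputs.hidden_states), outputs.hidden_states[-1].shape)
print(len(outputs.attentions), outputs.attentions[0].shape)
```

Head pruning itself relies on the importance scores computed in the example script, so refer to [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py) for the full procedure.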
transformers/docs/source/en/bertology.md/0
{ "file_path": "transformers/docs/source/en/bertology.md", "repo_id": "transformers", "token_count": 640 }
221
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Trainer

The [`Trainer`] class provides an API for feature-complete training in PyTorch. It supports distributed training on multiple GPUs/TPUs, as well as mixed precision for [NVIDIA GPUs](https://nvidia.github.io/apex/), [AMD GPUs](https://rocm.docs.amd.com/en/latest/rocm.html), and [`torch.amp`](https://pytorch.org/docs/stable/amp.html) for PyTorch. [`Trainer`] goes hand-in-hand with the [`TrainingArguments`] class, which offers a wide range of options to customize how a model is trained. Together, these two classes provide a complete training API.

[`Seq2SeqTrainer`] and [`Seq2SeqTrainingArguments`] inherit from the [`Trainer`] and [`TrainingArguments`] classes and are adapted for training models for sequence-to-sequence tasks such as summarization or translation.

<Tip warning={true}>

The [`Trainer`] class is optimized for 🤗 Transformers models and can have surprising behaviors when used with other models. When using it with your own model, make sure:

- your model always returns tuples or subclasses of [`~utils.ModelOutput`]
- your model can compute the loss if a `labels` argument is provided, and that the loss is returned as the first element of the tuple (if your model returns tuples)
- your model can accept multiple label arguments (use `label_names` in [`TrainingArguments`] to indicate their names to the [`Trainer`]), but none of them should be named `"label"`

</Tip>

## Trainer[[api-reference]]

[[autodoc]] Trainer
    - all

## Seq2SeqTrainer

[[autodoc]] Seq2SeqTrainer
    - evaluate
    - predict

## TrainingArguments

[[autodoc]] TrainingArguments
    - all

## Seq2SeqTrainingArguments

[[autodoc]] Seq2SeqTrainingArguments
    - all
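To close, here is a deliberately small sketch of how the two classes fit together. The dataset, checkpoint, and hyperparameters are illustrative choices rather than recommendations:

```py
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

checkpoint = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

# A tiny slice of IMDb keeps the example fast; any labeled text dataset works the same way.
dataset = load_dataset("imdb", split="train[:1%]").train_test_split(test_size=0.2)
dataset = dataset.map(lambda batch: tokenizer(batch["text"], truncation=True), batched=True)

training_args = TrainingArguments(
    output_dir="test-trainer",          # where checkpoints and logs are written
    per_device_train_batch_size=8,
    num_train_epochs=1,
    evaluation_strategy="epoch",        # evaluate at the end of each epoch
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,                # lets the Trainer build a padding data collator
)
trainer.train()
```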
transformers/docs/source/en/main_classes/trainer.md/0
{ "file_path": "transformers/docs/source/en/main_classes/trainer.md", "repo_id": "transformers", "token_count": 689 }
222
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# BigBird

## Overview

The BigBird model was proposed in [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird is a sparse-attention-based transformer which extends Transformer-based models, such as BERT, to much longer sequences. In addition to sparse attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and random attention approximates full attention, while being computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context, BigBird has shown improved performance on various long document NLP tasks, such as question answering and summarization, compared to BERT or RoBERTa.

The abstract from the paper is the following:

*Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP. Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence length due to their full attention mechanism. To remedy this, we propose, BigBird, a sparse attention mechanism that reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS), that attend to the entire sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to 8x of what was previously possible using similar hardware. As a consequence of the capability to handle longer context, BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also propose novel applications to genomics data.*

This model was contributed by [vasudevgupta](https://huggingface.co/vasudevgupta). The original code can be found [here](https://github.com/google-research/bigbird).

## Usage tips

- For a detailed explanation of how BigBird's attention works, see [this blog post](https://huggingface.co/blog/big-bird).
- BigBird comes with 2 implementations: **original_full** & **block_sparse**. For sequence lengths < 1024, using **original_full** is advised as there is no benefit in using **block_sparse** attention.
- The code currently uses a window size of 3 blocks and 2 global blocks.
- The sequence length must be divisible by the block size.
- Current implementation supports only **ITC**. - Current implementation doesn't support **num_random_blocks = 0** - BigBird is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## BigBirdConfig [[autodoc]] BigBirdConfig ## BigBirdTokenizer [[autodoc]] BigBirdTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## BigBirdTokenizerFast [[autodoc]] BigBirdTokenizerFast ## BigBird specific outputs [[autodoc]] models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput <frameworkcontent> <pt> ## BigBirdModel [[autodoc]] BigBirdModel - forward ## BigBirdForPreTraining [[autodoc]] BigBirdForPreTraining - forward ## BigBirdForCausalLM [[autodoc]] BigBirdForCausalLM - forward ## BigBirdForMaskedLM [[autodoc]] BigBirdForMaskedLM - forward ## BigBirdForSequenceClassification [[autodoc]] BigBirdForSequenceClassification - forward ## BigBirdForMultipleChoice [[autodoc]] BigBirdForMultipleChoice - forward ## BigBirdForTokenClassification [[autodoc]] BigBirdForTokenClassification - forward ## BigBirdForQuestionAnswering [[autodoc]] BigBirdForQuestionAnswering - forward </pt> <jax> ## FlaxBigBirdModel [[autodoc]] FlaxBigBirdModel - __call__ ## FlaxBigBirdForPreTraining [[autodoc]] FlaxBigBirdForPreTraining - __call__ ## FlaxBigBirdForCausalLM [[autodoc]] FlaxBigBirdForCausalLM - __call__ ## FlaxBigBirdForMaskedLM [[autodoc]] FlaxBigBirdForMaskedLM - __call__ ## FlaxBigBirdForSequenceClassification [[autodoc]] FlaxBigBirdForSequenceClassification - __call__ ## FlaxBigBirdForMultipleChoice [[autodoc]] FlaxBigBirdForMultipleChoice - __call__ ## FlaxBigBirdForTokenClassification [[autodoc]] FlaxBigBirdForTokenClassification - __call__ ## FlaxBigBirdForQuestionAnswering [[autodoc]] FlaxBigBirdForQuestionAnswering - __call__ </jax> </frameworkcontent>
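The usage tips above boil down to a few configuration knobs on the model. The values below are illustrative rather than tuned recommendations, and the checkpoint is one publicly available BigBird model:

```py
from transformers import BigBirdModel

# "block_sparse" attention only pays off for long inputs; for sequences shorter than
# ~1024 tokens, "original_full" is the advised setting.
model = BigBirdModel.from_pretrained(
    "google/bigbird-roberta-base",
    attention_type="block_sparse",
    block_size=64,          # the sequence length must be divisible by the block size
    num_random_blocks=3,    # must be > 0 in the current implementation
)
```

Switching back to full attention is just a matter of passing `attention_type="original_full"` with the same checkpoint.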
transformers/docs/source/en/model_doc/big_bird.md/0
{ "file_path": "transformers/docs/source/en/model_doc/big_bird.md", "repo_id": "transformers", "token_count": 1682 }
223
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# DeBERTa

## Overview

The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. It is based on Google's BERT model released in 2018 and Facebook's RoBERTa model released in 2019.

It builds on RoBERTa with disentangled attention and enhanced mask decoder training, using half of the data used in RoBERTa.

The abstract from the paper is the following:

*Recent progress in pre-trained neural language models has significantly improved the performance of many natural language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the disentangled attention mechanism, where each word is represented using two vectors that encode its content and position, respectively, and the attention weights among words are computed using disentangled matrices on their contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to predict the masked tokens for model pretraining. We show that these two techniques significantly improve the efficiency of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9% (90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and pre-trained models will be made publicly available at https://github.com/microsoft/DeBERTa.*

This model was contributed by [DeBERTa](https://huggingface.co/DeBERTa). The TF 2.0 implementation of this model was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/DeBERTa).

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DeBERTa. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

<PipelineTag pipeline="text-classification"/>

- A blog post on how to [Accelerate Large Model Training using DeepSpeed](https://huggingface.co/blog/accelerate-deepspeed) with DeBERTa.
- A blog post on [Supercharged Customer Service with Machine Learning](https://huggingface.co/blog/supercharge-customer-service-with-machine-learning) with DeBERTa.
- [`DebertaForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb). - [`TFDebertaForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb). - [Text classification task guide](../tasks/sequence_classification) <PipelineTag pipeline="token-classification" /> - [`DebertaForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb). - [`TFDebertaForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb). - [Token classification](https://huggingface.co/course/chapter7/2?fw=pt) chapter of the 🤗 Hugging Face Course. - [Byte-Pair Encoding tokenization](https://huggingface.co/course/chapter6/5?fw=pt) chapter of the 🤗 Hugging Face Course. - [Token classification task guide](../tasks/token_classification) <PipelineTag pipeline="fill-mask"/> - [`DebertaForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). - [`TFDebertaForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). - [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the 🤗 Hugging Face Course. - [Masked language modeling task guide](../tasks/masked_language_modeling) <PipelineTag pipeline="question-answering"/> - [`DebertaForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb). - [`TFDebertaForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb). - [Question answering](https://huggingface.co/course/chapter7/7?fw=pt) chapter of the 🤗 Hugging Face Course. 
- [Question answering task guide](../tasks/question_answering) ## DebertaConfig [[autodoc]] DebertaConfig ## DebertaTokenizer [[autodoc]] DebertaTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## DebertaTokenizerFast [[autodoc]] DebertaTokenizerFast - build_inputs_with_special_tokens - create_token_type_ids_from_sequences <frameworkcontent> <pt> ## DebertaModel [[autodoc]] DebertaModel - forward ## DebertaPreTrainedModel [[autodoc]] DebertaPreTrainedModel ## DebertaForMaskedLM [[autodoc]] DebertaForMaskedLM - forward ## DebertaForSequenceClassification [[autodoc]] DebertaForSequenceClassification - forward ## DebertaForTokenClassification [[autodoc]] DebertaForTokenClassification - forward ## DebertaForQuestionAnswering [[autodoc]] DebertaForQuestionAnswering - forward </pt> <tf> ## TFDebertaModel [[autodoc]] TFDebertaModel - call ## TFDebertaPreTrainedModel [[autodoc]] TFDebertaPreTrainedModel - call ## TFDebertaForMaskedLM [[autodoc]] TFDebertaForMaskedLM - call ## TFDebertaForSequenceClassification [[autodoc]] TFDebertaForSequenceClassification - call ## TFDebertaForTokenClassification [[autodoc]] TFDebertaForTokenClassification - call ## TFDebertaForQuestionAnswering [[autodoc]] TFDebertaForQuestionAnswering - call </tf> </frameworkcontent>
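As a brief, hedged usage sketch: the checkpoint below is one publicly available DeBERTa model fine-tuned on MNLI, and any [`DebertaForSequenceClassification`] checkpoint works the same way:

```py
import torch
from transformers import AutoTokenizer, DebertaForSequenceClassification

checkpoint = "microsoft/deberta-base-mnli"  # illustrative fine-tuned checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = DebertaForSequenceClassification.from_pretrained(checkpoint)

# Sentence-pair classification: the premise and hypothesis are encoded together.
inputs = tokenizer(
    "A soccer game with multiple males playing.",
    "Some men are playing a sport.",
    return_tensors="pt",
)
with torch.no_grad():
    logits = model(**inputs).logits

print(model.config.id2label[logits.argmax(-1).item()])
```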
transformers/docs/source/en/model_doc/deberta.md/0
{ "file_path": "transformers/docs/source/en/model_doc/deberta.md", "repo_id": "transformers", "token_count": 2499 }
224
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# EfficientFormer

## Overview

The EfficientFormer model was proposed in [EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Eric Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren. EfficientFormer proposes a dimension-consistent pure transformer that can be run on mobile devices for dense prediction tasks like image classification, object detection and semantic segmentation.

The abstract from the paper is the following:

*Vision Transformers (ViT) have shown rapid progress in computer vision tasks, achieving promising results on various benchmarks. However, due to the massive number of parameters and model design, e.g., attention mechanism, ViT-based models are generally times slower than lightweight convolutional networks. Therefore, the deployment of ViT for real-time applications is particularly challenging, especially on resource-constrained hardware such as mobile devices. Recent efforts try to reduce the computation complexity of ViT through network architecture search or hybrid design with MobileNet block, yet the inference speed is still unsatisfactory. This leads to an important question: can transformers run as fast as MobileNet while obtaining high performance? To answer this, we first revisit the network architecture and operators used in ViT-based models and identify inefficient designs. Then we introduce a dimension-consistent pure transformer (without MobileNet blocks) as a design paradigm. Finally, we perform latency-driven slimming to get a series of final models dubbed EfficientFormer. Extensive experiments show the superiority of EfficientFormer in performance and speed on mobile devices. Our fastest model, EfficientFormer-L1, achieves 79.2% top-1 accuracy on ImageNet-1K with only 1.6 ms inference latency on iPhone 12 (compiled with CoreML), which runs as fast as MobileNetV2×1.4 (1.6 ms, 74.7% top-1), and our largest model, EfficientFormer-L7, obtains 83.3% accuracy with only 7.0 ms latency. Our work proves that properly designed transformers can reach extremely low latency on mobile devices while maintaining high performance.*

This model was contributed by [novice03](https://huggingface.co/novice03) and [Bearnardd](https://huggingface.co/Bearnardd).
The original code can be found [here](https://github.com/snap-research/EfficientFormer). The TensorFlow version of this model was added by [D-Roberts](https://huggingface.co/D-Roberts).
## Documentation resources - [Image classification task guide](../tasks/image_classification) ## EfficientFormerConfig [[autodoc]] EfficientFormerConfig ## EfficientFormerImageProcessor [[autodoc]] EfficientFormerImageProcessor - preprocess <frameworkcontent> <pt> ## EfficientFormerModel [[autodoc]] EfficientFormerModel - forward ## EfficientFormerForImageClassification [[autodoc]] EfficientFormerForImageClassification - forward ## EfficientFormerForImageClassificationWithTeacher [[autodoc]] EfficientFormerForImageClassificationWithTeacher - forward </pt> <tf> ## TFEfficientFormerModel [[autodoc]] TFEfficientFormerModel - call ## TFEfficientFormerForImageClassification [[autodoc]] TFEfficientFormerForImageClassification - call ## TFEfficientFormerForImageClassificationWithTeacher [[autodoc]] TFEfficientFormerForImageClassificationWithTeacher - call </tf> </frameworkcontent>
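A short, hedged inference sketch follows; the checkpoint name below is assumed to be one of the EfficientFormer weights published on the Hub, so verify it against the Hub before relying on it:

```py
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, EfficientFormerForImageClassification

checkpoint = "snap-research/efficientformer-l1-300"  # assumed checkpoint id
image_processor = AutoImageProcessor.from_pretrained(checkpoint)
model = EfficientFormerForImageClassification.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Predicted ImageNet-1k class
print(model.config.id2label[logits.argmax(-1).item()])
```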
transformers/docs/source/en/model_doc/efficientformer.md/0
{ "file_path": "transformers/docs/source/en/model_doc/efficientformer.md", "repo_id": "transformers", "token_count": 1075 }
225