text (string, lengths 7 to 318k) | id (string, lengths 14 to 166) | metadata (dict) | __index_level_0__ (int64, 0 to 439)
---|---|---|---|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel
from diffusers.utils import load_image
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_numpy,
numpy_cosine_similarity_distance,
require_torch_gpu,
slow,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
pipeline_class = StableDiffusionControlNetImg2ImgPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(4, 8),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
norm_num_groups=1,
)
torch.manual_seed(0)
controlnet = ControlNetModel(
block_out_channels=(4, 8),
layers_per_block=2,
in_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
cross_attention_dim=32,
conditioning_embedding_out_channels=(16, 32),
norm_num_groups=1,
)
torch.manual_seed(0)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[4, 8],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
norm_num_groups=2,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
controlnet_embedder_scale_factor = 2
control_image = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
generator=generator,
device=torch.device(device),
)
image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def test_attention_slicing_forward_pass(self):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
pipeline_class = StableDiffusionControlNetImg2ImgPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TODO: add image_params once the pipeline is refactored to use VaeImageProcessor.preprocess
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(4, 8),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
norm_num_groups=1,
)
torch.manual_seed(0)
def init_weights(m):
if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)  # in-place initializer (torch.nn.init.normal is deprecated)
m.bias.data.fill_(1.0)
controlnet1 = ControlNetModel(
block_out_channels=(4, 8),
layers_per_block=2,
in_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
cross_attention_dim=32,
conditioning_embedding_out_channels=(16, 32),
norm_num_groups=1,
)
controlnet1.controlnet_down_blocks.apply(init_weights)
torch.manual_seed(0)
controlnet2 = ControlNetModel(
block_out_channels=(4, 8),
layers_per_block=2,
in_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
cross_attention_dim=32,
conditioning_embedding_out_channels=(16, 32),
norm_num_groups=1,
)
controlnet2.controlnet_down_blocks.apply(init_weights)
torch.manual_seed(0)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[4, 8],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
norm_num_groups=2,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
controlnet = MultiControlNetModel([controlnet1, controlnet2])
components = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
controlnet_embedder_scale_factor = 2
control_image = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
generator=generator,
device=torch.device(device),
),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
generator=generator,
device=torch.device(device),
),
]
image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def test_control_guidance_switch(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
scale = 10.0
steps = 4
inputs = self.get_dummy_inputs(torch_device)
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_1 = pipe(**inputs)[0]
inputs = self.get_dummy_inputs(torch_device)
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
inputs = self.get_dummy_inputs(torch_device)
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
inputs = self.get_dummy_inputs(torch_device)
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_1 - output_2)) > 1e-3
assert np.sum(np.abs(output_1 - output_3)) > 1e-3
assert np.sum(np.abs(output_1 - output_4)) > 1e-3
def test_attention_slicing_forward_pass(self):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
def test_save_pretrained_raise_not_implemented_exception(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(tmpdir)
except NotImplementedError:
pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_canny(self):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = "evil space-punk bird"
control_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))
image = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))
output = pipe(
prompt,
image,
control_image=control_image,
generator=generator,
output_type="np",
num_inference_steps=50,
strength=0.6,
)
image = output.images[0]
assert image.shape == (512, 512, 3)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
)
assert np.abs(expected_image - image).max() < 9e-2
def test_load_local(self):
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
)
pipe.unet.set_default_attn_processor()
pipe.enable_model_cpu_offload()
controlnet = ControlNetModel.from_single_file(
"https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
)
pipe_sf = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
"https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors",
safety_checker=None,
controlnet=controlnet,
scheduler_type="pndm",
)
pipe_sf.unet.set_default_attn_processor()
pipe_sf.enable_model_cpu_offload()
control_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))
image = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))
prompt = "bird"
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(
prompt,
image=image,
control_image=control_image,
strength=0.9,
generator=generator,
output_type="np",
num_inference_steps=3,
).images[0]
generator = torch.Generator(device="cpu").manual_seed(0)
output_sf = pipe_sf(
prompt,
image=image,
control_image=control_image,
strength=0.9,
generator=generator,
output_type="np",
num_inference_steps=3,
).images[0]
max_diff = numpy_cosine_similarity_distance(output_sf.flatten(), output.flatten())
assert max_diff < 1e-3
| diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py/0 | {
"file_path": "diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py",
"repo_id": "diffusers",
"token_count": 8247
} | 131 |
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
pipeline_class = StableDiffusionInpaintPipeline
params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
image_params = frozenset(
[]
) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
image_latents_params = frozenset([])
callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"})
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=9,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
# SD2-specific config below
attention_head_dim=(2, 4),
use_linear_projection=True,
)
scheduler = PNDMScheduler(skip_prk_steps=True)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
sample_size=128,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
# SD2-specific config below
hidden_act="gelu",
projection_dim=512,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def test_stable_diffusion_inpaint(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionInpaintPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_inpaint_pipeline(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy"
)
model_id = "stabilityai/stable-diffusion-2-inpainting"
pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
generator = torch.manual_seed(0)
output = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
generator=generator,
output_type="np",
)
image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def test_stable_diffusion_inpaint_pipeline_fp16(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy"
)
model_id = "stabilityai/stable-diffusion-2-inpainting"
pipe = StableDiffusionInpaintPipeline.from_pretrained(
model_id,
torch_dtype=torch.float16,
safety_checker=None,
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
generator = torch.manual_seed(0)
output = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
generator=generator,
output_type="np",
)
image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
model_id = "stabilityai/stable-diffusion-2-inpainting"
pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionInpaintPipeline.from_pretrained(
model_id,
safety_checker=None,
scheduler=pndm,
torch_dtype=torch.float16,
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
generator = torch.manual_seed(0)
_ = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
generator=generator,
num_inference_steps=2,
output_type="np",
)
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py/0 | {
"file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py",
"repo_id": "diffusers",
"token_count": 4790
} | 132 |
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNet2DConditionModel,
UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
nightly,
require_torch_gpu,
skip_mps,
torch_device,
)
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = UnCLIPImageVariationPipeline
params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
batch_params = IMAGE_VARIATION_BATCH_PARAMS
required_optional_params = [
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
test_xformers_attention = False
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 100
@property
def dummy_tokenizer(self):
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=self.text_embedder_hidden_size,
projection_dim=self.text_embedder_hidden_size,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModelWithProjection(config)
@property
def dummy_image_encoder(self):
torch.manual_seed(0)
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size,
projection_dim=self.text_embedder_hidden_size,
num_hidden_layers=5,
num_attention_heads=4,
image_size=32,
intermediate_size=37,
patch_size=1,
)
return CLIPVisionModelWithProjection(config)
@property
def dummy_text_proj(self):
torch.manual_seed(0)
model_kwargs = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
model = UnCLIPTextProjModel(**model_kwargs)
return model
@property
def dummy_decoder(self):
torch.manual_seed(0)
model_kwargs = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_super_res_kwargs(self):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def dummy_super_res_first(self):
torch.manual_seed(0)
model = UNet2DModel(**self.dummy_super_res_kwargs)
return model
@property
def dummy_super_res_last(self):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1)
model = UNet2DModel(**self.dummy_super_res_kwargs)
return model
def get_dummy_components(self):
decoder = self.dummy_decoder
text_proj = self.dummy_text_proj
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
super_res_first = self.dummy_super_res_first
super_res_last = self.dummy_super_res_last
decoder_scheduler = UnCLIPScheduler(
variance_type="learned_range",
prediction_type="epsilon",
num_train_timesteps=1000,
)
super_res_scheduler = UnCLIPScheduler(
variance_type="fixed_small_log",
prediction_type="epsilon",
num_train_timesteps=1000,
)
feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def get_dummy_inputs(self, device, seed=0, pil_image=True):
input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
if pil_image:
input_image = input_image * 0.5 + 0.5
input_image = input_image.clamp(0, 1)
input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def test_unclip_image_variation_input_tensor(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
output = pipe(**pipeline_inputs)
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
image_from_tuple = pipe(
**tuple_pipeline_inputs,
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_unclip_image_variation_input_image(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
output = pipe(**pipeline_inputs)
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
image_from_tuple = pipe(
**tuple_pipeline_inputs,
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_unclip_image_variation_input_list_images(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
pipeline_inputs["image"] = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
output = pipe(**pipeline_inputs)
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
tuple_pipeline_inputs["image"] = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
image_from_tuple = pipe(
**tuple_pipeline_inputs,
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
expected_slice = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_unclip_passed_image_embed(self):
device = torch.device("cpu")
class DummyScheduler:
init_noise_sigma = 1
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device=device).manual_seed(0)
dtype = pipe.decoder.dtype
batch_size = 1
shape = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
decoder_latents = pipe.prepare_latents(
shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
)
shape = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
super_res_latents = pipe.prepare_latents(
shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
)
pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
img_out_1 = pipe(
**pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
).images
pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
# Don't pass image, instead pass embedding
image = pipeline_inputs.pop("image")
image_embeddings = pipe.image_encoder(image).image_embeds
img_out_2 = pipe(
**pipeline_inputs,
decoder_latents=decoder_latents,
super_res_latents=super_res_latents,
image_embeddings=image_embeddings,
).images
        # make sure passing image embeddings manually gives identical results
assert np.abs(img_out_1 - img_out_2).max() < 1e-4
# Overriding PipelineTesterMixin::test_attention_slicing_forward_pass
    # because UnCLIP GPU non-determinism requires a looser check.
@skip_mps
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == "cpu"
        # Check is relaxed because there is no torch 2.0 sliced-attention AddedKV processor
expected_max_diff = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
)
# Overriding PipelineTesterMixin::test_inference_batch_single_identical
    # because UnCLIP non-determinism requires a looser check.
@unittest.skip("UnCLIP produces very large differences. Test is not useful.")
@skip_mps
def test_inference_batch_single_identical(self):
additional_params_copy_to_batched_inputs = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, expected_max_diff=5e-3
)
def test_inference_batch_consistent(self):
additional_params_copy_to_batched_inputs = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
batch_sizes = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=batch_sizes,
additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
)
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
)
@skip_mps
def test_dict_tuple_outputs_equivalent(self):
return super().test_dict_tuple_outputs_equivalent()
@unittest.skip("UnCLIP produces very large difference. Test is not useful.")
@skip_mps
def test_save_load_local(self):
return super().test_save_load_local(expected_max_difference=4e-3)
@skip_mps
def test_save_load_optional_components(self):
return super().test_save_load_optional_components()
@unittest.skip("UnCLIP produces very large difference in fp16 vs fp32. Test is not useful.")
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=1.0)
@nightly
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_unclip_image_variation_karlo(self):
input_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
)
pipeline = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipeline(
input_image,
generator=generator,
output_type="np",
)
image = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(image, expected_image, 15)
| diffusers/tests/pipelines/unclip/test_unclip_image_variation.py/0 | {
"file_path": "diffusers/tests/pipelines/unclip/test_unclip_image_variation.py",
"repo_id": "diffusers",
"token_count": 8162
} | 133 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
scheduler_classes = (UniPCMultistepScheduler,)
forward_default_kwargs = (("num_inference_steps", 25),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output, new_output = sample, sample
for t in range(time_step, time_step + scheduler.config.solver_order + 1):
t = scheduler.timesteps[t]
output = scheduler.step(residual, t, output, **kwargs).prev_sample
new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def full_loop(self, scheduler=None, **config):
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
return sample
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_switch(self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2464) < 1e-3
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
scheduler = DEISMultistepScheduler.from_config(scheduler.config)
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2464) < 1e-3
def test_timesteps(self):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
solver_order=order,
solver_type=solver_type,
)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_solver_order_and_type(self):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
)
sample = self.full_loop(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
)
assert not torch.isnan(sample).any(), "Samples have nan numbers"
def test_lower_order_final(self):
self.check_over_configs(lower_order_final=True)
self.check_over_configs(lower_order_final=False)
def test_inference_steps(self):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2464) < 1e-3
def test_full_loop_with_karras(self):
sample = self.full_loop(use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2925) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.1014) < 1e-3
def test_full_loop_with_karras_and_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.1966) < 1e-3
def test_fp16_support(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
assert sample.dtype == torch.float16
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
t_start = 8
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# add noise
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 315.5757) < 1e-2, f" expected result sum 315.5757, but get {result_sum}"
assert abs(result_mean.item() - 0.4109) < 1e-3, f" expected result mean 0.4109, but get {result_mean}"
class UniPCMultistepScheduler1DTest(UniPCMultistepSchedulerTest):
@property
def dummy_sample(self):
batch_size = 4
num_channels = 3
width = 8
sample = torch.rand((batch_size, num_channels, width))
return sample
@property
def dummy_noise_deter(self):
batch_size = 4
num_channels = 3
width = 8
num_elems = batch_size * num_channels * width
sample = torch.arange(num_elems).flip(-1)
sample = sample.reshape(num_channels, width, batch_size)
sample = sample / num_elems
sample = sample.permute(2, 0, 1)
return sample
@property
def dummy_sample_deter(self):
batch_size = 4
num_channels = 3
width = 8
num_elems = batch_size * num_channels * width
sample = torch.arange(num_elems)
sample = sample.reshape(num_channels, width, batch_size)
sample = sample / num_elems
sample = sample.permute(2, 0, 1)
return sample
def test_switch(self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2441) < 1e-3
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
scheduler = DEISMultistepScheduler.from_config(scheduler.config)
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2441) < 1e-3
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2441) < 1e-3
def test_full_loop_with_karras(self):
sample = self.full_loop(use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2898) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.1014) < 1e-3
def test_full_loop_with_karras_and_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.1944) < 1e-3
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
t_start = 8
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# add noise
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 39.0870) < 1e-2, f" expected result sum 39.0870, but get {result_sum}"
assert abs(result_mean.item() - 0.4072) < 1e-3, f" expected result mean 0.4072, but get {result_mean}"
| diffusers/tests/schedulers/test_scheduler_unipc.py/0 | {
"file_path": "diffusers/tests/schedulers/test_scheduler_unipc.py",
"repo_id": "diffusers",
"token_count": 6988
} | 134 |
# Copyright 2023 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to close stale issues. Taken in part from the AllenNLP repository.
https://github.com/allenai/allennlp.
"""
import os
from datetime import datetime as dt
from datetime import timezone
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/diffusers")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.now(timezone.utc) - issue.updated_at).days > 7
and (dt.now(timezone.utc) - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed")
        elif (
            "stale" in [label.name.lower() for label in issue.get_labels()]  # compare label names, since get_labels() yields Label objects
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open")
issue.remove_from_labels("stale")
elif (
(dt.now(timezone.utc) - issue.updated_at).days > 23
and (dt.now(timezone.utc) - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored."
)
issue.add_to_labels("stale")
if __name__ == "__main__":
main()
| diffusers/utils/stale.py/0 | {
"file_path": "diffusers/utils/stale.py",
"repo_id": "diffusers",
"token_count": 1222
} | 135 |
<jupyter_start><jupyter_text>Implementation from scratch. Sometimes it is useful to consider the simplest possible version of something to better understand how it works. That is what we will try to do in this notebook, starting with a "toy" diffusion model to see how the different pieces work, and then examining how they differ from a more complex implementation. We will look at:- The corruption process (adding noise to data)- What a UNet is, and how to implement an extremely minimal one from scratch- Diffusion model training- Sampling theory. Then we will compare our versions with the diffusers DDPM implementation, exploring:- Improvements over our mini UNet- The DDPM noise schedule- Differences in the training objective- Timestep conditioning- Sampling approaches. This notebook is quite in-depth, and can safely be skipped if you are not excited about a from-scratch deep dive! It is also worth noting that most of the code here is for illustration purposes, and we do not recommend adopting any of it directly for your own work (unless you are trying to improve on the examples shown here for learning purposes). Setup and imports<jupyter_code>!pip install -q diffusers
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from diffusers import DDPMScheduler, UNet2DModel
from matplotlib import pyplot as plt
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f'Using device: {device}')<jupyter_output>Using device: cuda<jupyter_text>The data. We will test things with a very small dataset: MNIST. If you want to give the model a slightly harder challenge without changing anything else, `torchvision.datasets.FashionMNIST` should do the trick.<jupyter_code>dataset = torchvision.datasets.MNIST(root="mnist/", train=True, download=True, transform=torchvision.transforms.ToTensor())
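# Optional aside (a minimal sketch, not from the original notebook): for the slightly
# harder challenge mentioned above, FashionMNIST can be swapped in with the same
# arguments -- the root path here is just an illustrative choice.
# dataset = torchvision.datasets.FashionMNIST(root="fashion_mnist/", train=True, download=True, transform=torchvision.transforms.ToTensor())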
train_dataloader = DataLoader(dataset, batch_size=8, shuffle=True)
x, y = next(iter(train_dataloader))
print('Input shape:', x.shape)
print('Labels:', y)
plt.imshow(torchvision.utils.make_grid(x)[0], cmap='Greys');<jupyter_output>Input shape: torch.Size([8, 1, 28, 28])
Labels: tensor([1, 9, 7, 3, 5, 2, 1, 4])<jupyter_text>Each image is a 28 by 28 pixel greyscale drawing of a digit, with values ranging from 0 to 1. The corruption process. Suppose you hadn't read any diffusion model papers, but you know the process involves adding noise. How would you do it? We probably want a simple way to control the amount of corruption. So what if we take in a parameter for the amount of noise to add, and then we do: ```noise = torch.rand_like(x)``` ```noisy_x = (1-amount)*x + amount*noise``` If `amount = 0`, we get back the input without any changes. If amount gets up to 1, we get back noise with no trace of the input x. By mixing the input with noise this way, we keep the output in the same range (0 to 1). We can implement this fairly easily (just watch the shapes so you don't get burnt by broadcasting rules):<jupyter_code>def corrupt(x, amount):
    """Corrupt the input `x` by mixing it with noise according to `amount`"""
    noise = torch.rand_like(x)
    amount = amount.view(-1, 1, 1, 1) # Sort shape so broadcasting works
    return x*(1-amount) + noise*amount<jupyter_output><empty_output><jupyter_text>And take a look at the results visually to see that it works as expected:<jupyter_code># Plot the input data
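# Optional sanity check (a minimal sketch, not from the original notebook), run before
# the plots below: with amount=0 `corrupt` should return the input unchanged, and with
# amount=1 it should return pure noise that still lies in the [0, 1] range.
_check = torch.rand(2, 1, 28, 28)
assert torch.allclose(corrupt(_check, torch.zeros(2)), _check)
_pure_noise = corrupt(_check, torch.ones(2))
assert _pure_noise.min().item() >= 0 and _pure_noise.max().item() <= 1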
fig, axs = plt.subplots(2, 1, figsize=(12, 5))
axs[0].set_title('Input data')
axs[0].imshow(torchvision.utils.make_grid(x)[0], cmap='Greys')
# Adding noise
amount = torch.linspace(0, 1, x.shape[0]) # Left to right -> more corruption
noised_x = corrupt(x, amount)
# Tracรฉ de la version bruitรฉe
axs[1].set_title('Corrupted data (-- amount increases -->)')
axs[1].imshow(torchvision.utils.make_grid(noised_x)[0], cmap='Greys')<jupyter_output><empty_output><jupyter_text>As the amount of noise approaches 1, our data begins to look like pure random noise. But for most `noise_amounts`, you can guess the digit fairly well. Do you think this is optimal? The model. We would like a model that takes in a noisy 28px image and outputs a prediction of the same shape. A popular choice here is an architecture called a UNet. Originally invented for [segmentation tasks in medical imaging](https://arxiv.org/abs/1505.04597), a UNet consists of a "constricting path" through which the data is compressed and an "expanding path" through which it expands back up to the original dimension (similar to an autoencoder), but it also has skip connections that allow information and gradients to flow across different levels. Some UNets feature complex blocks at each stage, but for this toy demo we will build a minimal example that takes a one-channel image and passes it through three convolutional layers on the down path (the down_layers in the diagram and the code) and three on the up path, with skip connections between the down and up layers. We will use max pooling for downsampling and `nn.Upsample` for upsampling rather than relying on learnable layers like more complex UNets do. Here is the rough architecture showing the number of channels in the output of each layer: Here is what that looks like in code:<jupyter_code>class BasicUNet(nn.Module):
    """A minimal UNet implementation"""
def __init__(self, in_channels=1, out_channels=1):
super().__init__()
self.down_layers = torch.nn.ModuleList([
nn.Conv2d(in_channels, 32, kernel_size=5, padding=2),
nn.Conv2d(32, 64, kernel_size=5, padding=2),
nn.Conv2d(64, 64, kernel_size=5, padding=2),
])
self.up_layers = torch.nn.ModuleList([
nn.Conv2d(64, 64, kernel_size=5, padding=2),
nn.Conv2d(64, 32, kernel_size=5, padding=2),
nn.Conv2d(32, out_channels, kernel_size=5, padding=2),
])
self.act = nn.SiLU() # La fonction d'activation
self.downscale = nn.MaxPool2d(2)
self.upscale = nn.Upsample(scale_factor=2)
def forward(self, x):
h = []
for i, l in enumerate(self.down_layers):
x = self.act(l(x)) # ร travers la couche et la fonction d'activation
if i < 2: # Pour toutes les couches sauf la troisiรจme (derniรจre) :
h.append(x) # Stockage de la sortie pour la skip connexion
x = self.downscale(x) # Rรฉduction d'รฉchelle pour la couche suivante
for i, l in enumerate(self.up_layers):
if i > 0:
x = self.upscale(x) # Upscale
x += h.pop() # Rรฉcupรฉration d'un rรฉsultat stockรฉ (skip connection)
x = self.act(l(x)) # Par le biais de la couche et de la fonction d'activation
return x<jupyter_output><empty_output><jupyter_text>Nous pouvons vรฉrifier que la forme de la sortie est la mรชme que celle de l'entrรฉe, comme nous nous y attendions :<jupyter_code>net = BasicUNet()
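# (Ajout illustratif, esquisse) suivons l'évolution des dimensions spatiales sur le chemin descendant : 28 -> 14 -> 7
_h = torch.rand(8, 1, 28, 28)
for _i, _l in enumerate(net.down_layers):
    _h = net.act(_l(_h))
    if _i < 2:
        _h = net.downscale(_h)
    print(_h.shape)  # (8, 32, 14, 14), puis (8, 64, 7, 7), puis (8, 64, 7, 7)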
x = torch.rand(8, 1, 28, 28)
net(x).shape<jupyter_output><empty_output><jupyter_text>Ce rรฉseau compte un peu plus de 300 000 paramรจtres :<jupyter_code>sum([p.numel() for p in net.parameters()])<jupyter_output><empty_output><jupyter_text>Vous pouvez envisager de modifier le nombre de canaux dans chaque couche ou d'intervertir les architectures si vous le souhaitez. Entraรฎner le rรฉseauQue doit faire exactement le modรจle ? Lร encore, il y a plusieurs faรงons de procรฉder, mais pour cette dรฉmonstration, choisissons un cadre simple : รฉtant donnรฉ une entrรฉe corrompue noisy_x, le modรจle doit produire sa meilleure estimation de ce ร quoi ressemble l'original x. Nous comparerons cette valeur ร la valeur rรฉelle par le biais de l'erreur quadratique moyenne. Nous comparerons cette estimation ร la valeur rรฉelle par le biais de l'erreur quadratique moyenne.Nous pouvons maintenant entraรฎner le rรฉseau.- Obtenir un batch de donnรฉes- Corrompre les donnรฉes de maniรจre alรฉatoire- Nourrir le modรจle avec ces donnรฉes- Comparer les prรฉdictions du modรจle avec les images propres pour calculer notre perte- Mettre ร jour les paramรจtres du modรจle en consรฉquence.N'hรฉsitez pas ร modifier ce modรจle et ร voir si vous pouvez l'amรฉliorer !<jupyter_code># Chargeur de donnรฉes (vous pouvez modifier la taille des batchs)
batch_size = 128
train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# Combien de fois devrions-nous passer les donnรฉes en revue ?
n_epochs = 3
# Crรฉer le rรฉseau
net = BasicUNet()
net.to(device)
# Notre fonction de perte
loss_fn = nn.MSELoss()
# L'optimiseur
opt = torch.optim.Adam(net.parameters(), lr=1e-3)
# Conserver une trace des pertes pour les consulter ultรฉrieurement
losses = []
# La boucle d'entraรฎnement
for epoch in range(n_epochs):
for x, y in train_dataloader:
# Obtenir des donnรฉes et prรฉparer la version corrompue
x = x.to(device) # Data on the GPU
noise_amount = torch.rand(x.shape[0]).to(device) # Pick random noise amounts
noisy_x = corrupt(x, noise_amount) # Create our noisy x
# Obtenir la prรฉdiction du modรจle
pred = net(noisy_x)
# Calculer la perte
loss = loss_fn(pred, x) # Dans quelle mesure la sortie est-elle proche du vรฉritable x "propre" ?
# Rรฉtropropager et mettre ร jour les paramรจtres
opt.zero_grad()
loss.backward()
opt.step()
# Stocker la perte pour plus tard
losses.append(loss.item())
# Afficher la moyenne des valeurs de perte pour cette รฉpoque :
avg_loss = sum(losses[-len(train_dataloader):])/len(train_dataloader)
print(f'Finished epoch {epoch}. Average loss for this epoch: {avg_loss:05f}')
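# (Ajout facultatif) une courbe lissée par moyenne glissante est souvent plus lisible que la perte brute :
# plt.plot(torch.tensor(losses).unfold(0, 32, 1).mean(1))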
# Visualiser la courbe des pertes
plt.plot(losses)
plt.ylim(0, 0.1)<jupyter_output>Finished epoch 0. Average loss for this epoch: 0.026736
Finished epoch 1. Average loss for this epoch: 0.020692
Finished epoch 2. Average loss for this epoch: 0.018887<jupyter_text>Nous pouvons essayer de voir ร quoi ressemblent les prรฉdictions du modรจle en saisissant un batch de donnรฉes, en les corrompant ร diffรฉrents degrรฉs et en visualisant ensuite les prรฉdictions du modรจle :<jupyter_code># Rรฉcupรฉrer des donnรฉes
x, y = next(iter(train_dataloader))
x = x[:8] # Seuls les 8 premiers sont utilisรฉs pour faciliter le graphique
# Corruption avec une รฉchelle de montants
amount = torch.linspace(0, 1, x.shape[0]) # De gauche ร droite -> plus de corruption
noised_x = corrupt(x, amount)
# Obtenir les prรฉdictions du modรจle
with torch.no_grad():
preds = net(noised_x.to(device)).detach().cpu()
# Graphique
fig, axs = plt.subplots(3, 1, figsize=(12, 7))
axs[0].set_title('Input data')
axs[0].imshow(torchvision.utils.make_grid(x)[0].clip(0, 1), cmap='Greys')
axs[1].set_title('Corrupted data')
axs[1].imshow(torchvision.utils.make_grid(noised_x)[0].clip(0, 1), cmap='Greys')
axs[2].set_title('Network Predictions')
axs[2].imshow(torchvision.utils.make_grid(preds)[0].clip(0, 1), cmap='Greys');<jupyter_output><empty_output><jupyter_text>Vous pouvez constater que pour les montants les plus faibles, les prรฉdictions sont plutรดt bonnes ! Mais lorsque le niveau devient trรจs รฉlevรฉ, le modรจle a moins d'รฉlรฉments pour travailler, et lorsque nous arrivons ร amount=1, il produit un dรฉsordre flou proche de la moyenne du jeu de donnรฉes pour essayer de couvrir ses paris sur ce ร quoi la sortie pourrait ressembler... รchantillonnageSi nos prรฉdictions ร des niveaux de bruit รฉlevรฉs ne sont pas trรจs bonnes, comment gรฉnรฉrer des images ?Et si nous partions d'un bruit alรฉatoire, que nous regardions les prรฉdictions du modรจle, mais que nous ne nous rapprochions que trรจs peu de cette prรฉdiction (disons, 20 % du chemin). Nous disposons alors d'une image trรจs bruyante dans laquelle il y a peut-รชtre un soupรงon de structure, que nous pouvons introduire dans le modรจle pour obtenir une nouvelle prรฉdiction. Nous espรฉrons que cette nouvelle prรฉdiction est lรฉgรจrement meilleure que la premiรจre (puisque notre point de dรฉpart est lรฉgรจrement moins bruitรฉ) et que nous pouvons donc faire un autre petit pas avec cette nouvelle et meilleure prรฉdiction.Nous rรฉpรฉtons l'opรฉration plusieurs fois et (si tout se passe bien) nous obtenons une image ! Voici ce processus illustrรฉ en seulement 5 รฉtapes, en visualisant l'entrรฉe du modรจle (ร gauche) et les images dรฉbruitรฉes prรฉdites (ร droite) ร chaque รฉtape. Notez que mรชme si le modรจle prรฉdit l'image dรฉbruitรฉe dรจs l'รฉtape 1, nous ne faisons qu'une partie du chemin. Au fil des รฉtapes, les structures apparaissent et sont affinรฉes, jusqu'ร ce que nous obtenions nos rรฉsultats finaux.<jupyter_code>n_steps = 5
x = torch.rand(8, 1, 28, 28).to(device) # Commencer au hasard
step_history = [x.detach().cpu()]
pred_output_history = []
for i in range(n_steps):
with torch.no_grad(): # Pas besoin de suivre les gradients pendant l'infรฉrence
pred = net(x) # Prรฉdire le x0 dรฉbruitรฉ
pred_output_history.append(pred.detach().cpu()) # Stocker les rรฉsultats du modรจle pour les tracer
mix_factor = 1/(n_steps - i) # Dans quelle mesure nous nous rapprochons de la prรฉdiction
x = x*(1-mix_factor) + pred*mix_factor # Dรฉplacer une partie du chemin
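    # (Commentaire ajouté) au dernier pas, i = n_steps-1 donc mix_factor = 1 :
    # on adopte alors entièrement la prédiction du modèle.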
step_history.append(x.detach().cpu()) # Stocker l'รฉtape pour le graphique
fig, axs = plt.subplots(n_steps, 2, figsize=(9, 4), sharex=True)
axs[0,0].set_title('x (model input)')
axs[0,1].set_title('model prediction')
for i in range(n_steps):
axs[i, 0].imshow(torchvision.utils.make_grid(step_history[i])[0].clip(0, 1), cmap='Greys')
axs[i, 1].imshow(torchvision.utils.make_grid(pred_output_history[i])[0].clip(0, 1), cmap='Greys')<jupyter_output><empty_output><jupyter_text>Nous pouvons diviser le processus en plusieurs รฉtapes et espรฉrer ainsi obtenir de meilleures images :<jupyter_code>n_steps = 40
x = torch.rand(64, 1, 28, 28).to(device)
for i in range(n_steps):
noise_amount = torch.ones((x.shape[0], )).to(device) * (1-(i/n_steps)) # Starting high going low
with torch.no_grad():
pred = net(x)
mix_factor = 1/(n_steps - i)
x = x*(1-mix_factor) + pred*mix_factor
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
ax.imshow(torchvision.utils.make_grid(x.detach().cpu(), nrow=8)[0].clip(0, 1), cmap='Greys')<jupyter_output><empty_output><jupyter_text>Ce n'est pas gรฉnial, mais il y a des chiffres reconnaissables ! Vous pouvez expรฉrimenter en entraรฎnant plus longtemps (disons, 10 ou 20 รฉpoques) et en modifiant la configuration du modรจle, le taux d'apprentissage, l'optimiseur, etc. N'oubliez pas non plus que fashionMNIST peut รชtre remplacรฉ en une ligne si vous voulez essayer un jeu de donnรฉes un peu plus difficile. Comparaison avec DDPMDans cette section, nous allons voir comment notre implรฉmentation diffรจre de l'approche utilisรฉe dans l'autre *notebook* ([Introduction ร *Diffusers*]()), qui est basรฉ sur l'article de DDPM.Nous verrons que- Le diffuseur `UNet2DModel` est un peu plus avancรฉ que notre BasicUNet- Le processus de corruption est traitรฉ diffรฉremment- L'objectif d'entraรฎnement est diffรฉrent, puisqu'il s'agit de prรฉdire le bruit plutรดt que l'image dรฉbruitรฉe.- Le modรจle est conditionnรฉ sur la quantitรฉ de bruit prรฉsent via un conditionnement par pas de temps, oรน t est transmis comme un argument supplรฉmentaire ร la mรฉthode forward.- Il existe un certain nombre de stratรฉgies d'รฉchantillonnage diffรฉrentes, qui devraient fonctionner mieux que notre version simpliste ci-dessus.Un certain nombre d'amรฉliorations ont รฉtรฉ suggรฉrรฉes depuis la publication de l'article sur le DDPM, mais nous espรฉrons que cet exemple est instructif en ce qui concerne les diffรฉrentes dรฉcisions de conception possibles. Une fois que vous aurez lu cet article, vous pourrez vous plonger dans le document intitulรฉ [*Elucidating the Design Space of Diffusion-Based Generative Models*](https://arxiv.org/abs/2206.00364) qui examine tous ces composants en dรฉtail et formule de nouvelles recommandations sur la maniรจre d'obtenir les meilleures performances.Si tout cela est trop technique ou intimidant, ne vous inquiรฉtez pas ! N'hรฉsitez pas ร sauter le reste de ce *notebook* ou ร le garder pour un jour de pluie. L'UNetLe modรจle UNet2DModel de *Diffusers* comporte un certain nombre d'amรฉliorations par rapport ร notre UNet de base ci-dessus :- GroupNorm applique une normalisation par groupe aux entrรฉes de chaque bloc- Couches de *dropout* pour un entraรฎnement plus doux- Plusieurs couches de ResNet par bloc (si layers_per_block n'est pas fixรฉ ร 1)- Attention (gรฉnรฉralement utilisรฉ uniquement pour les blocs ร faible rรฉsolution)- Conditionnement sur le pas de temps- Blocs de sous-รฉchantillonnage et de surรฉchantillonnage avec des paramรจtres pouvant รชtre apprisCrรฉons et inspectons un modรจle UNet2DModel :<jupyter_code>model = UNet2DModel(
sample_size=28, # la rรฉsolution de l'image cible
in_channels=1, # le nombre de canaux d'entrรฉe, 3 pour les images RVB
out_channels=1, # le nombre de canaux de sortie
layers_per_block=2, # le nombre de couches ResNet ร utiliser par bloc UNet
block_out_channels=(32, 64, 64), # Correspondant ร peu prรจs ร notre exemple UNet de base
down_block_types=(
"DownBlock2D", # un bloc de sous-รฉchantillonnage ResNet normal
"AttnDownBlock2D", # un bloc de sous-รฉchantillonnage ResNet avec auto-attention spatiale
"AttnDownBlock2D",
),
up_block_types=(
"AttnUpBlock2D",
"AttnUpBlock2D", # un bloc de surรฉchantillonnage ResNet avec auto-attention spatiale
"UpBlock2D", # un bloc de surรฉchantillonnage ResNet standard
),
)
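# (Ajout illustratif, esquisse) contrairement à notre BasicUNet, ce modèle est aussi conditionné
# sur un pas de temps passé en argument ; la sortie garde la forme de l'entrée :
with torch.no_grad():
    print(model(torch.rand(1, 1, 28, 28), timestep=0).sample.shape)  # torch.Size([1, 1, 28, 28])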
print(model)<jupyter_output>UNet2DModel(
(conv_in): Conv2d(1, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(time_proj): Timesteps()
(time_embedding): TimestepEmbedding(
(linear_1): Linear(in_features=32, out_features=128, bias=True)
(act): SiLU()
(linear_2): Linear(in_features=128, out_features=128, bias=True)
)
(down_blocks): ModuleList(
(0): DownBlock2D(
(resnets): ModuleList(
(0): ResnetBlock2D(
(norm1): GroupNorm(32, 32, eps=1e-05, affine=True)
(conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(time_emb_proj): Linear(in_features=128, out_features=32, bias=True)
(norm2): GroupNorm(32, 32, eps=1e-05, affine=True)
(dropout): Dropout(p=0.0, inplace=False)
(conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(nonlinearity): SiLU()
)
(1): ResnetBlock2D(
(norm1): GroupNorm(32, 32, eps=1e-05, affine=True)
(conv1): Con[...]<jupyter_text>Comme vous pouvez le constater, il y a un peu plus de choses qui se passent ! Il a รฉgalement beaucoup plus de paramรจtres que notre BasicUNet :<jupyter_code>sum([p.numel() for p in model.parameters()]) # 1,7M contre les ~309k paramรจtres du BasicUNet<jupyter_output><empty_output><jupyter_text>Nous pouvons reproduire l'entraรฎnement prรฉsentรฉ ci-dessus en utilisant ce modรจle ร la place de notre modรจle original. Nous devons passer x et le pas de temps au modรจle (ici, nous passons toujours t=0 pour montrer qu'il fonctionne sans ce conditionnement de pas de temps et pour faciliter le code d'รฉchantillonnage, mais vous pouvez รฉgalement essayer d'introduire `(amount*1000)` pour obtenir un รฉquivalent de pas de temps ร partir du montant de la corruption). Les lignes modifiรฉes sont indiquรฉes par `<<<` si vous souhaitez inspecter le code.<jupyter_code># Dataloader (vous pouvez modifier la taille du batch)
batch_size = 128
train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# Combien de fois devrions-nous passer les donnรฉes en revue ?
n_epochs = 3
# Crรฉer le rรฉseau
net = UNet2DModel(
sample_size=28, # la rรฉsolution de l'image cible
in_channels=1, # le nombre de canaux d'entrรฉe, 3 pour les images RVB
out_channels=1, # le nombre de canaux de sortie
layers_per_block=2, # le nombre de couches ResNet ร utiliser par bloc UNet
block_out_channels=(32, 64, 64), # Correspondant ร peu prรจs ร notre exemple UNet de base
down_block_types=(
"DownBlock2D", # un bloc de sous-รฉchantillonnage ResNet normal
"AttnDownBlock2D", # un bloc de sous-รฉchantillonnage ResNet avec auto-attention spatiale
"AttnDownBlock2D",
),
up_block_types=(
"AttnUpBlock2D",
"AttnUpBlock2D", # un bloc de surรฉchantillonnage ResNet avec auto-attention spatiale
"UpBlock2D", # un bloc de surรฉchantillonnage ResNet standard
),
)
net.to(device)
# Notre fonction de perte
loss_fn = nn.MSELoss()
# L'optimiseur
opt = torch.optim.Adam(net.parameters(), lr=1e-3)
# Conserver une trace des pertes pour les visualiser plus tard
losses = []
# La boucle d'entraรฎnement
for epoch in range(n_epochs):
for x, y in train_dataloader:
# Obtenir des donnรฉes et prรฉparer la version corrompue
x = x.to(device) # Data on the GPU
noise_amount = torch.rand(x.shape[0]).to(device) # Choisir des quantitรฉs de bruit alรฉatoires
noisy_x = corrupt(x, noise_amount) # Crรฉer notre bruit x
# Obtenir la prรฉdiction du modรจle
pred = net(noisy_x, 0).sample #<<< En utilisant toujours le pas de temps 0, en ajoutant .sample
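        # (Variante esquissée, non exécutée ici) pour réellement conditionner sur le niveau de bruit,
        # on pourrait passer un pseudo pas de temps dérivé de la quantité de corruption :
        # pred = net(noisy_x, (noise_amount * 1000)).sample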
# Calculer la perte
loss = loss_fn(pred, x) # Dans quelle mesure la sortie est-elle proche du vรฉritable x "propre" ?
# Rรฉtropropager et mettre ร jour les paramรจtres
opt.zero_grad()
loss.backward()
opt.step()
# Stocker la perte pour plus tard
losses.append(loss.item())
# Afficher la moyenne des valeurs de perte pour cette รฉpoque :
avg_loss = sum(losses[-len(train_dataloader):])/len(train_dataloader)
print(f'Finished epoch {epoch}. Average loss for this epoch: {avg_loss:05f}')
# Graphique
fig, axs = plt.subplots(1, 2, figsize=(12, 5))
# Perte
axs[0].plot(losses)
axs[0].set_ylim(0, 0.1)
axs[0].set_title('Loss over time')
# รchantillons
n_steps = 40
x = torch.rand(64, 1, 28, 28).to(device)
for i in range(n_steps):
    noise_amount = torch.ones((x.shape[0], )).to(device) * (1-(i/n_steps)) # Niveau de bruit décroissant : élevé au départ, faible à la fin
with torch.no_grad():
pred = net(x, 0).sample
mix_factor = 1/(n_steps - i)
x = x*(1-mix_factor) + pred*mix_factor
axs[1].imshow(torchvision.utils.make_grid(x.detach().cpu(), nrow=8)[0].clip(0, 1), cmap='Greys')
axs[1].set_title('Generated Samples')<jupyter_output>Finished epoch 0. Average loss for this epoch: 0.018925
Finished epoch 1. Average loss for this epoch: 0.012785
Finished epoch 2. Average loss for this epoch: 0.011694<jupyter_text>Ces rรฉsultats sont bien meilleurs que notre premiรจre sรฉrie de rรฉsultats ! Vous pouvez envisager de modifier la configuration du Unet ou de prolonger l'entraรฎnement afin d'obtenir des performances encore meilleures. Le processus de corruptionLe papier DDPM dรฉcrit un processus de corruption qui ajoute une petite quantitรฉ de bruit ร chaque "pas de temps". Etant donnรฉ $x_{t-1}$ pour un certain pas de temps, nous pouvons obtenir la version suivante (lรฉgรจrement plus bruitรฉe) $x_t$ avec :$q(\mathbf{x}_t \vert \mathbf{x}_{t-1}) = \mathcal{N}(\mathbf{x}_t; \sqrt{1 - \beta_t} \mathbf{x}_{t-1}, \beta_t\mathbf{I}) \quadq(\mathbf{x}_{1:T} \vert \mathbf{x}_0) = \prod^T_{t=1} q(\mathbf{x}_t \vert \mathbf{x}_{t-1})$Nous prenons $x_{t-1}$, l'รฉchelonnons de $\sqrt{1 - \beta_t}$ et ajoutons du bruit รฉchelonnรฉ de $\beta_t$. Ce $\beta$ est dรฉfini pour chaque t en fonction d'un certain plannificateur, et dรฉtermine la quantitรฉ de bruit ajoutรฉe par pas de temps.Nous ne voulons pas nรฉcessairement faire cette opรฉration 500 fois pour obtenir $x_{500}$, nous avons donc une autre formule pour obtenir $x_t$ pour n'importe quel t รฉtant donnรฉ $x_0$ :<jupyter_code>#??noise_scheduler.add_noise
noise_scheduler = DDPMScheduler(num_train_timesteps=1000)
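# (Ajout illustratif, esquisse) la « formule fermée » évoquée ci-dessus est :
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * bruit
# On peut vérifier qu'elle correspond bien à ce que fait noise_scheduler.add_noise :
_t = torch.tensor([500])
_x0 = torch.rand(1, 1, 28, 28)
_noise = torch.randn_like(_x0)
_alpha_bar = noise_scheduler.alphas_cumprod[_t]
_manuel = _alpha_bar.sqrt() * _x0 + (1 - _alpha_bar).sqrt() * _noise
print(torch.allclose(_manuel, noise_scheduler.add_noise(_x0, _noise, _t), atol=1e-5))  # True attendu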
plt.plot(noise_scheduler.alphas_cumprod.cpu() ** 0.5, label=r"${\sqrt{\bar{\alpha}_t}}$")
plt.plot((1 - noise_scheduler.alphas_cumprod.cpu()) ** 0.5, label=r"$\sqrt{(1 - \bar{\alpha}_t)}$")
plt.legend(fontsize="x-large");<jupyter_output><empty_output><jupyter_text>Au dรฉpart, le x bruitรฉ est principalement x (sqrt_alpha_prod ~= 1), mais au fil du temps, la contribution de x diminue et la composante bruit augmente. Contrairement ร notre mรฉlange linรฉaire de x et de bruit en fonction de la quantitรฉ, celui-ci devient bruyant relativement rapidement. Nous pouvons visualiser cela sur quelques donnรฉes :<jupyter_code># Bruit d'un batch d'images pour visualiser l'effet
fig, axs = plt.subplots(3, 1, figsize=(16, 10))
xb, yb = next(iter(train_dataloader))
xb = xb.to(device)[:8]
xb = xb * 2. - 1. # Pour aller dans (-1, 1)
print('X shape', xb.shape)
# Afficher les entrรฉes propres
axs[0].imshow(torchvision.utils.make_grid(xb[:8])[0].detach().cpu(), cmap='Greys')
axs[0].set_title('Clean X')
# Ajouter du bruit avec le plannificateur
timesteps = torch.linspace(0, 999, 8).long().to(device)
noise = torch.randn_like(xb) # << NB: randn et non rand
noisy_xb = noise_scheduler.add_noise(xb, noise, timesteps)
print('Noisy X shape', noisy_xb.shape)
# Afficher la version bruyante (avec et sans coupure)
axs[1].imshow(torchvision.utils.make_grid(noisy_xb[:8])[0].detach().cpu().clip(-1, 1), cmap='Greys')
axs[1].set_title('Noisy X (clipped to (-1, 1))')
axs[2].imshow(torchvision.utils.make_grid(noisy_xb[:8])[0].detach().cpu(), cmap='Greys')
axs[2].set_title('Noisy X')<jupyter_output>X shape torch.Size([8, 1, 28, 28])
Noisy X shape torch.Size([8, 1, 28, 28])
<jupyter_start><jupyter_text>Stable Diffusion : plongรฉe en profondeurStable Diffusion est un puissant modรจle de texte ร image. Il existe plusieurs sites web et outils pour rendre son utilisation aussi simple que possible. Il est รฉgalement intรฉgrรฉ ร la bibliothรจque de Diffusers d'Huggingface, ce qui permet de gรฉnรฉrer des images en toute simplicitรฉ :```pyfrom diffusers import StableDiffusionPipelinepipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=True).to("cuda")image = pipe("An astronaught scuba diving").images[0]```Dans ce *notebook*, nous allons nous plonger dans le code qui se cache derriรจre ces interfaces faciles ร utiliser, pour voir ce qui se passe sous le capot. Nous commencerons par recrรฉer la fonctionnalitรฉ ci-dessus sous la forme d'un morceau de code effrayant, puis, un par un, nous inspecterons les diffรฉrents composants et comprendrons ce qu'ils font. ร la fin de ce *notebook*, cette mรชme boucle d'รฉchantillonnage devrait ressembler ร quelque chose que vous pouvez peaufiner et modifier ร votre guise. Configuration et importationsVous devrez vous connecter ร Hugging Face et accepter les termes de la licence pour ce modรจle (voir la [carte de modรจle](https://huggingface.co/CompVis/stable-diffusion-v1-4) pour plus de dรฉtails). Lorsque vous exรฉcuterez ce *notebook* pour la premiรจre fois, vous devrez dรฉcommenter les deux cellules suivantes pour installer les prรฉrequis et vous connecter au Hub avec un *token* d'accรจs.<jupyter_code># !pip install -q --upgrade transformers diffusers ftfy
from base64 import b64encode
import numpy
import torch
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
from huggingface_hub import notebook_login
# Pour l'affichage vidรฉo
from IPython.display import HTML
from matplotlib import pyplot as plt
from pathlib import Path
from PIL import Image
from torch import autocast
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, logging
torch.manual_seed(1)
if not (Path.home()/'.huggingface'/'token').exists(): notebook_login()
# Suppression de certains avertissements inutiles lors du chargement de CLIPTextModel
logging.set_verbosity_error()
# Dรฉfinir l'appareil
torch_device = "cuda" if torch.cuda.is_available() else "cpu"<jupyter_output><empty_output><jupyter_text>Chargement des modรจlesCe code (et celui de la section suivante) provient du [*notebook* illustratif d'Huggingface](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb).Il tรฉlรฉcharge et configure les modรจles et les composants que nous utiliserons. Exรฉcutons-le pour l'instant et passons ร la section suivante pour vรฉrifier que tout fonctionne avant d'aller plus loin.Si vous avez chargรฉ un pipeline, vous pouvez aussi accรฉder ร ces composants en utilisant `pipe.unet`, `pipe.vae` et ainsi de suite.**Dans ce *notebook*, nous ne faisons pas d'รฉconomies de mรฉmoire. Si vous vous retrouvez ร court de RAM GPU, regardez le code du pipeline pour vous inspirer avec des choses comme le dรฉcoupage de l'attention, le passage ร la demi-prรฉcision (fp16), le maintien du VAE sur le CPU et d'autres modifications.**<jupyter_code># Charger le modรจle auto-encodeur qui sera utilisรฉ pour dรฉcoder les latents dans l'espace de l'image
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
# Charger le tokenizer et l'encodeur
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
# Le modรจle UNet pour gรฉnรฉrer les latents
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
# Le planificateur de bruit
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
# Nous allons au GPU !
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device);<jupyter_output><empty_output><jupyter_text>Une boucle de diffusionSi tout ce que vous voulez, c'est crรฉer une image avec du texte, vous pouvez ignorer ce notebook et utiliser l'un des outils existants (comme [DreamStudio](https://beta.dreamstudio.ai/generate)) ou utiliser le pipeline simplifiรฉ d'Hugging Face comme documentรฉ [ici](https://huggingface.co/blog/stable_diffusion).Ce que nous voulons faire ici, c'est approfondir un peu plus la faรงon dont cela fonctionne. Nous allons donc commencer par vรฉrifier que le code de l'exemple s'exรฉcute. Il ressemble beaucoup ร ce que vous trouverez si vous inspectez la mรฉthode [__call__()](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.pyL200) du pipeline de Stable Diffusion.<jupyter_code># Quelques paramรจtres
prompt = ["A watercolor painting of an otter"]
height = 512 # hauteur par dรฉfaut de Stable Diffusion
width = 512 # largeur par dรฉfaut de Stable Diffusion
num_inference_steps = 30 # Nombre d'รฉtapes de dรฉbruitage
guidance_scale = 7.5 # รchelle pour un guidage sans classifieur
generator = torch.manual_seed(32) # Gรฉnรฉrateur de la graine pour crรฉer le bruit latent initial
batch_size = 1
# Preparation du texte
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# Preparation du planificateur
scheduler.set_timesteps(num_inference_steps)
# Preparation des latents
latents = torch.randn(
(batch_size, unet.in_channels, height // 8, width // 8),
generator=generator,
)
latents = latents.to(torch_device)
latents = latents * scheduler.init_noise_sigma # Mise ร l'รฉchelle (versions prรฉcรฉdentes) latents = latents * self.scheduler.sigmas[0]
# Boucle
with autocast("cuda"):
for i, t in tqdm(enumerate(scheduler.timesteps)):
# รฉtendre les latents si nous procรฉdons ร un guidage sans classifieur afin d'รฉviter de faire deux passages en avant
latent_model_input = torch.cat([latents] * 2)
sigma = scheduler.sigmas[i]
# mettre ร l'รฉchelle les latents (prรฉconditionnement)
# latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5) # Diffusers 0.3 et moins
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
# prรฉdire le bruit rรฉsiduel
with torch.no_grad():
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# effectuer le guidage
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
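        # (Commentaire ajouté) guidage sans classifieur : on part de la prédiction non conditionnée
        # et on s'en éloigne dans la direction de la prédiction conditionnée par le texte ;
        # guidance_scale > 1 renforce l'adhérence au prompt, au prix d'une moindre diversité.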
# calculer l'รฉchantillon bruitรฉ prรฉcรฉdent x_t -> x_t-1
# latents = scheduler.step(noise_pred, i, latents)["prev_sample"] # Diffusers 0.3 et moins
latents = scheduler.step(noise_pred, t, latents).prev_sample
# mettre ร l'รฉchelle et dรฉcoder les latents de l'image ร l'aide du vae
latents = 1 / 0.18215 * latents
with torch.no_grad():
image = vae.decode(latents).sample
# Affichage
image = (image / 2 + 0.5).clamp(0, 1)
image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
images = (image * 255).round().astype("uint8")
pil_images = [Image.fromarray(image) for image in images]
pil_images[0]<jupyter_output><empty_output><jupyter_text>Cela fonctionne, mais cela fait beaucoup de code ! Examinons les composants un par un. L'auto-encodeur (AE)L'AE peut encoder une image dans une sorte de reprรฉsentation latente, et la dรฉcoder ร nouveau en une image. Nous avons regroupรฉ le code dans quelques fonctions pour que nous puissions voir ร quoi cela ressemble en action :<jupyter_code>def pil_to_latent(input_im):
# Une seule image -> un seul latent dans un batch (donc taille 1, 4, 64, 64)
with torch.no_grad():
latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device)*2-1) # Note scaling
return 0.18215 * latent.latent_dist.sample()
def latents_to_pil(latents):
    # batch de latents -> liste d'images
latents = (1 / 0.18215) * latents
with torch.no_grad():
image = vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
images = (image * 255).round().astype("uint8")
pil_images = [Image.fromarray(image) for image in images]
return pil_images<jupyter_output><empty_output><jupyter_text>Nous utiliserons ici une image provenant du web, mais vous pouvez charger la vรดtre en la tรฉlรฉchargeant et en modifiant le nom du fichier dans la cellule suivante.<jupyter_code># Tรฉlรฉcharger une image de dรฉmonstration
!curl --output macaw.jpg 'https://lafeber.com/pet-birds/wp-content/uploads/2018/06/Scarlet-Macaw-2.jpg'
# Charger l'image avec PIL
input_image = Image.open('macaw.jpg').resize((512, 512))
input_image<jupyter_output><empty_output><jupyter_text>L'encodage dans l'espace latent de l'AE ร l'aide de la fonction dรฉfinie ci-dessus se prรฉsente comme suit :<jupyter_code># Encoder dans l'espace latent
encoded = pil_to_latent(input_image)
encoded.shape
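# (Ajout) le facteur de compression mentionné plus bas : (3*512*512) / (4*64*64) = 48
print((3 * 512 * 512) / (4 * 64 * 64))  # 48.0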
# Visualisons les quatre canaux de cette reprรฉsentation latente :
fig, axs = plt.subplots(1, 4, figsize=(16, 4))
for c in range(4):
axs[c].imshow(encoded[0][c].cpu(), cmap='Greys')<jupyter_output><empty_output><jupyter_text>Ce tenseur 4x64x64 capture de nombreuses informations sur l'image, suffisamment, espรฉrons-le, pour que lorsque nous l'introduisons dans le dรฉcodeur, nous obtenions en retour quelque chose de trรจs proche de notre image d'entrรฉe :<jupyter_code># Dรฉcoder cette reprรฉsentation latente en une image
decoded = latents_to_pil(encoded)[0]
decoded<jupyter_output><empty_output><jupyter_text>Vous verrez de petites diffรฉrences si vous plissez les yeux ! Concentrez-vous sur l'ลil si vous ne voyez rien d'รฉvident. C'est assez impressionnant : cette image latente de 4x64x64 semble contenir beaucoup plus d'informations qu'une image de 64px.Cet auto-encodeur a รฉtรฉ entraรฎnรฉ ร rรฉduire une image ร une reprรฉsentation plus petite, puis ร recrรฉer l'image ร partir de cette version compressรฉe.Dans ce cas particulier, le facteur de compression est de 48, nous partons d'une image 3x512x512(cannaux x hauteur x largeur) et elle est compressรฉe en un vecteur latent 4x64x64. Chaque volume de 3x8x8 pixels dans l'image d'entrรฉe est compressรฉ en seulement 4 nombres (4x1x1). Il est possible de trouver des AEs avec un taux de compression plus รฉlevรฉ (par exemple f16 comme certains modรจles populaires de VQGAN) mais ร un moment donnรฉ, ils commencent ร introduire des artefacts que nous ne voulons pas.Pourquoi utiliser un auto-encodeur ? Nous pouvons faire de la diffusion dans l'espace des pixels oรน le modรจle reรงoit toutes les donnรฉes de l'image comme entrรฉes et produit une prรฉdiction de sortie de la mรชme forme. Mais cela implique le traitement d'un grand nombre de donnรฉes et rend la gรฉnรฉration d'images ร haute rรฉsolution trรจs coรปteuse sur le plan informatique. Certaines solutions consistent ร effectuer la diffusion ร basse rรฉsolution (64 px par exemple), puis ร entraรฎner un modรจle distinct pour augmenter l'รฉchelle de maniรจre rรฉpรฉtรฉe (comme avec D2/Imagen). La diffusion latente, quant ร elle, effectue le processus de diffusion dans cet espace latent, en utilisant les reprรฉsentations compressรฉes de notre AE plutรดt que des images brutes. Ces reprรฉsentations sont riches en informations et peuvent รชtre suffisamment petites pour รชtre gรฉrรฉes par du matรฉriel grand public. Une fois que nous avons gรฉnรฉrรฉ une nouvelle image en tant que reprรฉsentation latente, l'auto-encodeur peut prendre ces sorties latentes finales et les transformer en pixels rรฉels. The SchedulerNous devons maintenant parler de l'ajout de bruit.Pendant l'entraรฎnement, nous ajoutons du bruit ร une image, puis nous demandons au modรจle d'essayer de prรฉdire le bruit. Si nous ajoutons toujours beaucoup de bruit, le modรจle risque de ne pas avoir grand-chose ร faire. Si nous n'en ajoutons qu'une infime quantitรฉ, le modรจle ne pourra pas faire grand-chose avec les points de dรฉpart alรฉatoires que nous utilisons pour l'รฉchantillonnage. Au cours de l'entraรฎnement, la quantitรฉ de bruit varie donc en fonction d'une certaine distribution.Pendant l'รฉchantillonnage, nous voulons "dรฉbruiter" sur un certain nombre d'รฉtapes. Le nombre d'รฉtapes et la quantitรฉ de bruit que nous devons viser ร chaque รฉtape affecteront le rรฉsultat final.Le planificateur est chargรฉ de gรฉrer tous ces dรฉtails. Par exemple : `scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)` met en place un scheduler qui correspond ร celui utilisรฉ pour entraรฎner ce modรจle. Lorsque nous voulons รฉchantillonner sur un plus petit nombre de pas, nous le faisons avec scheduler.set_timesteps :<jupyter_code># Rรฉglage du nombre de pas d'รฉchantillonnage :
scheduler.set_timesteps(15)<jupyter_output><empty_output><jupyter_text>Vous pouvez voir comment notre nouvel ensemble d'รฉtapes correspond ร celles utilisรฉes dans l'entraรฎnement :<jupyter_code># Voyez รงa en termes de 1000 รฉtapes originales utilisรฉes pour l'entraรฎnement :
print(scheduler.timesteps)<jupyter_output>tensor([999.0000, 927.6429, 856.2857, 784.9286, 713.5714, 642.2143, 570.8571,
499.5000, 428.1429, 356.7857, 285.4286, 214.0714, 142.7143, 71.3571,
0.0000], dtype=torch.float64)<jupyter_text>Et quelle est la quantitรฉ de bruit prรฉsente ร chaque endroit :<jupyter_code># Examinez les niveaux de bruit รฉquivalents :
print(scheduler.sigmas)<jupyter_output>tensor([14.6146, 9.6826, 6.6780, 4.7746, 3.5221, 2.6666, 2.0606, 1.6156,
1.2768, 1.0097, 0.7913, 0.6056, 0.4397, 0.2780, 0.0292, 0.0000])<jupyter_text>Pendant l'รฉchantillonnage, nous partons d'un niveau de bruit รฉlevรฉ (en fait, notre entrรฉe sera du bruit pur) et nous "dรฉbruitons" progressivement jusqu'ร obtenir une image, selon ce calendrier.<jupyter_code># Affichage du planificateur de bruit :
plt.plot(scheduler.sigmas)
plt.title('Noise Schedule')
plt.xlabel('Sampling step')
plt.ylabel('sigma')
plt.show()<jupyter_output><empty_output><jupyter_text>Ce "sigma" est la quantitรฉ de bruit ajoutรฉe ร la reprรฉsentation latente. Voyons ce que cela donne en ajoutant un peu de bruit ร notre image codรฉe, puis en dรฉcodant cette version bruitรฉe :<jupyter_code>noise = torch.randn_like(encoded) # Bruit alรฉatoire
sampling_step = 10 # Equivalent ร une รฉtape 10 sur 15 dans la grille ci-dessus
# encoded_and_noised = scheduler.add_noise(encoded, noise, timestep) # Diffusers 0.3 et en dessous
encoded_and_noised = scheduler.add_noise(encoded, noise, timesteps=torch.tensor([scheduler.timesteps[sampling_step]]))
latents_to_pil(encoded_and_noised.float())[0] # Affichage<jupyter_output><empty_output><jupyter_text>ร quoi cela ressemble-t-il ร diffรฉrents pas de temps ? Faites l'expรฉrience et voyez par vous-mรชme !Si vous dรฉcommentez la cellule ci-dessous, vous verrez que dans ce cas, la fonction `scheduler.add_noise` ne fait qu'ajouter du bruit ร l'รฉchelle sigma : `noisy_samples = original_samples + noise * sigmas`<jupyter_code># ??scheduler.add_noise<jupyter_output><empty_output><jupyter_text>D'autres modรจles de diffusion peuvent รชtre entraรฎnรฉs avec diffรฉrentes approches de bruits et d'ordonnancement, dont certaines maintiennent la variance relativement constante entre les niveaux de bruit ("prรฉservation de la variance") avec diffรฉrentes astuces de mise ร l'รฉchelle et de mรฉlange au lieu d'avoir des latents bruitรฉs avec une variance de plus en plus รฉlevรฉe au fur et ร mesure que l'on ajoute du bruit ("explosion de la variance").Si nous voulons partir d'un bruit alรฉatoire au lieu d'une image bruitรฉe, nous devons la mettre ร l'รฉchelle de la plus grande valeur sigma utilisรฉe pendant l'entraรฎnement, soit ~14 dans ce cas. Et avant que ces latents bruitรฉs ne soient introduits dans le modรจle, ils sont ร nouveau mis ร l'รฉchelle dans l'รฉtape dite de prรฉ-conditionnement : `latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5)` (maintenant gรฉrรฉ par `latent_model_input = scheduler.scale_model_input(latent_model_input, t)`).Encore une fois, cette mise ร l'รฉchelle/prรฉ-conditionnement diffรจre entre les articles et les implรฉmentations, alors gardez un ลil sur ce point si vous travaillez avec un type diffรฉrent de modรจle de diffusion. Boucle ร partir de la version bruitรฉe de l'entrรฉe (AKA image2image)Voyons ce qui se passe lorsque nous utilisons notre image comme point de dรฉpart, en ajoutant un peu de bruit et en effectuant les derniรจres รฉtapes de dรฉbruitage dans la boucle avec un nouveau prompt.Nous allons utiliser une boucle similaire ร celle de la premiรจre dรฉmonstration, mais nous allons sauter les premiรจres รฉtapes `start_step`.Pour bruiter notre image, nous utiliserons un code comme celui montrรฉ ci-dessus, en utilisant le planificateur pour la bruiter ร un niveau รฉquivalent ร l'รฉtape 10 (`start_step`).<jupyter_code># Paramรจtres (les mรชmes que prรฉcรฉdemment, ร l'exception du nouveau prompt)
prompt = ["A colorful dancer, nat geo photo"]
height = 512 # hauteur par dรฉfaut de Stable Diffusion
width = 512 # largeur par dรฉfaut de Stable Diffusion
num_inference_steps = 30 # Nombre d'รฉtapes de dรฉbruitage
guidance_scale = 7.5 # รchelle pour un guidage sans classifieur
generator = torch.manual_seed(32) # Gรฉnรฉrateur de la graine pour crรฉer le bruit latent initial
batch_size = 1
# Preparation du texte (comme prรฉcรฉdemment)
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# Preparation du planificateur (dรฉfinition du nombre d'รฉtapes de l'infรฉrence)
scheduler.set_timesteps(num_inference_steps)
# Preparation des latents (bruitage appropriรฉ pour start_step)
start_step = 10
start_sigma = scheduler.sigmas[start_step]
noise = torch.randn_like(encoded)
latents = scheduler.add_noise(encoded, noise, timesteps=torch.tensor([scheduler.timesteps[start_step]]))
latents = latents.to(torch_device).float()
# Boucle
for i, t in tqdm(enumerate(scheduler.timesteps)):
if i >= start_step: # << C'est la seule modification que nous apportons ร la boucle.
# รฉtendre les latents si nous procรฉdons ร un guidage sans classifieur afin d'รฉviter de faire deux passages en avant
latent_model_input = torch.cat([latents] * 2)
sigma = scheduler.sigmas[i]
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
# prรฉdire le bruit rรฉsiduel
with torch.no_grad():
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
# effectuer le guidage
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# calculer l'รฉchantillon bruitรฉ prรฉcรฉdent x_t -> x_t-1
latents = scheduler.step(noise_pred, t, latents).prev_sample
latents_to_pil(latents)[0]<jupyter_output><empty_output><jupyter_text>Vous pouvez voir que certaines couleurs et structures de l'image sont conservรฉes, mais nous avons maintenant une nouvelle image ! Plus vous ajoutez de bruit et plus vous effectuez d'รฉtapes, plus l'image s'รฉloigne de l'image d'entrรฉe.C'est ainsi que fonctionne le cรฉlรจbre pipeline img2img. Encore une fois, si c'est votre objectif final, il existe des outils qui facilitent la tรขche !Mais vous pouvez voir que sous le capot, c'est la mรชme chose que la boucle de gรฉnรฉration, en sautant les premiรจres รฉtapes et en partant d'une image bruitรฉe plutรดt que d'une image purement bruitรฉe.Essayez de changer le nombre d'รฉtapes sautรฉes et de voir comment cela affecte la quantitรฉ de changement de l'image par rapport ร l'entrรฉe. Exploration du pipeline texte -> enchรขssementNous utilisons un modรจle d'encodage de texte pour transformer notre texte en un ensemble d'enchรขssements qui sont transmis au modรจle de diffusion en tant que conditionnement. Suivons un morceau de texte tout au long de ce processus et voyons comment il fonctionne.<jupyter_code># Notre prompt textuel
prompt = 'A picture of a puppy'<jupyter_output><empty_output><jupyter_text>Nous commenรงons par la tokenisation :<jupyter_code># Transformer le texte en une sรฉquence de tokens :
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
text_input['input_ids'][0] # Voir les tokens
# Voir les tokens individuels
for t in text_input['input_ids'][0][:8]: # Nous nous contenterons d'examiner les 8 premiers pour vous éviter un mur de <|endoftext|>
print(t, tokenizer.decoder.get(int(t)))<jupyter_output>tensor(49406) <|startoftext|>
tensor(320) a</w>
tensor(1674) picture</w>
tensor(539) of</w>
tensor(320) a</w>
tensor(6829) puppy</w>
tensor(49407) <|endoftext|>
tensor(49407) <|endoftext|><jupyter_text>Nous pouvons passer directement aux enchรขssements finaux (de sortie) de la maniรจre suivante :<jupyter_code># Rรฉcupรฉrer les enchรขssements de sortie
output_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
print('Shape:', output_embeddings.shape)
output_embeddings<jupyter_output>Shape: torch.Size([1, 77, 768])<jupyter_text>Nous passons nos *tokens* ร travers text_encoder et nous obtenons comme par magie des nombres que nous pouvons introduire dans le modรจle.Comment ces chiffres sont-ils gรฉnรฉrรฉs ? Les tokens sont transformรฉs en un ensemble d'enchรขssements d'entrรฉe, qui sont ensuite introduits dans le transformer pour obtenir les enchรขssements de sortie finaux.Pour obtenir ces enchรขssements d'entrรฉe, il y a en fait deux รฉtapes comme le rรฉvรจle l'inspection de `text_encoder.text_model.embeddings` :<jupyter_code>text_encoder.text_model.embeddings<jupyter_output><empty_output><jupyter_text>Enchรขssement de tokensLe *token* est envoyรฉ ร la fonction `token_embedding` pour le transformer en vecteur. Le nom de la fonction `get_input_embeddings` est trompeur puisque ces enchรขssements de *tokens* doivent รชtre combinรฉs avec les enchรขssements de positions avant d'รชtre utilisรฉs comme entrรฉes dans le modรจle ! Quoi qu'il en soit, examinons d'abord la partie relative ร l'enchรขssements des *tokens*.Nous pouvons regarder la couche d'enchรขssement :<jupyter_code># Accรฉder ร la couche enchรขssement
token_emb_layer = text_encoder.text_model.embeddings.token_embedding
token_emb_layer # Taille du vocabulaire 49408, emb_dim 768<jupyter_output><empty_output><jupyter_text>Et enchรขsser un *token* comme suit :<jupyter_code># Enchรขsser un *token*, dans ce cas, celui du "chiot"
embedding = token_emb_layer(torch.tensor(6829, device=torch_device))
embedding.shape # reprรฉsentation en 768-dim<jupyter_output><empty_output><jupyter_text>Cet unique *tokens* a รฉtรฉ associรฉ avec un vecteur ร 768 dimensions.Nous pouvons faire la mรชme chose avec tous les *tokens* du prompt pour obtenir tous les enchรขssements de *tokens* :<jupyter_code>token_embeddings = token_emb_layer(text_input.input_ids.to(torch_device))
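# (Ajout) la couche d'enchâssement n'est qu'une table de correspondance :
# token_emb_layer(torch.tensor(6829, device=torch_device)) équivaut à token_emb_layer.weight[6829]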
print(token_embeddings.shape) # taille du batch 1, 77 *tokens*, 768 valeurs pour chaque
token_embeddings<jupyter_output>torch.Size([1, 77, 768])<jupyter_text>Enchรขssements positionnelsLes enchรขssements positionnels indiquent au modรจle ร quel endroit d'une sรฉquence se trouve un *token*. Tout comme l'enchรขssement de * tokens*, il s'agit d'un ensemble de paramรจtres (qui peuvent รฉventuellement รชtre appris). Mais maintenant, au lieu de traiter ~50k *tokens* nous avons juste besoin d'un pour chaque position (77 au total) :<jupyter_code>pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
pos_emb_layer<jupyter_output><empty_output><jupyter_text>Nous pouvons obtenir l'enchรขssement positionnel pour chaque position :<jupyter_code>position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
position_embeddings = pos_emb_layer(position_ids)
print(position_embeddings.shape)
position_embeddings<jupyter_output>torch.Size([1, 77, 768])<jupyter_text>Combiner les enchรขssements de *tokens* et de positionsIl est temps de combiner les deux. Comment faire ? Il suffit de les additionner ! D'autres approches sont possibles, mais pour ce modรจle, c'est ainsi que nous procรฉdons.En les combinant de cette maniรจre, nous obtenons les enchรขssements d'entrรฉe finaux, prรชts ร รชtre introduits dans le *transformer* :<jupyter_code># En les combinant, nous obtenons les enchรขssements d'entrรฉe finaux
input_embeddings = token_embeddings + position_embeddings
print(input_embeddings.shape)
input_embeddings<jupyter_output>torch.Size([1, 77, 768])<jupyter_text>Nous pouvons vรฉrifier que ces rรฉsultats sont les mรชmes que ceux obtenus avec `text_encoder.text_model.embeddings` :<jupyter_code># La procรฉdure suivante combine toutes les รฉtapes ci-dessus (mais ne nous permet pas de les modifier !)
text_encoder.text_model.embeddings(text_input.input_ids.to(torch_device))<jupyter_output><empty_output><jupyter_text>Passage dans le *transformer* Nous voulons modifier les enchรขssements d'entrรฉe (en particulier les enchรขssements de tokens) avant de les envoyer dans le reste du modรจle, mais nous devons d'abord nous assurer que nous savons comment le faire. Nous avons lu le code de la mรฉthode `forward` du text_encoder, et nous nous sommes basรฉs sur ce code pour la mรฉthode `forward` du text_model que le text_encoder englobe. Pour l'inspecter vous-mรชme, tapez `??text_encoder.text_model.forward` et vous obtiendrez les informations sur la fonction et le code source, une astuce de dรฉbogage utile !Quoi qu'il en soit, nous pouvons copier les bits dont nous avons besoin pour obtenir ce que l'on appelle le "dernier รฉtat cachรฉ" et ainsi gรฉnรฉrer nos enchรขssements finaux :<jupyter_code>def get_output_embeds(input_embeddings):
# Le modรจle de texte de CLIP utilise le masquage causal, c'est pourquoi nous le prรฉparons ici :
bsz, seq_len = input_embeddings.shape[:2]
causal_attention_mask = text_encoder.text_model._build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)
# Obtenir les enchรขssements de sortie implique d'appeler le modรจle en passant output_hidden_states=True
# afin qu'il ne renvoie pas uniquement les prรฉdictions finales regroupรฉes :
encoder_outputs = text_encoder.text_model.encoder(
inputs_embeds=input_embeddings,
attention_mask=None, # Nous n'utilisons pas de masque d'attention, cela peut donc รชtre None.
causal_attention_mask=causal_attention_mask.to(torch_device),
output_attentions=None,
output_hidden_states=True, # Nous voulons le rรฉsultat des enchรขssements et non le rรฉsultat final.
return_dict=None,
)
# Seul l'รฉtat cachรฉ de sortie nous intรฉresse
output = encoder_outputs[0]
# Il existe une normalisation de couche finale par laquelle nous devons passer
output = text_encoder.text_model.final_layer_norm(output)
# Et maintenant, elles sont prรชtes !
return output
out_embs_test = get_output_embeds(input_embeddings) # Alimenter le modรจle ร l'aide de notre nouvelle fonction
print(out_embs_test.shape) # Vรฉrifier la forme de la sortie
out_embs_test # Inspecter la sortie<jupyter_output>torch.Size([1, 77, 768])<jupyter_text>Notez que cela correspond aux `output_embeddings` que nous avons vu au dรฉbut. Nous avons trouvรฉ comment diviser cette รฉtape ("obtenir les enchรขssements") en plusieurs sous-รฉtapes prรชtes ร รชtre modifiรฉes.Maintenant que nous avons mis en place ce processus, nous pouvons remplacer l'encodage d'entrรฉe d'un *token* par un nouvel encodage de notre choix, ce qui dans notre cas d'utilisation final, sera quelque chose que nous apprendrons. Pour dรฉmontrer le concept, remplaรงons l'encodage d'entrรฉe de "*puppy*" dans le prompt avec lequel nous avons jouรฉ avec l'enchรขssement du *token* 2368, obtenons un nouvel ensemble d'enchรขssement de sortie basรฉs sur celui-ci et utilisons-les pour gรฉnรฉrer une image afin de voir ce que nous obtenons :<jupyter_code>prompt = 'A picture of a puppy'
# Tokeniser
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
input_ids = text_input.input_ids.to(torch_device)
# Obtenir les enchรขssements des tokens
token_embeddings = token_emb_layer(input_ids)
# Le nouvel enchรขssement. Dans ce cas, il s'agit simplement de l'enchรขssement d'entrรฉe du token 2368
replacement_token_embedding = text_encoder.get_input_embeddings()(torch.tensor(2368, device=torch_device))
# Insรฉrer ceci dans les enchรขssements de token
token_embeddings[0, torch.where(input_ids[0]==6829)] = replacement_token_embedding.to(torch_device)
# Combiner avec l'enchâssement positionnel
input_embeddings = token_embeddings + position_embeddings
# Passage dans le transformer pour obtenir les enchรขssements finaux
modified_output_embeddings = get_output_embeds(input_embeddings)
print(modified_output_embeddings.shape)
modified_output_embeddings<jupyter_output>torch.Size([1, 77, 768])<jupyter_text>Les premiers sont identiques, les derniers ne le sont pas. Tout ce qui se trouve ร la position du *token* que nous remplaรงons et aprรจs sera affectรฉ.Si tout s'est bien passรฉ, nous devrions voir autre chose qu'un chiot lorsque nous les utiliserons pour gรฉnรฉrer une image. Et bien sรปr, c'est le cas !<jupyter_code># Gรฉnรฉration d'une image avec ces enchรขssements modifiรฉs
def generate_with_embs(text_embeddings):
height = 512 # hauteur par dรฉfaut de Stable Diffusion
width = 512 # largeur par dรฉfaut de Stable Diffusion
num_inference_steps = 30 # Nombre d'รฉtapes de dรฉbruitage
guidance_scale = 7.5 # รchelle pour un guidage sans classifieur
generator = torch.manual_seed(32) # Gรฉnรฉrateur de la graine pour crรฉer le bruit latent initial
batch_size = 1
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# Preparation du planificateur
scheduler.set_timesteps(num_inference_steps)
# Preparation des latents
latents = torch.randn(
(batch_size, unet.in_channels, height // 8, width // 8),
generator=generator,
)
latents = latents.to(torch_device)
latents = latents * scheduler.init_noise_sigma
# Boucle
for i, t in tqdm(enumerate(scheduler.timesteps)):
# รฉtendre les latents si nous procรฉdons ร un guidage sans classifieur afin d'รฉviter de faire deux passages en avant
latent_model_input = torch.cat([latents] * 2)
sigma = scheduler.sigmas[i]
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
# prรฉdire le bruit rรฉsiduel
with torch.no_grad():
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
# rรฉaliser un guidage
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# calculer l'รฉchantillon bruitรฉ prรฉcรฉdent x_t -> x_t-1
latents = scheduler.step(noise_pred, t, latents).prev_sample
return latents_to_pil(latents)[0]
generate_with_embs(modified_output_embeddings)<jupyter_output><empty_output><jupyter_text>Surprise ! Vous savez maintenant ce que signifie le *token* 2368. Que pouvons-nous en faire ? Pourquoi nous sommes-nous donnรฉ tout ce mal ? Eh bien, nous verrons bientรดt un cas d'utilisation plus convaincant, mais en rรฉsumรฉ, une fois que nous pouvons accรฉder aux enchรขssements de *tokens* et les modifier, nous pouvons faire des choses comme les remplacer par autre chose. Dans l'exemple que nous venons de faire, il s'agissait simplement d'un autre enchรขssement de *tokens* du vocabulaire du modรจle, ce qui รฉquivaut ร une simple modification du prompt. Mais nous pouvons รฉgalement mรฉlanger les *tokens*. Par exemple, voici un mi-chiot / mi-mouflette :<jupyter_code># Au cas oรน vous vous demanderiez comment obtenir le *token* d'un mot, ou l'enchรขssement d'un *token* :
prompt = 'skunk'
print('tokenizer(prompt):', tokenizer(prompt))
print('token_emb_layer([token_id]) shape:', token_emb_layer(torch.tensor([8797], device=torch_device)).shape)
prompt = 'A picture of a puppy'
# Tokeniser
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
input_ids = text_input.input_ids.to(torch_device)
# Obtenir les enchรขssements des tokens
token_embeddings = token_emb_layer(input_ids)
# Le nouvel enchรขssement. Il s'agit maintenant d'un mรฉlange d'enchรขssement des tokens "puppy" et "skunk"
puppy_token_embedding = token_emb_layer(torch.tensor(6829, device=torch_device))
skunk_token_embedding = token_emb_layer(torch.tensor(42194, device=torch_device))
replacement_token_embedding = 0.5*puppy_token_embedding + 0.5*skunk_token_embedding
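# (Ajout) les poids du mélange sont libres : essayez par exemple 0.7*puppy + 0.3*skunk pour doser l'effet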
# Insรฉrer ceci dans les enchรขssements de token
token_embeddings[0, torch.where(input_ids[0]==6829)] = replacement_token_embedding.to(torch_device)
# Combiner avec l'enchâssement positionnel
input_embeddings = token_embeddings + position_embeddings
# Passage dans le transformer pour obtenir les enchรขssements finaux
modified_output_embeddings = get_output_embeds(input_embeddings)
# Gรฉnรฉrer une image
generate_with_embs(modified_output_embeddings)<jupyter_output><empty_output><jupyter_text>Inversion textuelleNous pouvons donc insรฉrer un enchรขssement de *token* modifiรฉ et l'utiliser pour gรฉnรฉrer une image. Nous avons utilisรฉ l'enchรขssement de *token* pour "chat" dans l'exemple ci-dessus, mais que se passerait-il si nous pouvions "apprendre" un nouvel enchรขssement de *token* pour un concept spรฉcifique ? C'est l'idรฉe qui sous-tend l'"Inversion textuelle", dans laquelle quelques exemples d'images sont utilisรฉs pour crรฉer un nouvel enchรขssement de *token* :_Diagramme tirรฉ de l'[article de blog](https://textual-inversion.github.io/) sur l'inversion textuelle. Notez qu'il ne montre pas l'รฉtape des enchรขssements positionnels pour des raisons de simplicitรฉ._ Nous ne verrons pas comment cet entraรฎnement fonctionne, mais nous pouvons essayer de charger l'un de ces nouveaux "concepts" ร partir de la [bibliothรจque de concepts SD crรฉรฉe par la communautรฉ](https://huggingface.co/sd-concepts-library) et voir comment il s'intรจgre dans notre exemple ci-dessus. Nous utiliserons [https://huggingface.co/sd-concepts-library/birb-style](https://huggingface.co/sd-concepts-library/birb-style) puisque c'est le premier que nous avons crรฉรฉ. Tรฉlรฉchargez le fichier learned_embeds.bin ร partir de lร et tรฉlรฉchargez-le ร l'endroit oรน se trouve ce *notebook* avant d'exรฉcuter la cellule suivante :<jupyter_code>birb_embed = torch.load('learned_embeds.bin')
birb_embed.keys(), birb_embed['<birb-style>'].shape<jupyter_output><empty_output><jupyter_text>Nous obtenons un dictionnaire avec une clรฉ et l'enchรขssement de *token* correspondant. Comme dans l'exemple prรฉcรฉdent, remplaรงons l'enchรขssement de "puppy" par celui-ci et voyons ce qui se passe :<jupyter_code>prompt = 'A mouse in the style of puppy'
# Tokeniser
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
input_ids = text_input.input_ids.to(torch_device)
# Obtenir les enchรขssements des tokens
token_embeddings = token_emb_layer(input_ids)
# Le nouvel enchâssement : notre « mot » spécial appris (<birb-style>)
replacement_token_embedding = birb_embed['<birb-style>'].to(torch_device)
# Insรฉrer ceci dans les enchรขssements de token
token_embeddings[0, torch.where(input_ids[0]==6829)] = replacement_token_embedding.to(torch_device)
# Combiner avec l'enchâssement positionnel
input_embeddings = token_embeddings + position_embeddings
# Passage dans le transformer pour obtenir les enchรขssements finaux
modified_output_embeddings = get_output_embeds(input_embeddings)
# Gรฉnรฉrer une image
generate_with_embs(modified_output_embeddings)<jupyter_output><empty_output><jupyter_text>Le *token* a รฉtรฉ remplacรฉ par une expression qui reprรฉsente un style particulier de peinture, mais il pourrait tout aussi bien reprรฉsenter un objet ou une classe d'objets spรฉcifique. Encore une fois, il existe un [beau *notebook* d'infรฉrence](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) d'Hugging Face pour faciliter l'utilisation des diffรฉrents concepts, qui gรจre correctement l'utilisation des noms dans les prompts ("*A in the style of *") sans se prรฉoccuper de toutes ces choses manuelles. Mรฉlanger les enchรขssementsOutre le simple remplacement de l'enchรขssement des tokens d'un seul mot, il existe d'autres astuces que nous pouvons essayer. Par exemple, que se passe-t-il si nous crรฉons une "chimรจre" en calculant la moyenne des enchรขssements de deux prompts diffรฉrents ?<jupyter_code># Enchรขsser deux prompts
text_input1 = tokenizer(["A mouse"], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
text_input2 = tokenizer(["A leopard"], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings1 = text_encoder(text_input1.input_ids.to(torch_device))[0]
text_embeddings2 = text_encoder(text_input2.input_ids.to(torch_device))[0]
# Mix them together
mix_factor = 0.35
mixed_embeddings = (text_embeddings1*mix_factor + \
text_embeddings2*(1-mix_factor))
# Generate
generate_with_embs(mixed_embeddings)<jupyter_output><empty_output><jupyter_text>The UNet and CFG (*Classifier Free Guidance*)Now it's time to look at the actual diffusion model. This is typically a UNet that takes in the noisy latents (x) and predicts the noise. We use a conditional model that also takes in the timestep (t) and our text embedding (aka encoder_hidden_states) as conditioning. Feeding all of these into the model looks like this: `noise_pred = unet(latents, t, encoder_hidden_states=text_embeddings)["sample"]`We can try it out and see what the result looks like:<jupyter_code># Prepare the scheduler
scheduler.set_timesteps(num_inference_steps)
# What is our timestep?
t = scheduler.timesteps[0]
sigma = scheduler.sigmas[0]
# A noisy latent
latents = torch.randn(
(batch_size, unet.in_channels, height // 8, width // 8),
generator=generator,
)
latents = latents.to(torch_device)
latents = latents * scheduler.init_noise_sigma
# The text embedding
text_input = tokenizer(['A macaw'], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
# Run it through the UNet to predict the noise residual
with torch.no_grad():
noise_pred = unet(latents, t, encoder_hidden_states=text_embeddings)["sample"]
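# Added sketch (not in the original notebook): the text below derives the one-step
# denoised estimate latents_x0 = latents - sigma * noise_pred. We can peek at it here,
# decoding it with the latents_to_pil helper defined earlier in this notebook.
latents_x0_est = latents - sigma * noise_pred
im_x0_est = latents_to_pil(latents_x0_est)[0]  # PIL image of the model's current guess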
latents.shape, noise_pred.shape # We get predictions with the same shape as the input<jupyter_output><empty_output><jupyter_text>Given a set of noisy latents, the model predicts the noise component. We can remove this noise from the noisy latents to see what the output image looks like (`latents_x0 = latents - sigma * noise_pred`). And we can add most of that noise back to this predicted output to get the (hopefully slightly less noisy) input for the next diffusion step. To visualize this, let's generate another image, saving both the predicted output (x0) and the next step (xt-1) after each step:<jupyter_code>prompt = 'Oil painting of an otter in a top hat'
height = 512
width = 512
num_inference_steps = 50
guidance_scale = 8
generator = torch.manual_seed(32)
batch_size = 1
# Create a folder to store the results
!rm -rf steps/
!mkdir -p steps/
# Prepare the text
text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# Prepare the scheduler
scheduler.set_timesteps(num_inference_steps)
# Prepare the latents
latents = torch.randn(
(batch_size, unet.in_channels, height // 8, width // 8),
generator=generator,
)
latents = latents.to(torch_device)
latents = latents * scheduler.init_noise_sigma
# Sampling loop
for i, t in tqdm(enumerate(scheduler.timesteps)):
# รฉtendre les latents si nous procรฉdons ร un guidage sans classifieur afin d'รฉviter de faire deux passages en avant
latent_model_input = torch.cat([latents] * 2)
sigma = scheduler.sigmas[i]
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
    # predict the noise residual
with torch.no_grad():
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
    # perform guidance
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    # Get the predicted x0:
    # latents_x0 = latents - sigma * noise_pred # Computing it ourselves
    latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample # Using the scheduler (Diffusers 0.4 and above)
    # compute the previous noisy sample x_t -> x_t-1
latents = scheduler.step(noise_pred, t, latents).prev_sample
    # To PIL images
im_t0 = latents_to_pil(latents_x0)[0]
im_next = latents_to_pil(latents)[0]
    # Combine the two images and save them for later viewing
im = Image.new('RGB', (1024, 512))
im.paste(im_next, (0, 0))
im.paste(im_t0, (512, 0))
im.save(f'steps/{i:04}.jpeg')
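# Added sketch (an alternative if ffmpeg is unavailable): preview a few of the saved
# frames directly with PIL instead of rendering a video.
preview = Image.new('RGB', (1024, 512 * 3))
for row, step in enumerate([0, num_inference_steps // 2, num_inference_steps - 1]):
    preview.paste(Image.open(f'steps/{step:04}.jpeg'), (0, 512 * row))
preview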
# Make and show a progress video (change the width to 1024 for full resolution)
!ffmpeg -v 1 -y -f image2 -framerate 12 -i steps/%04d.jpeg -c:v libx264 -preset slow -qp 18 -pix_fmt yuv420p out.mp4
mp4 = open('out.mp4','rb').read()
data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
HTML("""
<video width=600 controls>
<source src="%s" type="video/mp4">
</video>
""" % data_url)<jupyter_output><empty_output><jupyter_text>La version de droite montre la "sortie finale" prรฉdite (x0) ร chaque รฉtape, et c'est ce qui est gรฉnรฉralement utilisรฉ pour les vidรฉos de progression, etc. La version de gauche reprรฉsente l'รฉtape suivante. Nous trouvons intรฉressant de comparer les deux, en regardant les vidรฉos de progression, on pourrait penser que des changements radicaux se produisent, en particulier aux premiers stades, mais comme les changements apportรฉs ร chaque รฉtape sont relativement faibles, le processus rรฉel est beaucoup plus progressif. CFG (*Classifier Free Guidance*)Par dรฉfaut, le modรจle ne fait pas souvent ce que nous lui demandons. Si nous voulons qu'il suive mieux le prompt, nous utilisons un hack appelรฉ CFG. Il y a une bonne explication dans cette vidรฉo [video](https://www.youtube.com/watch?v=344w5h24-h8) d'AI Coffee Break with Letitia.Dans le code, cela revient ร faire :`noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)`Cela fonctionne รฉtonnamment bien :) Essayez de changer le guidance_scale dans le code ci-dessus et voyez comment cela affecte les rรฉsultats. Jusqu'oรน pouvez-vous aller avant que les rรฉsultats n'empirent ? รchantillonnageIl y a encore de la complexitรฉ cachรฉe dans `latents = scheduler.step(noise_pred, i, latents)["prev_sample"]`. Comment l'รฉchantillonneur passe-t-il exactement des latents bruyants actuels ร une version lรฉgรจrement moins bruyante ? Pourquoi ne pas utiliser le modรจle en une seule รฉtape ? Existe-t-il d'autres faรงons de voir les choses ?Le modรจle tente de prรฉdire le bruit dans une image. Pour des valeurs de bruit faibles, nous supposons qu'il fait un assez bon travail. Pour des niveaux de bruit plus รฉlevรฉs, la tรขche est ardue ! Ainsi, au lieu de produire une image parfaite, les rรฉsultats ont tendance ร ressembler ร un dรฉsordre flou. Voir le dรฉbut de la vidรฉo citรฉe ร l'instant pour une illustration ! Les รฉchantillonneurs utilisent donc les prรฉdictions du modรจle pour s'en rapprocher lรฉgรจrement (en รฉliminant une partie du bruit), puis obtiennent une autre prรฉdiction basรฉe sur cette entrรฉe marginalement moins mauvaise, en espรฉrant que cela amรฉliorera le rรฉsultat de maniรจre itรฉrative.Les diffรฉrents รฉchantillonneurs procรจdent de diffรฉrentes maniรจres. Vous pouvez essayer d'inspecter le code de l'รฉchantillonneur LMS par dรฉfaut avec :<jupyter_code># ??scheduler.step<jupyter_output><empty_output><jupyter_text>GuidageOk, derniรจre astuce ! Comment pouvons-nous ajouter un contrรดle supplรฉmentaire ร ce processus de gรฉnรฉration ?ร chaque รฉtape, nous allons utiliser notre modรจle comme prรฉcรฉdemment pour prรฉdire la composante bruit de $x$. Ensuite, nous allons l'utiliser pour produire une image de sortie prรฉdite, et appliquer une fonction de perte ร cette image.Cette fonction peut รชtre n'importe quoi, mais nous allons faire une dรฉmonstration avec un exemple trรจs simple. Si nous voulons des images avec beaucoup de bleu, nous pouvons crรฉer une fonction de perte qui donne une perte รฉlevรฉe si les pixels ont une faible composante bleue :<jupyter_code>def blue_loss(images):
    # How far are the blue channel values from 0.9?
    error = torch.abs(images[:,2] - 0.9).mean() # [:,2] -> all images in the batch, just the blue channel
    return error<jupyter_output><empty_output><jupyter_text>During each update step, we find the gradient of this loss with respect to the current noisy latents and tweak them in the direction that reduces the loss, in addition to performing the normal update step:<jupyter_code>prompt = 'A campfire (oil on canvas)' #@param
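# Added sketch (an illustration, not from the original notebook): any differentiable
# function of the decoded images can be used for guidance, e.g. one that rewards
# brighter pictures instead of blue ones.
def brightness_loss(images):
    return torch.abs(images - 1.0).mean()  # distance of every pixel from pure white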
height = 512 # default height of Stable Diffusion
width = 512 # default width of Stable Diffusion
num_inference_steps = 50 #@param # Number of denoising steps
guidance_scale = 8 #@param # Scale for classifier-free guidance
generator = torch.manual_seed(32) # Seed generator to create the initial latent noise
batch_size = 1
blue_loss_scale = 200 #@param
# Prepare the text
text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
# And the unconditional input as before:
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# Prepare the scheduler
scheduler.set_timesteps(num_inference_steps)
# Prepare the latents
latents = torch.randn(
(batch_size, unet.in_channels, height // 8, width // 8),
generator=generator,
)
latents = latents.to(torch_device)
latents = latents * scheduler.init_noise_sigma
# Sampling loop
for i, t in tqdm(enumerate(scheduler.timesteps)):
# รฉtendre les latents si nous procรฉdons ร un guidage sans classifieur afin d'รฉviter de faire deux passages en avant
latent_model_input = torch.cat([latents] * 2)
sigma = scheduler.sigmas[i]
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
    # predict the noise residual
with torch.no_grad():
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
    # perform CFG
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    #### ADDITIONAL GUIDANCE ###
if i%5 == 0:
        # Set requires_grad on the latents
latents = latents.detach().requires_grad_()
        # Get the predicted x0:
# latents_x0 = latents - sigma * noise_pred
latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample
        # Decode to image space
denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1)
        # Calculate the loss
loss = blue_loss(denoised_images) * blue_loss_scale
        # Print it occasionally
if i%10==0:
print(i, 'loss:', loss.item())
        # Get the gradient
cond_grad = torch.autograd.grad(loss, latents)[0]
        # Modify the latents based on this gradient
latents = latents.detach() - cond_grad * sigma**2
    # Step with the scheduler
latents = scheduler.step(noise_pred, t, latents).prev_sample
latents_to_pil(latents)[0]<jupyter_output><empty_output> | diffusion-models-class/units/fr/unit3/stable_diffusion_deep_dive.ipynb/0 | {
"file_path": "diffusion-models-class/units/fr/unit3/stable_diffusion_deep_dive.ipynb",
"repo_id": "diffusion-models-class",
"token_count": 19297
} | 137 |
<jupyter_start><jupyter_text>Behind the pipeline (TensorFlow) Install the 🤗 *Transformers* library to run this *notebook*.<jupyter_code>!pip install transformers[sentencepiece]
from transformers import pipeline
classifier = pipeline("sentiment-analysis", model="tblard/tf-allocine")
classifier(
["J'ai attendu un cours d'HuggingFace toute ma vie.",
"Je dรฉteste tellement รงa !"]
)
from transformers import AutoTokenizer
checkpoint = "tblard/tf-allocine"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
raw_inputs = [
"J'ai attendu un cours d'HuggingFace toute ma vie.",
"Je dรฉteste tellement รงa !",
]
inputs = tokenizer(raw_inputs, padding=True, truncation=True, return_tensors="tf")
print(inputs)
from transformers import AutoModel
checkpoint = "tblard/tf-allocine"
model = AutoModel.from_pretrained(checkpoint, from_tf=True)
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)
from transformers import AutoModelForSequenceClassification
checkpoint = "tblard/tf-allocine"
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, from_tf=True)
outputs = model(**inputs)
print(outputs.logits.shape)
print(outputs.logits)
import tensorflow as tf
predictions = tf.math.softmax(outputs.logits, axis=-1)
print(predictions)
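# Added sketch: map each prediction to a human-readable label via the model config
# (uses the id2label mapping shown on the next line).
for probs in predictions.numpy():
    label_id = int(probs.argmax())
    print(model.config.id2label[label_id], float(probs[label_id]))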
model.config.id2label<jupyter_output><empty_output> | notebooks/course/fr/chapter2/section2_tf.ipynb/0 | {
"file_path": "notebooks/course/fr/chapter2/section2_tf.ipynb",
"repo_id": "notebooks",
"token_count": 473
} | 138 |
<jupyter_start><jupyter_text>Using pretrained models (PyTorch) Install the 🤗 Transformers library to run this *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece]
from transformers import pipeline
camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
results = camembert_fill_mask("Le camembert est <mask> :)")
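# Added sketch: inspect the top predictions returned by the fill-mask pipeline
for r in results:
    print(f"{r['token_str']!r} (score: {r['score']:.3f})")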
from transformers import CamembertTokenizer, CamembertForMaskedLM
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
from transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("camembert-base")
model = AutoModelForMaskedLM.from_pretrained("camembert-base")<jupyter_output><empty_output> | notebooks/course/fr/chapter4/section2_pt.ipynb/0 | {
"file_path": "notebooks/course/fr/chapter4/section2_pt.ipynb",
"repo_id": "notebooks",
"token_count": 259
} | 139 |
<jupyter_start><jupyter_text>Normalization and pre-tokenization. Install the 🤗 *Transformers* and 🤗 *Datasets* libraries to run this *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece]
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("camembert-base")
print(type(tokenizer.backend_tokenizer))
print(tokenizer.backend_tokenizer.normalizer.normalize_str("Héllò hôw are ü?"))
# Doesn't seem to work on French
tokenizer_fr = AutoTokenizer.from_pretrained("camembert-base")
tokenizer_fr.backend_tokenizer.normalizer.normalize_str("Bönjoùr commènt vas tü ?")
tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str("Hello, how are you?")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str("Hello, how are you?")
tokenizer = AutoTokenizer.from_pretrained("t5-small")
tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str("Hello, how are you?")<jupyter_output><empty_output> | notebooks/course/fr/chapter6/section4.ipynb/0 | {
"file_path": "notebooks/course/fr/chapter6/section4.ipynb",
"repo_id": "notebooks",
"token_count": 362
} | 140 |
<jupyter_start><jupyter_text>Question answering (TensorFlow) Install the Transformers and Datasets libraries to run this *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece]
!apt install git-lfs<jupyter_output><empty_output><jupyter_text>You will need to configure git; adapt your email and name in the following cell.<jupyter_code>!git config --global user.email "[email protected]"
!git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>You will also need to be logged in to the Hugging Face Hub. Run the following and enter your credentials.<jupyter_code>from huggingface_hub import notebook_login
notebook_login()
from datasets import load_dataset
raw_datasets = load_dataset("piaf")
# piaf does not have a validation split, so we create one
raw_datasets = raw_datasets['train']
raw_datasets = raw_datasets.train_test_split(test_size=0.2, shuffle=True)
raw_datasets
print("Context: ", raw_datasets["train"][0]["context"])
print("Question: ", raw_datasets["train"][0]["question"])
print("Answer: ", raw_datasets["train"][0]["answers"])
raw_datasets["train"].filter(lambda x: len(x["answers"]["text"]) != 1)
print(raw_datasets["test"][0]["answers"])
print(raw_datasets["test"][2]["answers"])
print(raw_datasets["test"][2]["context"])
print(raw_datasets["test"][2]["question"])
from transformers import AutoTokenizer
model_checkpoint = "camembert-base"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
tokenizer.is_fast
context = raw_datasets["train"][0]["context"]
question = raw_datasets["train"][0]["question"]
inputs = tokenizer(question, context)
tokenizer.decode(inputs["input_ids"])
inputs = tokenizer(
question,
context,
max_length=100,
truncation="only_second",
stride=50,
return_overflowing_tokens=True,
)
for ids in inputs["input_ids"]:
print(tokenizer.decode(ids))
inputs = tokenizer(
question,
context,
max_length=100,
truncation="only_second",
stride=50,
return_overflowing_tokens=True,
return_offsets_mapping=True,
)
inputs.keys()
inputs["overflow_to_sample_mapping"]
inputs = tokenizer(
raw_datasets["train"][2:6]["question"],
raw_datasets["train"][2:6]["context"],
max_length=100,
truncation="only_second",
stride=50,
return_overflowing_tokens=True,
return_offsets_mapping=True,
)
print(f"The 4 examples gave {len(inputs['input_ids'])} features.")
print(f"Here is where each comes from: {inputs['overflow_to_sample_mapping']}.")
answers = raw_datasets["train"][2:6]["answers"]
start_positions = []
end_positions = []
for i, offset in enumerate(inputs["offset_mapping"]):
sample_idx = inputs["overflow_to_sample_mapping"][i]
answer = answers[sample_idx]
start_char = answer["answer_start"][0]
end_char = answer["answer_start"][0] + len(answer["text"][0])
sequence_ids = inputs.sequence_ids(i)
    # Find the start and end of the context
idx = 0
while sequence_ids[idx] != 1:
idx += 1
context_start = idx
while sequence_ids[idx] == 1:
idx += 1
context_end = idx - 1
    # If the answer is not fully inside the context, the label is (0, 0)
if offset[context_start][0] > start_char or offset[context_end][1] < end_char:
start_positions.append(0)
end_positions.append(0)
else:
        # Otherwise it's the start and end token positions
idx = context_start
while idx <= context_end and offset[idx][0] <= start_char:
idx += 1
start_positions.append(idx - 1)
idx = context_end
while idx >= context_start and offset[idx][1] >= end_char:
idx -= 1
end_positions.append(idx + 1)
start_positions, end_positions
idx = 0
sample_idx = inputs["overflow_to_sample_mapping"][idx]
answer = answers[sample_idx]["text"][0]
start = start_positions[idx]
end = end_positions[idx]
labeled_answer = tokenizer.decode(inputs["input_ids"][idx][start : end + 1])
print(f"Theoretical answer: {answer}, labels give: {labeled_answer}")
idx = 4
sample_idx = inputs["overflow_to_sample_mapping"][idx]
answer = answers[sample_idx]["text"][0]
decoded_example = tokenizer.decode(inputs["input_ids"][idx])
print(f"Theoretical answer: {answer}, decoded example: {decoded_example}")
max_length = 384
stride = 128
def preprocess_training_examples(examples):
questions = [q.strip() for q in examples["question"]]
inputs = tokenizer(
questions,
examples["context"],
max_length=max_length,
truncation="only_second",
stride=stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length",
)
offset_mapping = inputs.pop("offset_mapping")
sample_map = inputs.pop("overflow_to_sample_mapping")
answers = examples["answers"]
start_positions = []
end_positions = []
for i, offset in enumerate(offset_mapping):
sample_idx = sample_map[i]
answer = answers[sample_idx]
start_char = answer["answer_start"][0]
end_char = answer["answer_start"][0] + len(answer["text"][0])
sequence_ids = inputs.sequence_ids(i)
        # Find the start and end of the context
idx = 0
while sequence_ids[idx] != 1:
idx += 1
context_start = idx
while sequence_ids[idx] == 1:
idx += 1
context_end = idx - 1
        # If the answer is not fully inside the context, the label is (0, 0)
if offset[context_start][0] > start_char or offset[context_end][1] < end_char:
start_positions.append(0)
end_positions.append(0)
else:
            # Otherwise it's the start and end token positions
idx = context_start
while idx <= context_end and offset[idx][0] <= start_char:
idx += 1
start_positions.append(idx - 1)
idx = context_end
while idx >= context_start and offset[idx][1] >= end_char:
idx -= 1
end_positions.append(idx + 1)
inputs["start_positions"] = start_positions
inputs["end_positions"] = end_positions
return inputs
train_dataset = raw_datasets["train"].map(
preprocess_training_examples,
batched=True,
remove_columns=raw_datasets["train"].column_names,
)
len(raw_datasets["train"]), len(train_dataset)
def preprocess_validation_examples(examples):
questions = [q.strip() for q in examples["question"]]
inputs = tokenizer(
questions,
examples["context"],
max_length=max_length,
truncation="only_second",
stride=stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length",
)
sample_map = inputs.pop("overflow_to_sample_mapping")
example_ids = []
for i in range(len(inputs["input_ids"])):
sample_idx = sample_map[i]
example_ids.append(examples["id"][sample_idx])
sequence_ids = inputs.sequence_ids(i)
offset = inputs["offset_mapping"][i]
inputs["offset_mapping"][i] = [
o if sequence_ids[k] == 1 else None for k, o in enumerate(offset)
]
inputs["example_id"] = example_ids
return inputs
validation_dataset = raw_datasets["test"].map(
preprocess_validation_examples,
batched=True,
remove_columns=raw_datasets["test"].column_names,
)
len(raw_datasets["test"]), len(validation_dataset)
small_eval_set = raw_datasets["test"].select(range(100))
trained_checkpoint = "etalab-ia/camembert-base-squadFR-fquad-piaf"
tokenizer = AutoTokenizer.from_pretrained(trained_checkpoint)
eval_set = small_eval_set.map(
preprocess_validation_examples,
batched=True,
remove_columns=raw_datasets["test"].column_names,
)
import tensorflow as tf
from transformers import TFAutoModelForQuestionAnswering
eval_set_for_model = eval_set.remove_columns(["example_id", "offset_mapping"])
eval_set_for_model.set_format("numpy")
batch = {k: eval_set_for_model[k] for k in eval_set_for_model.column_names}
trained_model = TFAutoModelForQuestionAnswering.from_pretrained(trained_checkpoint)
outputs = trained_model(**batch)
start_logits = outputs.start_logits.numpy()
end_logits = outputs.end_logits.numpy()
import collections
example_to_features = collections.defaultdict(list)
for idx, feature in enumerate(eval_set):
example_to_features[feature["example_id"]].append(idx)
import numpy as np
n_best = 20
max_answer_length = 30
predicted_answers = []
for example in small_eval_set:
example_id = example["id"]
context = example["context"]
answers = []
for feature_index in example_to_features[example_id]:
start_logit = start_logits[feature_index]
end_logit = end_logits[feature_index]
offsets = eval_set["offset_mapping"][feature_index]
start_indexes = np.argsort(start_logit)[-1 : -n_best - 1 : -1].tolist()
end_indexes = np.argsort(end_logit)[-1 : -n_best - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
                # Skip answers that are not fully inside the context
if offsets[start_index] is None or offsets[end_index] is None:
continue
                # Skip answers with a length that is either < 0 or > max_answer_length
if (
end_index < start_index
or end_index - start_index + 1 > max_answer_length
):
continue
answers.append(
{
"text": context[offsets[start_index][0] : offsets[end_index][1]],
"logit_score": start_logit[start_index] + end_logit[end_index],
}
)
best_answer = max(answers, key=lambda x: x["logit_score"])
predicted_answers.append({"id": example_id, "prediction_text": best_answer["text"]})
from datasets import load_metric
metric = load_metric("squad")
theoretical_answers = [
{"id": ex["id"], "answers": ex["answers"]} for ex in small_eval_set
]
print(predicted_answers[0])
print(theoretical_answers[0])
metric.compute(predictions=predicted_answers, references=theoretical_answers)
from tqdm.auto import tqdm
def compute_metrics(start_logits, end_logits, features, examples):
example_to_features = collections.defaultdict(list)
for idx, feature in enumerate(features):
example_to_features[feature["example_id"]].append(idx)
predicted_answers = []
for example in tqdm(examples):
example_id = example["id"]
context = example["context"]
answers = []
        # Loop over all the features associated with this example
for feature_index in example_to_features[example_id]:
start_logit = start_logits[feature_index]
end_logit = end_logits[feature_index]
offsets = features[feature_index]["offset_mapping"]
start_indexes = np.argsort(start_logit)[-1 : -n_best - 1 : -1].tolist()
end_indexes = np.argsort(end_logit)[-1 : -n_best - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
                    # Skip answers that are not fully inside the context
if offsets[start_index] is None or offsets[end_index] is None:
continue
                    # Skip answers with a length that is either < 0 or > max_answer_length
if (
end_index < start_index
or end_index - start_index + 1 > max_answer_length
):
continue
answer = {
"text": context[offsets[start_index][0] : offsets[end_index][1]],
"logit_score": start_logit[start_index] + end_logit[end_index],
}
answers.append(answer)
        # Select the answer with the best score
if len(answers) > 0:
best_answer = max(answers, key=lambda x: x["logit_score"])
predicted_answers.append(
{"id": example_id, "prediction_text": best_answer["text"]}
)
else:
predicted_answers.append({"id": example_id, "prediction_text": ""})
theoretical_answers = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples]
return metric.compute(predictions=predicted_answers, references=theoretical_answers)
compute_metrics(start_logits, end_logits, eval_set, small_eval_set)
model = TFAutoModelForQuestionAnswering.from_pretrained(model_checkpoint)
from transformers import DefaultDataCollator
data_collator = DefaultDataCollator(return_tensors="tf")
tf_train_dataset = model.prepare_tf_dataset(
train_dataset,
collate_fn=data_collator,
shuffle=True,
batch_size=16)
tf_eval_dataset = model.prepare_tf_dataset(
validation_dataset,
collate_fn=data_collator,
shuffle=False,
batch_size=16)
from transformers import create_optimizer
from transformers.keras_callbacks import PushToHubCallback
import tensorflow as tf
# The number of training steps is the number of samples in the dataset, divided by the batch size, then multiplied
# by the total number of epochs. Note that tf_train_dataset here is a batched tf.data.Dataset,
# not the original Hugging Face Dataset, so its len() is already num_samples // batch_size.
num_train_epochs = 3
num_train_steps = len(tf_train_dataset) * num_train_epochs
optimizer, schedule = create_optimizer(
init_lr=2e-5,
num_warmup_steps=0,
num_train_steps=num_train_steps,
weight_decay_rate=0.01,
)
model.compile(optimizer=optimizer)
# Train in mixed-precision float16
tf.keras.mixed_precision.set_global_policy("mixed_float16")
from transformers.keras_callbacks import PushToHubCallback
callback = PushToHubCallback(output_dir="camembert-base-finetuned-piaf", tokenizer=tokenizer)
# We're going to do validation afterwards, so no validation mid-training
model.fit(tf_train_dataset, callbacks=[callback], epochs=num_train_epochs)
predictions = model.predict(tf_eval_dataset)
compute_metrics(
predictions["start_logits"],
predictions["end_logits"],
validation_dataset,
raw_datasets["test"],
)
from transformers import pipeline
# Replace this with your own checkpoint
model_checkpoint = "huggingface-course/camembert-finetuned-piaf"
question_answerer = pipeline("question-answering", model=model_checkpoint)
context = """
๐ค Transformers est soutenu par les trois bibliothรจques d'apprentissage profond les plus populaires - Jax, PyTorch et TensorFlow - avec une intรฉgration transparente entre elles. Il est simple d'entraรฎner vos modรจles avec l'une avant de les charger pour l'infรฉrence avec l'autre.
"""
question = "Quelles sont les bibliothรจques d'apprentissage profond derriรจre ๐ค Transformers ?"
question_answerer(question=question, context=context)<jupyter_output><empty_output> | notebooks/course/fr/chapter7/section7_tf.ipynb/0 | {
"file_path": "notebooks/course/fr/chapter7/section7_tf.ipynb",
"repo_id": "notebooks",
"token_count": 6501
} | 141 |
<jupyter_start><jupyter_text>Ever since Stable Diffusion took the world by storm, people have been looking for ways to have more control over the results of the generation process. ControlNet provides a minimal interface allowing users to customize the generation process to a great extent. With [ControlNet](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet), users can easily condition the generation with different spatial contexts such as a depth map, a segmentation map, a scribble, keypoints, and so on!We can turn a cartoon drawing into a realistic photo with incredible coherence. Realistic Lofi Girl Or even use it as your interior designer. Before After You can turn your sketch scribble into an artistic drawing. Before After Also, bring some of the famous logos to life. Before After With ControlNet, the sky is the limit. In this notebook, we first introduce the [`StableDiffusionControlNetPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet) and then show how it can be applied for various control conditionings. Let's get controlling! ControlNet: TL;DRControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) by Lvmin Zhang and Maneesh Agrawala. It introduces a framework that allows for supporting various spatial contexts that can serve as additional conditionings to Diffusion models such as Stable Diffusion. Training ControlNet consists of the following steps:1. Cloning the pre-trained parameters of a Diffusion model, such as Stable Diffusion's latent UNet (referred to as the "trainable copy"), while also maintaining the pre-trained parameters separately (the "locked copy"). This is done so that the locked parameter copy can preserve the vast knowledge learned from a large dataset, whereas the trainable copy is employed to learn task-specific aspects. 2. The trainable and locked copies of the parameters are connected via "zero convolution" layers (see [here](https://github.com/lllyasviel/ControlNet#controlnet) for more information) which are optimized as a part of the ControlNet framework. This is a training trick to preserve the semantics already learned by the frozen model as the new conditions are trained.Pictorially, training a ControlNet looks like so: The diagram is taken from here.A sample from the training set for ControlNet-like training looks like this (additional conditioning is via edge maps): Prompt Original Image Conditioning "bird" Similarly, if we were to condition ControlNet with semantic segmentation maps, a training sample would be like so: Prompt Original Image Conditioning "big house" Every new type of conditioning requires training a new copy of ControlNet weights. The paper proposed 8 different conditioning models that are all [supported](https://huggingface.co/lllyasviel?search=controlnet) in Diffusers! For inference, both the pre-trained diffusion model weights and the trained ControlNet weights are needed. For example, using [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) with a ControlNet checkpoint requires roughly 700 million more parameters compared to just using the original Stable Diffusion model, which makes ControlNet a bit more memory-expensive for inference.Because the pre-trained diffusion models are locked during training, one only needs to switch out the ControlNet parameters when using a different conditioning. 
This makes it fairly simple to deploy multiple ControlNet weights in one application as we will see below. The `StableDiffusionControlNetPipeline`Before we begin, we want to give a huge shout-out to the community contributor [Takuma Mori](https://github.com/takuma104) for having led the integration of ControlNet into Diffusers ❤️.To experiment with ControlNet, Diffusers exposes the [`StableDiffusionControlNetPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet) similar to the [other Diffusers pipelines](https://huggingface.co/docs/diffusers/api/pipelines/overview). Central to the [`StableDiffusionControlNetPipeline`] is the `controlnet` argument which lets us provide a particular trained [`ControlNetModel`](https://huggingface.co/docs/diffusers/main/en/api/models#diffusers.ControlNetModel) instance while keeping the pre-trained diffusion model weights the same.We will explore different use cases with the `StableDiffusionControlNetPipeline` in this blog post. The first ControlNet model we are going to walk through is the [Canny model](https://huggingface.co/runwayml/stable-diffusion-v1-5) - this is one of the most popular models that generated some of the amazing images you are likely seeing on the internet.We welcome you to run the code snippets shown in the sections below with [this Colab Notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb).Before we begin, let's make sure we have all the necessary libraries installed:<jupyter_code>!pip install -q diffusers==0.14.0 transformers xformers git+https://github.com/huggingface/accelerate.git<jupyter_output><empty_output><jupyter_text>To process different conditionings depending on the chosen ControlNet, we also need to install some additional dependencies:- [OpenCV](https://opencv.org/)- [controlnet-aux](https://github.com/patrickvonplaten/controlnet_aux#controlnet-auxiliary-models) - a simple collection of pre-processing models for ControlNet<jupyter_code>!pip install -q opencv-contrib-python
!pip install -q controlnet_aux<jupyter_output><empty_output><jupyter_text>We will use the famous painting ["Girl With A Pearl"](https://en.wikipedia.org/wiki/Girl_with_a_Pearl_Earring) for this example. So, let's download the image and take a look:<jupyter_code>from diffusers import StableDiffusionControlNetPipeline
from diffusers.utils import load_image
image = load_image(
"https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
)
image<jupyter_output>WARNING:xformers:A matching Triton is not available, some optimizations will not be enabled.
Error caught was: No module named 'triton'<jupyter_text>Next, we will put the image through the canny pre-processor:<jupyter_code>import cv2
from PIL import Image
import numpy as np
image = np.array(image)
low_threshold = 100
high_threshold = 200
image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)
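# Added sketch (not in the original notebook): wrap the pre-processing in a helper so
# different Canny thresholds can be tried quickly (lower thresholds keep more edges).
def make_canny(pil_image, low=100, high=200):
    edges = cv2.Canny(np.array(pil_image), low, high)
    edges = np.concatenate([edges[:, :, None]] * 3, axis=2)  # single channel -> RGB
    return Image.fromarray(edges)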
canny_image<jupyter_output><empty_output><jupyter_text>As we can see, it is essentially edge detection. Now, we load [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) as well as the [ControlNet model for canny edges](https://huggingface.co/lllyasviel/sd-controlnet-canny). The models are loaded in half-precision (`torch.float16`) to allow for fast and memory-efficient inference.<jupyter_code>from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
import torch
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)<jupyter_output><empty_output><jupyter_text>Instead of using Stable Diffusion's default [PNDMScheduler](https://huggingface.co/docs/diffusers/main/en/api/schedulers/pndm), we use one of the currently fastest diffusion model schedulers, called [UniPCMultistepScheduler](https://huggingface.co/docs/diffusers/main/en/api/schedulers/unipc).Choosing an improved scheduler can drastically reduce inference time - in our case we are able to reduce the number of inference steps from 50 to 20 while more or less keeping the same image generation quality. More information regarding schedulers can be found [here](https://huggingface.co/docs/diffusers/main/en/using-diffusers/schedulers).<jupyter_code>from diffusers import UniPCMultistepScheduler
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)<jupyter_output><empty_output><jupyter_text>Instead of loading our pipeline directly to the GPU, we enable smart CPU offloading, which can be achieved with the [`enable_model_cpu_offload` function](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet#diffusers.StableDiffusionControlNetPipeline.enable_model_cpu_offload).Remember that during inference, diffusion models such as Stable Diffusion require not just one but multiple model components that are run sequentially.In the case of Stable Diffusion with ControlNet, we first use the CLIP text encoder, then the diffusion model UNet and the ControlNet, then the VAE decoder, and finally run a safety checker.Most components are only run once during the diffusion process and are thus not required to occupy GPU memory all the time. By enabling smart model offloading, we make sure that each component is only loaded onto the GPU when it's needed, so that we can significantly reduce memory consumption without significantly slowing down inference.**Note**: When running `enable_model_cpu_offload`, do not manually move the pipeline to GPU with `.to("cuda")` - once CPU offloading is enabled, the pipeline automatically takes care of GPU memory management.<jupyter_code>pipe.enable_model_cpu_offload()
assert len(imgs) == rows * cols
w, h = imgs[0].size
grid = Image.new("RGB", size=(cols * w, rows * h))
grid_w, grid_h = grid.size
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
prompt = ", best quality, extremely detailed"
prompt = [t + prompt for t in ["Sandra Oh", "Kim Kardashian", "rihanna", "taylor swift"]]
generator = [torch.Generator(device="cpu").manual_seed(2) for i in range(len(prompt))]
output = pipe(
prompt,
canny_image,
negative_prompt=["monochrome, lowres, bad anatomy, worst quality, low quality"] * len(prompt),
generator=generator,
num_inference_steps=20,
)
image_grid(output.images, 2, 2)<jupyter_output><empty_output><jupyter_text>We can effortlessly combine ControlNet with fine-tuning too! For example, we can fine-tune a model with [DreamBooth](https://huggingface.co/docs/diffusers/main/en/training/dreambooth), and use it to render ourselves into different scenes.In this post, we are going to use our beloved Mr Potato Head as an example to show how to use ControlNet with DreamBooth.We can use the same ControlNet; however, instead of using Stable Diffusion 1.5, we are going to load the [Mr Potato Head model](https://huggingface.co/sd-dreambooth-library/mr-potato-head) into our pipeline - Mr Potato Head is a Stable Diffusion model fine-tuned on the Mr Potato Head concept using DreamBooth 🔥Let's run the above commands again, keeping the same `controlnet` though!<jupyter_code>model_id = "sd-dreambooth-library/mr-potato-head"
pipe = StableDiffusionControlNetPipeline.from_pretrained(
model_id,
controlnet=controlnet,
torch_dtype=torch.float16,
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.enable_xformers_memory_efficient_attention()<jupyter_output><empty_output><jupyter_text>Now let's make Mr Potato Head pose for [Johannes Vermeer](https://en.wikipedia.org/wiki/Johannes_Vermeer)!<jupyter_code>generator = torch.manual_seed(2)
prompt = "a photo of sks mr potato head, best quality, extremely detailed"
output = pipe(
prompt,
canny_image,
negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
generator=generator,
num_inference_steps=20,
)<jupyter_output><empty_output><jupyter_text>It is noticeable that Mr Potato Head is not the best candidate, but he tried his best and did a pretty good job of capturing some of the essence.<jupyter_code>output.images[0]<jupyter_output><empty_output><jupyter_text>Another exclusive application of ControlNet is that we can take a pose from one image and reuse it to generate a different image with the exact same pose. So in this next example, we are going to teach superheroes how to do yoga using [Open Pose ControlNet](https://huggingface.co/lllyasviel/sd-controlnet-openpose)!First, we will need to get some images of people doing yoga:<jupyter_code>urls = "yoga1.jpeg", "yoga2.jpeg", "yoga3.jpeg", "yoga4.jpeg"
imgs = [
load_image("https://hf.co/datasets/YiYiXu/controlnet-testing/resolve/main/" + url)
for url in urls
]
image_grid(imgs, 2, 2)<jupyter_output><empty_output><jupyter_text>Now let's extract yoga poses using the OpenPose pre-processors that are handily available via `controlnet_aux`.<jupyter_code>from controlnet_aux import OpenposeDetector
model = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
poses = [model(img) for img in imgs]
image_grid(poses, 2, 2)<jupyter_output><empty_output><jupyter_text>To use these yoga poses to generate new images, let's create an [Open Pose ControlNet](https://huggingface.co/lllyasviel/sd-controlnet-openpose). We will generate some super-hero images in the yoga poses shown above. Let's go!<jupyter_code>controlnet = ControlNetModel.from_pretrained(
"fusing/stable-diffusion-v1-5-controlnet-openpose", torch_dtype=torch.float16
)
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionControlNetPipeline.from_pretrained(
model_id,
controlnet=controlnet,
torch_dtype=torch.float16,
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.enable_xformers_memory_efficient_attention()<jupyter_output><empty_output><jupyter_text>Now it's yoga time!<jupyter_code>generator = [torch.Generator(device="cpu").manual_seed(2) for i in range(4)]
prompt = "super-hero character, best quality, extremely detailed"
output = pipe(
[prompt] * 4,
poses,
negative_prompt=["monochrome, lowres, bad anatomy, worst quality, low quality"] * 4,
generator=generator,
num_inference_steps=20,
)
image_grid(output.images, 2, 2)<jupyter_output><empty_output> | notebooks/diffusers/controlnet.ipynb/0 | {
"file_path": "notebooks/diffusers/controlnet.ipynb",
"repo_id": "notebooks",
"token_count": 4606
} | 142 |
<jupyter_start><jupyter_text>🧨 Fast Stable Diffusion in free Colab with JAX / Flax on TPU!🤗 Hugging Face [Diffusers](https://github.com/huggingface/diffusers) supports Flax since version `0.5.1`! This allows for snappy inference on Google TPUs, such as those available in Colab, Kaggle or through Google Cloud Platform.If you want more details about how Stable Diffusion works using JAX, please refer to [our blog](https://huggingface.co/blog/stable_diffusion_jax) or [this Colab notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb). Initial Steps<jupyter_code>#@title Install required libraries
!pip install huggingface_hub==0.10.0 gradio
#@title Login to the Hugging Face Hub
#@markdown Make sure you have also read and accepted the LICENSE of the [Stable Diffusion model](https://huggingface.co/CompVis/stable-diffusion-v1-4), otherwise you may run into an error
from huggingface_hub import notebook_login
!git config --global credential.helper store
notebook_login()<jupyter_output><empty_output><jupyter_text>SetupRun all cells for setting up JAX and the model<jupyter_code>#@title Set up JAX
#@markdown If you see an error, make sure you are using a TPU backend. Select `Runtime` in the menu above, then select the option "Change runtime type" and then select `TPU` under the `Hardware accelerator` setting.
!pip install --upgrade jax jaxlib
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu('tpu_driver_20221011')
!pip install flax diffusers transformers ftfy
jax.devices()
#@title Import required libraries
import numpy as np
import jax
import jax.numpy as jnp
from pathlib import Path
from jax import pmap
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from PIL import Image
from huggingface_hub import notebook_login
from diffusers import FlaxStableDiffusionPipeline
import torch
def image_grid(imgs, rows, cols):
w,h = imgs[0].size
grid = Image.new('RGB', size=(cols*w, rows*h))
for i, img in enumerate(imgs): grid.paste(img, box=(i%cols*w, i//cols*h))
return grid
#@title Load the model
#@markdown It's safe to ignore the warning messages, everything is okay
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16)
p_params = replicate(params)<jupyter_output><empty_output><jupyter_text>Run!<jupyter_code>#@title Set and go!
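# Added note: `replicate` stacks a copy of the weights for every TPU core, so each
# parameter in p_params gains a leading axis of size jax.device_count() (8 on Colab TPUs).
print(f"Parameters replicated across {jax.device_count()} devices")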
#@markdown First run takes ~50s as it compiles stuff. Then, it should take around ~8s per prompt!
prompt = "the spirit of a tamagotchi wandering in the city of Vienna" #@param {type:"string"}
num_inference_steps = 50 #@param {type:"integer"}
seed = -1 #@param {type:"integer"}
#@markdown `-1` will set a random seed. You can replace it with any integer for reproducible results
if(seed == -1):
import random
random_int = random.randint(0, 2147483647)
real_seed = random_int
else:
real_seed = seed
prng_seed = jax.random.PRNGKey(real_seed)
prng_seed = jax.random.split(prng_seed, jax.device_count())
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)
prompt_ids = shard(prompt_ids)
images = pipeline(prompt_ids, p_params, prng_seed, num_inference_steps, jit=True).images
images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
if(seed == -1):
print(f"Seed used {real_seed}")
image_grid(images_pil, 2, 4)
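# Added sketch: persist the generated images to disk for later use
for idx, img in enumerate(images_pil):
    img.save(f"generated_{idx}.png")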
#@title Easy to use and shareable UI with Gradio
#@markdown Run your demo using a Gradio UI like on this screenshot
#@markdown <img src="https://i.imgur.com/H6MtbI5.png" width="900" />
import gradio as gr
def inference(prompt, seed):
all_images = []
print(seed)
prng_seed = jax.random.PRNGKey(int(seed))
prng_seed = jax.random.split(prng_seed, jax.device_count())
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)
prompt_ids = shard(prompt_ids)
images = pipeline(prompt_ids, p_params, prng_seed, num_inference_steps, jit=True).images
images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
return images_pil
import random
random_int = random.randint(0, 2147483647)
with gr.Blocks() as demo:
with gr.Row():
with gr.Column():
prompt = gr.Textbox(label="prompt")
seed = gr.Number(label="seed", value=random_int)
run = gr.Button(value="Run")
with gr.Column():
gallery = gr.Gallery(show_label=False).style(grid=[2])
run.click(inference, inputs=[prompt, seed], outputs=gallery)
gr.Examples([["the spirit of a tamagotchi wandering in the city of Vienna", 1,1]], [prompt], gallery, inference, cache_examples=False)
demo.launch(debug=True)<jupyter_output>Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch().
Running on public URL: https://28004.gradio.app
This share link expires in 72 hours. For free permanent hosting, check out Spaces: https://huggingface.co/spaces | notebooks/diffusers/stable_diffusion_fast_jax.ipynb/0 | {
"file_path": "notebooks/diffusers/stable_diffusion_fast_jax.ipynb",
"repo_id": "notebooks",
"token_count": 1808
} | 143 |
<jupyter_start><jupyter_text>Before we can browse the rest of the notebook, we need to install the dependencies: this example uses `datasets` and `transformers`. To use TPUs on Colab, we need to install `torch_xla`, and the last line installs `accelerate` from source since the features we are using are very recent and not released yet.<jupyter_code>! pip install datasets transformers evaluate
! pip install cloud-tpu-client==0.10 torch==2.0.0
! pip install https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-2.0-cp310-cp310-linux_x86_64.whl
! pip install git+https://github.com/huggingface/accelerate<jupyter_output><empty_output><jupyter_text>Here are all the imports we will need for this notebook.<jupyter_code>import torch
from torch.utils.data import DataLoader
from accelerate import Accelerator, DistributedType
from datasets import load_dataset, load_metric
from transformers import (
AdamW,
AutoModelForSequenceClassification,
AutoTokenizer,
get_linear_schedule_with_warmup,
set_seed,
)
from tqdm.auto import tqdm
import datasets
import transformers<jupyter_output>WARNING:root:TPU has started up successfully with version pytorch-1.8<jupyter_text>This notebook can run with any model checkpoint on the [model hub](https://huggingface.co/models) that has a version with a classification head. Here we select [`bert-base-cased`](https://huggingface.co/bert-base-cased).<jupyter_code>model_checkpoint = "bert-base-cased"<jupyter_output><empty_output><jupyter_text>The next two sections explain how we load and prepare our data for our model, If you are only interested on seeing how ๐ค Accelerate works, feel free to skip them (but make sure to execute all cells!) Load the data To load the dataset, we use the `load_dataset` function from ๐ค Datasets. It will download and cache it (so the download won't happen if we restart the notebook).<jupyter_code>raw_datasets = load_dataset("glue", "mrpc")<jupyter_output>WARNING:datasets.builder:Reusing dataset glue (/root/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)<jupyter_text>The `raw_datasets` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set (with more keys for the mismatched validation and test set in the special case of `mnli`).<jupyter_code>raw_datasets<jupyter_output><empty_output><jupyter_text>To access an actual element, you need to select a split first, then give an index:<jupyter_code>raw_datasets["train"][0]<jupyter_output><empty_output><jupyter_text>To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.<jupyter_code>import datasets
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=10):
assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset)-1)
while pick in picks:
pick = random.randint(0, len(dataset)-1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
for column, typ in dataset.features.items():
if isinstance(typ, datasets.ClassLabel):
df[column] = df[column].transform(lambda i: typ.names[i])
display(HTML(df.to_html()))
show_random_elements(raw_datasets["train"])<jupyter_output><empty_output><jupyter_text>Preprocess the data Before we can feed those texts to our model, we need to preprocess them. This is done by a ๐ค Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:we get a tokenizer that corresponds to the model architecture we want to use,we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>By default (unless you pass `use_fast=False` to the call above) it will use one of the fast tokenizers (backed by Rust) from the 🤗 Tokenizers library. Those fast tokenizers are available for almost all models, but if you got an error with the previous call, remove that argument.You can directly call this tokenizer on one sentence or a pair of sentences:<jupyter_code>tokenizer("Hello, this one sentence!", "And this sentence goes with it.")<jupyter_output><empty_output><jupyter_text>Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested. We can then write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. We also need all of our samples to have the same length (we will train on TPU and they need fixed shapes so we won't pad to the maximum length of a batch) which is done with `padding=True`. The `max_length` argument is used both for the truncation and padding (short inputs are padded to that length and long inputs are truncated to it).<jupyter_code>def tokenize_function(examples):
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, padding="max_length", max_length=128)
return outputs<jupyter_output><empty_output><jupyter_text>This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:<jupyter_code>tokenize_function(raw_datasets['train'][:5])<jupyter_output><empty_output><jupyter_text>To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.<jupyter_code>tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"])<jupyter_output><empty_output><jupyter_text>Even better, the results are automatically cached by the ๐ค Datasets library to avoid spending time on this step the next time you run your notebook. The ๐ค Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. ๐ค Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently.Lastly, we remove the columns that our model will not use. We also need to rename the `label` column to `labels` as this is what our model will expect.<jupyter_code>tokenized_datasets = tokenized_datasets.rename_column("label", "labels")<jupyter_output><empty_output><jupyter_text>To double-check we only have columns that are accepted as arguments for the model we will instantiate, we can look at them here.<jupyter_code>tokenized_datasets["train"].features<jupyter_output><empty_output><jupyter_text>The model we will be using is a `BertModelForSequenceClassification`. We can check its signature in the [Transformers documentation](https://huggingface.co/transformers/model_doc/bert.htmltransformers.BertForSequenceClassification) and all seems to be right! The last step is to set our datasets in the `"torch"` format, so that each item in it is now a dictionary with tensor values.<jupyter_code>tokenized_datasets.set_format("torch")<jupyter_output><empty_output><jupyter_text>A first look at the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since all our tasks are about sentence classification, we use the `AutoModelForSequenceClassification` class. Like with the tokenizer, the from_pretrained method will download and cache the model for us. The only thing we have to specify is the number of labels for our problem (which is 2 here):<jupyter_code>from transformers import AutoModelForSequenceClassification
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=2)<jupyter_output>Some weights of the model checkpoint at bert-base-cased were not used when initializing BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias']
- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of BertForSequenceClassification were not initialized from the model checkpoint at b[...]<jupyter_text>The warning is telling us we are throwing away some weights (the `cls.predictions` and `cls.seq_relationship` weights of the pretraining head) and randomly initializing some others (the `classifier` layer). This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. Note that we are only creating the model here to look at it and debug problems. We will create the model we will train inside our training function: to train on TPU in colab, we have to create a big training function that will be executed on each core of the TPU. It's fine to use the datasets defined before (they will be copied to each TPU core) but the model itself will need to be re-instantiated and placed on each device for it to work. Now to get the data we need to define our training and evaluation dataloaders. Again, we only create them here for debugging purposes; they will be re-instantiated in our training function, which is why we define a function that builds them.<jupyter_code>def create_dataloaders(train_batch_size=8, eval_batch_size=32):
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, batch_size=train_batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, batch_size=eval_batch_size
)
return train_dataloader, eval_dataloader<jupyter_output><empty_output><jupyter_text>Let's have a look at our train and evaluation dataloaders to check a batch can go through the model.<jupyter_code>train_dataloader, eval_dataloader = create_dataloaders()<jupyter_output><empty_output><jupyter_text>We just loop through one batch. Since our datasets elements are dictionaries of tensors, it's the same for our batch and we can have a quick look at all the shapes. Note that this cell takes a bit of time to execute since we run a batch of our data through the model on the CPU (if you changed the checkpoint to a bigger model, it might take too much time so comment it out).โ **WARNING: Running this cell will cause training_function to malfunction, as model will be used before notebook_launcher**<jupyter_code>for batch in train_dataloader:
print({k: v.shape for k, v in batch.items()})
outputs = model(**batch)
    break<jupyter_output>{'attention_mask': torch.Size([8, 128]), 'input_ids': torch.Size([8, 128]), 'labels': torch.Size([8]), 'token_type_ids': torch.Size([8, 128])}<jupyter_text>The output of our model is a `SequenceClassifierOutput`, with the `loss` (since we provided labels) and the `logits` (of shape 8 by 2: our batch size by the number of labels).<jupyter_code>outputs<jupyter_output><empty_output><jupyter_text>The last piece we will need for the model evaluation is the metric. The `datasets` library provides a function `load_metric` that allows us to easily create a `datasets.Metric` object we can use.<jupyter_code>metric = load_metric("glue", "mrpc")<jupyter_output><empty_output><jupyter_text>To use this object on some predictions, we call the `compute` method to get our metric results:<jupyter_code>predictions = outputs.logits.detach().argmax(dim=-1)
metric.compute(predictions=predictions, references=batch["labels"])<jupyter_output><empty_output><jupyter_text>Unsurprisingly, our model with its random head does not perform well, which is why we need to fine-tune it! Fine-tuning the model We are now ready to fine-tune this model on our dataset. As mentioned before, everything related to training needs to be in one big training function that will be executed on each TPU core, thanks to our `notebook_launcher`. It will use this dictionary of hyperparameters, so tweak anything you like in here!<jupyter_code>hyperparameters = {
"learning_rate": 2e-5,
"num_epochs": 3,
"train_batch_size": 8, # Actual batch size will this x 8
"eval_batch_size": 32, # Actual batch size will this x 8
"seed": 42,
}<jupyter_output><empty_output><jupyter_text>The two most important things to remember for training on TPUs are that your accelerator object has to be defined inside your training function, and your model should be created outside the training function. If you define your Accelerator in another cell that gets executed before the final launch (for debugging), you will need to restart your notebook, as the line `accelerator = Accelerator()` needs to be executed for the first time inside the training function spawned on each TPU core. This is because that line will look for a TPU device, and if you set it outside of the distributed training launched by `notebook_launcher`, it will perform setup that cannot be undone in your runtime and you will only have access to one TPU core until you restart the notebook. The reason we declare the model outside the training function is that, on a TPU launched from a notebook, the same model object is used and passed back and forth between all the cores automatically. Since we can't explore each piece in separate cells, comments have been left in the code. This is all pretty standard and you will notice how little the code changes from a regular training loop! The main lines added are:- `accelerator = Accelerator()` to initialize the distributed setup,- sending all objects to `accelerator.prepare`,- replacing `loss.backward()` with `accelerator.backward(loss)`,- using `accelerator.gather` to gather all predictions and labels before storing them in our list of predictions/labels,- truncating predictions and labels, as the prepared evaluation dataloader has a few more samples to make batches of the same size on each process. The first three are for distributed training, the last two for distributed evaluation. If you don't care about distributed evaluation, you can also just replace that part with your standard evaluation loop launched on the main process only. Other changes (which are purely cosmetic, to make the output of the training readable) are:- some logging behavior behind an `if accelerator.is_main_process:` check,- disabling the progress bar if `accelerator.is_main_process` is `False`,- using `accelerator.print` instead of `print`.<jupyter_code>def training_function(model):
# Initialize accelerator
accelerator = Accelerator()
# To have only one message (and not 8) per logs of Transformers or Datasets, we set the logging verbosity
# to INFO for the main process only.
if accelerator.is_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
train_dataloader, eval_dataloader = create_dataloaders(
train_batch_size=hyperparameters["train_batch_size"], eval_batch_size=hyperparameters["eval_batch_size"]
)
    # The seed needs to be set before we instantiate the model, as it will determine the random head.
set_seed(hyperparameters["seed"])
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=hyperparameters["learning_rate"])
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
num_epochs = hyperparameters["num_epochs"]
# Instantiate learning rate scheduler after preparing the training dataloader as the prepare method
# may change its length.
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=len(train_dataloader) * num_epochs,
)
# Instantiate a progress bar to keep track of training. Note that we only enable it on the main
# process to avoid having 8 progress bars.
progress_bar = tqdm(range(num_epochs * len(train_dataloader)), disable=not accelerator.is_main_process)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
model.eval()
all_predictions = []
all_labels = []
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
# We gather predictions and labels from the 8 TPUs to have them all.
all_predictions.append(accelerator.gather(predictions))
all_labels.append(accelerator.gather(batch["labels"]))
# Concatenate all predictions and labels.
# The last thing we need to do is to truncate the predictions and labels we concatenated
# together as the prepared evaluation dataloader has a little bit more elements to make
# batches of the same size on each process.
all_predictions = torch.cat(all_predictions)[:len(tokenized_datasets["validation"])]
all_labels = torch.cat(all_labels)[:len(tokenized_datasets["validation"])]
eval_metric = metric.compute(predictions=all_predictions, references=all_labels)
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)<jupyter_output><empty_output><jupyter_text>And we're ready for launch! It's super easy with the `notebook_launcher` from the Accelerate library.<jupyter_code>from accelerate import notebook_launcher
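# Note (added, not in the original notebook): `notebook_launcher` figures out the number of
# processes from the environment (8 processes on a Colab TPU). If you ever want to override
# this, e.g. to debug on a single process, it accepts a `num_processes` argument, roughly:
# notebook_launcher(training_function, (model,), num_processes=1)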
notebook_launcher(training_function, (model,))<jupyter_output>loading configuration file https://huggingface.co/bert-base-cased/resolve/main/config.json from cache at /root/.cache/huggingface/transformers/a803e0468a8fe090683bdc453f4fac622804f49de86d7cecaee92365d4a0f829.a64a22196690e0e82ead56f388a3ef3a50de93335926ccfa20610217db589307
Model config BertConfig {
"architectures": [
"BertForMaskedLM"
],
"attention_probs_dropout_prob": 0.1,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"position_embedding_type": "absolute",
"transformers_version": "4.5.1",
"type_vocab_size": 2,
"use_cache": true,
"vocab_size": 28996
}
loading weights file https://huggingface.co/bert-base-cased/resolve/main/pytorch_model.bin from cache at /root/.cache/huggingface/transfo[...] | notebooks/examples/accelerate_examples/simple_nlp_example.ipynb/0 | {
"file_path": "notebooks/examples/accelerate_examples/simple_nlp_example.ipynb",
"repo_id": "notebooks",
"token_count": 6454
} | 144 |
<jupyter_start><jupyter_text>Fine-tune Pix2Struct using Hugging Face `transformers` and `datasets` 🤗 This tutorial is largely based on the [GiT tutorial](https://colab.research.google.com/drive/1HLxgrG7xZJ9FvXckNG61J72FkyrbqKAA?usp=sharing) on how to fine-tune GiT on a custom image captioning dataset. Here we will use a dummy dataset of [football players](https://huggingface.co/datasets/ybelkada/football-dataset) ⚽ that is uploaded on the Hub. The images have been manually selected together with the captions. Check the 🤗 [documentation](https://huggingface.co/docs/datasets/image_dataset) on how to create and upload your own image-text dataset. Model overview In this tutorial, we will load an architecture called Pix2Struct, recently released by Google and made available on the 🤗 Hub! This architecture differs from other models in its pretraining procedure and in the way the model extracts patches from the image, using an aspect-ratio-preserving patch extraction method. The release came with no fewer than 20 checkpoints! As each checkpoint has been finetuned on a specific domain, let's finetune our own Pix2Struct on our target domain: Football players! For that we will use [`google/pix2struct-base`](https://huggingface.co/ybelkada/pix2struct-base), which corresponds to a general-purpose model that you can load and fine-tune on your own use case. Set-up environment Run the cells below to set up the environment<jupyter_code>!pip install -q git+https://github.com/huggingface/transformers.git
!pip install -q datasets<jupyter_output>[pip install progress output omitted]<jupyter_text>Load the image captioning dataset Let's load the image captioning dataset; you just need a few lines of code for that. The dataset only consists of 6 images that we have manually labeled for the sake of the tutorial.<jupyter_code>from datasets import load_dataset
dataset = load_dataset("ybelkada/football-dataset", split="train")<jupyter_output><empty_output><jupyter_text>Let's retrieve the caption of the first example:<jupyter_code>dataset[0]["text"]<jupyter_output><empty_output><jupyter_text>And the corresponding image<jupyter_code>dataset[0]["image"]<jupyter_output><empty_output><jupyter_text>Create PyTorch Dataset Understanding `max_patches` argumentThe paper introduces a new paradigm for processing the input image. It takes the image and create `n_patches` aspect-ratio preserving patches, and concatenates the remaining sequence with padding tokens to finally get `max_patches` patches. It appears that this argument is quite crucial for training and evaluation, as the model becomes very sensitive to this parameter.For the sake of our example, we will fine-tune a model with `max_patches=1024`.Note that most of the `-base` models have been fine-tuned with `max_patches=2048`, and `4096` for `-large` models.<jupyter_code>from torch.utils.data import Dataset, DataLoader
MAX_PATCHES = 1024
class ImageCaptioningDataset(Dataset):
def __init__(self, dataset, processor):
self.dataset = dataset
self.processor = processor
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
item = self.dataset[idx]
encoding = self.processor(images=item["image"], return_tensors="pt", add_special_tokens=True, max_patches=MAX_PATCHES)
encoding = {k:v.squeeze() for k,v in encoding.items()}
encoding["text"] = item["text"]
return encoding<jupyter_output><empty_output><jupyter_text>Load model and processor<jupyter_code>from transformers import AutoProcessor, Pix2StructForConditionalGeneration
processor = AutoProcessor.from_pretrained("ybelkada/pix2struct-base")
model = Pix2StructForConditionalGeneration.from_pretrained("ybelkada/pix2struct-base")<jupyter_output><empty_output><jupyter_text>Now that we have loaded the processor, let's load the dataset and the dataloader:<jupyter_code>def collator(batch):
new_batch = {"flattened_patches":[], "attention_mask":[]}
texts = [item["text"] for item in batch]
text_inputs = processor(text=texts, padding="max_length", return_tensors="pt", add_special_tokens=True, max_length=20)
new_batch["labels"] = text_inputs.input_ids
for item in batch:
new_batch["flattened_patches"].append(item["flattened_patches"])
new_batch["attention_mask"].append(item["attention_mask"])
new_batch["flattened_patches"] = torch.stack(new_batch["flattened_patches"])
new_batch["attention_mask"] = torch.stack(new_batch["attention_mask"])
return new_batch
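# (Optional sanity check, not in the original notebook) you can run the processor on a single
# image to see what the model will consume: `flattened_patches` should come out with shape
# (1, MAX_PATCHES, patch_dim) and `attention_mask` with shape (1, MAX_PATCHES).
# sample_encoding = processor(images=dataset[0]["image"], return_tensors="pt", max_patches=MAX_PATCHES)
# print({k: v.shape for k, v in sample_encoding.items()})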
train_dataset = ImageCaptioningDataset(dataset, processor)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=2, collate_fn=collator)<jupyter_output><empty_output><jupyter_text>Train the model Let's train the model! Simply run the cell below to train the model. We have observed that finding the best hyper-parameters was quite challenging and required a lot of trial and error, as the model can easily collapse (always predicting the same output, no matter the input) if the hyper-parameters are not chosen correctly. In this example, we found that using the `AdamW` optimizer with `lr=1e-5` seemed to be the best approach. Let's also print the generation output of the model every 20 epochs! Bear in mind that the model takes some time to converge; for instance, to get decent results we had to let the script run for ~1 hour.<jupyter_code>import torch
EPOCHS = 5000
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.train()
for epoch in range(EPOCHS):
print("Epoch:", epoch)
for idx, batch in enumerate(train_dataloader):
labels = batch.pop("labels").to(device)
flattened_patches = batch.pop("flattened_patches").to(device)
attention_mask = batch.pop("attention_mask").to(device)
outputs = model(flattened_patches=flattened_patches,
attention_mask=attention_mask,
labels=labels)
loss = outputs.loss
print("Loss:", loss.item())
loss.backward()
optimizer.step()
optimizer.zero_grad()
if (epoch + 1) % 20 == 0:
model.eval()
predictions = model.generate(flattened_patches=flattened_patches, attention_mask=attention_mask)
print("Predictions:", processor.batch_decode(predictions, skip_special_tokens=True))
model.train()<jupyter_output><empty_output><jupyter_text>Inference Let's check the results on our train dataset<jupyter_code># load image
example = dataset[0]
image = example["image"]
image
# prepare image for the model
model.eval()
inputs = processor(images=image, return_tensors="pt", max_patches=512).to(device)
flattened_patches = inputs.flattened_patches
attention_mask = inputs.attention_mask
generated_ids = model.generate(flattened_patches=flattened_patches, attention_mask=attention_mask, max_length=50)
generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(generated_caption)<jupyter_output><empty_output><jupyter_text>Load from the Hub Once trained, you can push the model and processor to the Hub to use them later. Meanwhile, you can play with the model that we have fine-tuned!<jupyter_code>import torch
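# A minimal sketch of the "push to the Hub" step mentioned above (not run here); the
# repository id below is a placeholder, replace it with your own username and repo name:
# model.push_to_hub("your-username/pix2struct-base-football")
# processor.push_to_hub("your-username/pix2struct-base-football")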
from transformers import Pix2StructForConditionalGeneration, AutoProcessor
device = "cuda" if torch.cuda.is_available() else "cpu"
model = Pix2StructForConditionalGeneration.from_pretrained("ybelkada/pix2struct-base-football").to(device)
processor = AutoProcessor.from_pretrained("ybelkada/pix2struct-base-football")<jupyter_output><empty_output><jupyter_text>Let's check the results on our train dataset!<jupyter_code>from matplotlib import pyplot as plt
fig = plt.figure(figsize=(18, 14))
# prepare image for the model
for i, example in enumerate(dataset):
image = example["image"]
inputs = processor(images=image, return_tensors="pt", max_patches=1024).to(device)
flattened_patches = inputs.flattened_patches
attention_mask = inputs.attention_mask
generated_ids = model.generate(flattened_patches=flattened_patches, attention_mask=attention_mask, max_length=50)
generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
fig.add_subplot(2, 3, i+1)
plt.imshow(image)
plt.axis("off")
plt.title(f"Generated caption: {generated_caption}")<jupyter_output>A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.
A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.
A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.
A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.
A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer.
A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='le[...] | notebooks/examples/image_captioning_pix2struct.ipynb/0 | {
"file_path": "notebooks/examples/image_captioning_pix2struct.ipynb",
"repo_id": "notebooks",
"token_count": 3848
} | 145 |
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it.<jupyter_code>#! pip install transformers datasets huggingface_hub<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has the latest version of those libraries installed. To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow. First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!), then uncomment the following cell and input your token:<jupyter_code>from huggingface_hub import notebook_login
notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS and setup Git if you haven't already. On Linux, uncomment the following instructions and adapt with your name and email. On Windows, please download git-lfs at https://git-lfs.github.com/<jupyter_code># !apt install git-lfs
# !git config --global user.email "[email protected]"
# !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.16.0 since some of the functionality we use was introduced in that version:<jupyter_code>import transformers
print(transformers.__version__)<jupyter_output>4.16.0.dev0<jupyter_text>You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling). We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry
send_example_telemetry("language_modeling_from_scratch_notebook", framework="tensorflow")<jupyter_output><empty_output><jupyter_text>Train a language model In this notebook, we'll see how to train a [๐ค Transformers](https://github.com/huggingface/transformers) model on a language modeling task. We will cover two types of language modeling tasks which are:- Causal language modeling: the model has to predict the next token in the sentence (so the labels are the same as the inputs shifted to the right). To make sure the model does not cheat, its attention computations are masked so that tokens cannot attend to tokens to their right, as this would result in label leakage.- Masked language modeling: the model has to predict some tokens that are masked in the input. It still has access to the whole sentence, so it can use the tokens before and after the tokens masked to predict their value.We will see how to easily load and preprocess the dataset for each one of those tasks, and how to use the `Trainer` API to train a model on it.This notebooks assumes you have trained a tokenizer on the corpus you are using, see the [How to train a tokenizer](https://github.com/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb) notebook ([open in colab](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb)).A script version of this notebook you can directly run on a distributed environment or on TPU is available in our [examples folder](https://github.com/huggingface/transformers/tree/master/examples). Preparing the dataset For each of those tasks, we will use the [Wikitext 2]() dataset as an example. You can load it very easily with the ๐ค Datasets library.<jupyter_code>from datasets import load_dataset
datasets = load_dataset("wikitext", "wikitext-2-raw-v1")<jupyter_output>Reusing dataset wikitext (/home/matt/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/a241db52902eaf2c6aa732210bead40c090019a499ceb13bcbfa3f8ab646a126)<jupyter_text>You can replace the dataset above with any dataset hosted on [the hub](https://huggingface.co/datasets) or use your own files. Just uncomment the following cell and replace the paths with your own input files:<jupyter_code># datasets = load_dataset("text", data_files={"train": path_to_train.txt, "validation": path_to_validation.txt}<jupyter_output><empty_output><jupyter_text>You can also load datasets from a csv or a JSON file, see the [full documentation](https://huggingface.co/docs/datasets/loading_datasets.htmlfrom-local-files) for more information. To access an actual element, you need to select a split first, then give an index:<jupyter_code>datasets["train"][10]<jupyter_output><empty_output><jupyter_text>To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.<jupyter_code>from datasets import ClassLabel
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=10):
assert num_examples <= len(
dataset
), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset) - 1)
while pick in picks:
pick = random.randint(0, len(dataset) - 1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
for column, typ in dataset.features.items():
if isinstance(typ, ClassLabel):
df[column] = df[column].transform(lambda i: typ.names[i])
display(HTML(df.to_html()))
show_random_elements(datasets["train"])<jupyter_output><empty_output><jupyter_text>As we can see, some of the texts are a full paragraph of a Wikipedia article while others are just titles or empty lines. Causal Language modeling For causal language modeling (CLM) we are going to take all the texts in our dataset, tokenize them and concatenate them. Then we will split them into examples of a fixed sequence length. This way the model will receive chunks of contiguous text that may look like:```part of text 1```or ```end of text 1 [BOS_TOKEN] beginning of text 2```depending on whether they span multiple original texts or not. The labels will be the same as the inputs, shifted to the right.We will use the [`gpt2`](https://huggingface.co/gpt2) architecture for this example. You can pick any of the checkpoints listed [here](https://huggingface.co/models?filter=causal-lm) instead. For the tokenizer, you can optionally replace the checkpoint with one that you trained yourself.<jupyter_code>model_checkpoint = "gpt2"
tokenizer_checkpoint = "sgugger/gpt2-like-tokenizer"
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)<jupyter_output><empty_output><jupyter_text>We can now call the tokenizer on all our texts. This is very simple, using the [`map`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasets.Dataset.map) method from the Datasets library. First we define a function that calls the tokenizer on our texts:<jupyter_code>def tokenize_function(examples):
return tokenizer(examples["text"])<jupyter_output><empty_output><jupyter_text>Then we apply it to all the splits in our `datasets` object, using `batched=True` and 4 processes to speed up the preprocessing. We won't need the `text` column afterward, so we discard it.<jupyter_code>tokenized_datasets = datasets.map(
tokenize_function, batched=True, num_proc=4, remove_columns=["text"]
)<jupyter_output><empty_output><jupyter_text>If we now look at an element of our datasets, we will see the text has been replaced by the `input_ids` the model will need:<jupyter_code>tokenized_datasets["train"][1]<jupyter_output><empty_output><jupyter_text>Now for the harder part: We need to concatenate all our texts together, and then split the result into chunks of a fixed size, which we will call `block_size`. To do this, we will use the `map` method again, with the option `batched=True`. When we use `batched=True`, the function we pass to `map()` will be passed multiple inputs at once, allowing us to group them into more or fewer examples than we had in the input. This allows us to create our new fixed-length samples. We can use any `block_size`, but high values might be too big to fit in your GPU RAM, so let's use something a bit smaller: 128.<jupyter_code># block_size = tokenizer.model_max_length
block_size = 128<jupyter_output><empty_output><jupyter_text>Then we write the preprocessing function that will group our texts:<jupyter_code>def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
    return result<jupyter_output><empty_output><jupyter_text>Note that we duplicate the inputs for our labels, without shifting them, even though we told you the labels need to be shifted! This is because CausalLM models in the 🤗 Transformers library automatically apply right-shifting to the inputs, so we don't need to do it manually. Also note that by default, the `map` method will send a batch of 1,000 examples to be processed by the preprocessing function. So here, we will drop the remainder to make the concatenated tokenized texts a multiple of `block_size` every 1,000 examples. You can adjust this behavior by passing a higher batch size (which will also be processed more slowly). You can also speed up the preprocessing by using multiprocessing:<jupyter_code>lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
batch_size=1000,
num_proc=4,
)<jupyter_output><empty_output><jupyter_text>And we can check our datasets have changed: now the samples contain chunks of `block_size` contiguous tokens, potentially spanning several of our original texts.<jupyter_code>tokenizer.decode(lm_datasets["train"][1]["input_ids"])<jupyter_output><empty_output><jupyter_text>Now that the data has been cleaned, we're ready to initialize our `Model`. First we create the model using the same config as our checkpoint, but initialized with random weights:<jupyter_code>from transformers import AutoConfig, TFAutoModelForCausalLM
config = AutoConfig.from_pretrained(model_checkpoint)
model = TFAutoModelForCausalLM.from_config(config)<jupyter_output>2022-01-28 14:15:22.987842: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-28 14:15:22.992329: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-28 14:15:22.993015: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-28 14:15:22.994216: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags[...]<jupyter_text>Now let's set some hyperparameters like the learning rate and weight decay, as well as the model ID, if we want to upload our model to the Hub afterwards.<jupyter_code>learning_rate = 2e-5
weight_decay = 0.01
push_to_hub_model_id = f"{model_checkpoint}-wikitext2"<jupyter_output><empty_output><jupyter_text>Now we initialize our optimizer.<jupyter_code>from transformers import AdamWeightDecay
optimizer = AdamWeightDecay(learning_rate=learning_rate, weight_decay_rate=weight_decay)<jupyter_output><empty_output><jupyter_text>Next, we compile our model. Note that most Transformers models compute loss internally, so we actually don't have to specify anything for that argument! You can of course set your own loss function if you want, but by default our models will choose the 'obvious' loss that matches their task, such as cross-entropy in the case of language modelling. The built-in loss will also correctly handle things like masking the loss on padding tokens, or unlabelled tokens in the case of masked language modelling, so we recommend using it unless you're an advanced user!We also use the `jit_compile` argument to compile the model with [XLA](https://www.tensorflow.org/xla). XLA compilation adds a delay at the start of training, but this is quickly repaid by faster training iterations after that. It has one downside, though - if the shape of your input changes at all, then it will need to rerun the compilation again! This isn't a problem for us in this notebook, because all of our examples are exactly the same length. Be careful with it when that isn't true, though - if you have a variable sequence length in your batches, then you might spend more time compiling your model than actually training, especially for small datasets!If you encounter difficulties when training with XLA, it's a good idea to remove the `jit_compile` argument and see if that fixes things. In fact, when debugging, it can be helpful to skip graph compilation entirely with the `run_eagerly=True` argument to [`compile()`](https://www.tensorflow.org/api_docs/python/tf/keras/Modelcompile). This will let you identify the exact line of code where problems arise, but it will significantly reduce your performance, so make sure to remove it again when you've fixed the problem!<jupyter_code>import tensorflow as tf
model.compile(optimizer=optimizer, jit_compile=True)<jupyter_output><empty_output><jupyter_text>Next, we convert our datasets to `tf.data.Dataset`, which Keras understands natively. There are two ways to do this - we can use the slightly more low-level [`Dataset.to_tf_dataset()`](https://huggingface.co/docs/datasets/package_reference/main_classesdatasets.Dataset.to_tf_dataset) method, or we can use [`Model.prepare_tf_dataset()`](https://huggingface.co/docs/transformers/main_classes/modeltransformers.TFPreTrainedModel.prepare_tf_dataset). The main difference between these two is that the `Model` method can inspect the model to determine which column names it can use as input, which means you don't need to specify them yourself. It also supplies a data collator by default which is appropriate for most tasks.<jupyter_code>train_set = model.prepare_tf_dataset(
lm_datasets["train"],
shuffle=True,
batch_size=16,
)
validation_set = model.prepare_tf_dataset(
lm_datasets["validation"],
shuffle=False,
batch_size=16,
)<jupyter_output><empty_output><jupyter_text>Now we can train our model. We can also add a callback to sync up our model with the Hub - this allows us to resume training from other machines and even test the model's inference quality midway through training! Make sure to change the `username` if you do. If you don't want to do this, simply remove the callbacks argument in the call to `fit()`.<jupyter_code>from transformers.keras_callbacks import PushToHubCallback
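# (Optional, not in the original notebook) the objects returned by `prepare_tf_dataset` are
# regular `tf.data.Dataset`s, so you can inspect the structure Keras will receive, for example:
# print(train_set.element_spec)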
model_name = model_checkpoint.split("/")[-1]
push_to_hub_model_id = f"{model_name}-finetuned-wikitext2"
callback = PushToHubCallback(
output_dir="./clm_from_scratch_model_save",
tokenizer=tokenizer,
hub_model_id=push_to_hub_model_id,
)
model.fit(train_set, validation_data=validation_set, epochs=2, callbacks=[callback])<jupyter_output>/home/matt/PycharmProjects/notebooks/examples/clm_from_scratch_model_save is already a clone of https://huggingface.co/Rocketknight1/gpt2-finetuned-wikitext2. Make sure you pull the latest changes with `repo.git_pull()`.<jupyter_text>Once the training is completed, we can evaluate our model and get its loss on the validation set like this:<jupyter_code>eval_loss = model.evaluate(validation_set)<jupyter_output>121/121 [==============================] - 6s 51ms/step - loss: 6.3490<jupyter_text>The quality of language models is often measured in 'perplexity' rather than cross-entropy. To convert to perplexity, we simply raise e to the power of the cross-entropy loss.<jupyter_code>import math
print(f"Perplexity: {math.exp(eval_loss):.2f}")<jupyter_output>Perplexity: 571.92<jupyter_text>The perplexity is still quite high since for this demo we trained on a small dataset for a small number of epochs. For a real LM training, you would need a larger dataset and more epochs. If you used the callback above, you can now share this model with all your friends, family or favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"` so for instance:```pythonfrom transformers import TFAutoModelForCausalLMmodel = TFAutoModelForCausalLM.from_pretrained("your-username/my-awesome-model")``` Inference Models trained from scratch on small amounts of data will generally not output useful text - you'll need a much bigger dataset and a much longer training time before it starts writing text that you'd want to read! If you want to see an example of inference with causal language models, see the `language_modeling-tf` notebook, where we start with a pre-trained model and get higher-quality output much sooner as a result. Masked language modeling For masked language modeling (MLM) we are going to use the same preprocessing as before for our dataset with one additional step: we will randomly mask some tokens (by replacing them by `[MASK]`) and the labels will be adjusted to only include the masked tokens (we don't have to predict the non-masked tokens). If you use a tokenizer you trained yourself, make sure the `[MASK]` token is among the special tokens you passed during training!We will use the [`bert-base-cased`](https://huggingface.co/bert-based-cased) model for this example. You can pick any of the checkpoints listed [here](https://huggingface.co/models?filter=masked-lm) instead. For the tokenizer, replace the checkpoint by the one you trained.<jupyter_code>model_checkpoint = "bert-base-cased"<jupyter_output><empty_output><jupyter_text>We can apply the same tokenization function as before, we just need to update our tokenizer to use the checkpoint we just picked:<jupyter_code>tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
tokenized_datasets = datasets.map(
tokenize_function, batched=True, num_proc=4, remove_columns=["text"]
)<jupyter_output>Token indices sequence length is longer than the specified maximum sequence length for this model (571 > 512). Running this sequence through the model will result in indexing errors
Token indices sequence length is longer than the specified maximum sequence length for this model (554 > 512). Running this sequence through the model will result in indexing errors
Token indices sequence length is longer than the specified maximum sequence length for this model (522 > 512). Running this sequence through the model will result in indexing errors
Token indices sequence length is longer than the specified maximum sequence length for this model (657 > 512). Running this sequence through the model will result in indexing errors
Token indices sequence length is longer than the specified maximum sequence length for this model (514 > 512). Running this sequence through the model will result in indexing errors<jupyter_text>And like before, we group texts together and chunk them in samples of length `block_size`. You can skip that step if your dataset is composed of individual sentences.<jupyter_code>lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
batch_size=1000,
num_proc=4,
)<jupyter_output><empty_output><jupyter_text>The rest is very similar to what we used before, with two exceptions. First we use a model suitable for masked LM:<jupyter_code>from transformers import AutoConfig, TFAutoModelForMaskedLM
config = AutoConfig.from_pretrained(model_checkpoint)
model = TFAutoModelForMaskedLM.from_config(config)<jupyter_output><empty_output><jupyter_text>We redefine our hyperparameters and choose a new name:<jupyter_code>learning_rate = 2e-5
weight_decay = 0.01
push_to_hub_model_id = f"{model_checkpoint}-wikitext2"<jupyter_output><empty_output><jupyter_text>Now we initialize our optimizer.<jupyter_code>from transformers import AdamWeightDecay
optimizer = AdamWeightDecay(learning_rate=learning_rate, weight_decay_rate=weight_decay)<jupyter_output><empty_output><jupyter_text>And as before, we leave the `loss` argument blank to use the internal loss, and use `jit_compile` to enable XLA.<jupyter_code>import tensorflow as tf
model.compile(optimizer=optimizer, jit_compile=True)<jupyter_output>No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! Please ensure your labels are passed as keys in the input dict so that they are accessible to the model during the forward pass. To disable this behaviour, please pass a loss argument, or explicitly pass loss=None if you do not want your model to compute a loss.<jupyter_text>Finally, we use a special `data_collator`. The `data_collator` is a function that is responsible for taking the samples and batching them in tensors. In the previous example, we had nothing special to do, so we just used the default for this argument. Here we want to randomly mask tokens. We could do it as a pre-processing step (like the tokenization) but then the tokens would always be masked the same way at each epoch. By doing this step inside the `data_collator`, we ensure this random masking is done in a new way each time we go over the data.To do this masking for us, the library provides a `DataCollatorForLanguageModeling`. We can adjust the probability of the masking. Note that our data collators are designed to work for multiple frameworks, so ensure you set the `return_tensors='np'` argument to get NumPy arrays out - you don't want to accidentally get a load of `torch.Tensor` objects in the middle of your nice TF code! You could also use `return_tensors='tf'` to get TensorFlow tensors, but our TF dataset pipeline actually uses a NumPy loader internally, which is wrapped at the end with a `tf.data.Dataset`. As a result, `np` is usually more reliable and performant when you're using it!<jupyter_code>from transformers import DataCollatorForLanguageModeling
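# (Illustration, not in the original notebook) once `data_collator` is created just below, you
# can call it on a few samples to see the random masking in action; rerunning it masks
# different tokens each time:
# demo_batch = data_collator([lm_datasets["train"][i] for i in range(2)])
# print(demo_batch["input_ids"][0][:20])
# print(demo_batch["labels"][0][:20])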
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm_probability=0.15, return_tensors="np"
)<jupyter_output><empty_output><jupyter_text>Now we pass our data collator to the `prepare_tf_dataset()` argument.<jupyter_code>train_set = model.prepare_tf_dataset(
lm_datasets["train"],
shuffle=True,
batch_size=16,
collate_fn=data_collator,
)
validation_set = model.prepare_tf_dataset(
lm_datasets["validation"],
shuffle=False,
batch_size=16,
collate_fn=data_collator,
)<jupyter_output><empty_output><jupyter_text>And now we can train our model:<jupyter_code>from transformers.keras_callbacks import PushToHubCallback
from tensorflow.keras.callbacks import TensorBoard
model_name = model_checkpoint.split("/")[-1]
push_to_hub_model_id = f"{model_name}-finetuned-wikitext2"
tensorboard_callback = TensorBoard(log_dir="./mlm_from_scratch_model_save/logs")
push_to_hub_callback = PushToHubCallback(
output_dir="./mlm_from_scratch_model_save",
tokenizer=tokenizer,
hub_model_id=push_to_hub_model_id,
)
callbacks = [tensorboard_callback, push_to_hub_callback]
model.fit(train_set, validation_data=validation_set, epochs=2, callbacks=callbacks)<jupyter_output>/home/matt/PycharmProjects/notebooks/examples/mlm_from_scratch_model_save is already a clone of https://huggingface.co/Rocketknight1/bert-base-cased-finetuned-wikitext2. Make sure you pull the latest changes with `repo.git_pull()`.<jupyter_text>Like before, we can evaluate our model on the validation set. As training progresses, the perplexity will be much lower for MLM than for the CLM objective because for the MLM objective, we only have to make predictions for the masked tokens (which represent 15% of the total here) while having access to the rest of the tokens. It's thus an easier task for the model.<jupyter_code>eval_loss = model.evaluate(validation_set)
print(f"Perplexity: {math.exp(eval_loss):.2f}")<jupyter_output>126/126 [==============================] - 6s 44ms/step - loss: 6.2834
Perplexity: 535.62 | notebooks/examples/language_modeling_from_scratch-tf.ipynb/0 | {
"file_path": "notebooks/examples/language_modeling_from_scratch-tf.ipynb",
"repo_id": "notebooks",
"token_count": 7396
} | 146 |
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install ๐ค Transformers and ๐ค Datasets. Uncomment the following cell and run it.<jupyter_code>#! pip install datasets transformers<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your username and password:<jupyter_code>from huggingface_hub import notebook_login
notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS. Uncomment the following instructions:<jupyter_code># !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.11.0 since the functionality was introduced in that version:<jupyter_code>import transformers
print(transformers.__version__)<jupyter_output><empty_output><jupyter_text>You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/question-answering). We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry
send_example_telemetry("question_answering_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on a question-answering task In this notebook, we will see how to fine-tune one of the [๐ค Transformers](https://github.com/huggingface/transformers) model to a question answering task, which is the task of extracting the answer to a question from a given context. We will see how to easily load a dataset for these kinds of tasks and use the `Trainer` API to fine-tune a model on it.**Note:** This notebook finetunes models that answer question by taking a substring of a context, not by generating new text. This notebook is built to run on any question answering task with the same format as SQUAD (version 1 or 2), with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a version with a token classification head and a fast tokenizer (check on [this table](https://huggingface.co/transformers/index.htmlbigtable) if this is the case). It might just need some small adjustments if you decide to use a different dataset than the one used here. Depending on you model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. Set those three parameters, then the rest of the notebook should run smoothly:<jupyter_code># This flag is the difference between SQUAD v1 or 2 (if you're using another dataset, it indicates if impossible
# answers are allowed or not).
squad_v2 = False
model_checkpoint = "distilbert-base-uncased"
batch_size = 16<jupyter_output><empty_output><jupyter_text>Loading the dataset We will use the [๐ค Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`.<jupyter_code>from datasets import load_dataset, load_metric<jupyter_output><empty_output><jupyter_text>For our example here, we'll use the [SQUAD dataset](https://rajpurkar.github.io/SQuAD-explorer/). The notebook should work with any question answering dataset provided by the ๐ค Datasets library. If you're using your own dataset defined from a JSON or csv file (see the [Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets.htmlfrom-local-files) on how to load them), it might need some adjustments in the names of the columns used.<jupyter_code>datasets = load_dataset("squad_v2" if squad_v2 else "squad")<jupyter_output>Reusing dataset squad (/home/sgugger/.cache/huggingface/datasets/squad/plain_text/1.0.0/4c81550d83a2ac7c7ce23783bd8ff36642800e6633c1f18417fb58c3ff50cdd7)<jupyter_text>The `datasets` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set.<jupyter_code>datasets<jupyter_output><empty_output><jupyter_text>We can see the training, validation and test sets all have a column for the context, the question and the answers to those questions. To access an actual element, you need to select a split first, then give an index:<jupyter_code>datasets["train"][0]<jupyter_output><empty_output><jupyter_text>We can see the answers are indicated by their start position in the text (here at character 515) and their full text, which is a substring of the context as we mentioned above. To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset (automatically decoding the labels in passing).<jupyter_code>from datasets import ClassLabel, Sequence
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=10):
assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset)-1)
while pick in picks:
pick = random.randint(0, len(dataset)-1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
for column, typ in dataset.features.items():
if isinstance(typ, ClassLabel):
df[column] = df[column].transform(lambda i: typ.names[i])
elif isinstance(typ, Sequence) and isinstance(typ.feature, ClassLabel):
df[column] = df[column].transform(lambda x: [typ.feature.names[i] for i in x])
display(HTML(df.to_html()))
show_random_elements(datasets["train"])<jupyter_output><empty_output><jupyter_text>Preprocessing the training data Before we can feed those texts to our model, we need to preprocess them. This is done by a ๐ค Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:- we get a tokenizer that corresponds to the model architecture we want to use,- we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>The following assertion ensures that our tokenizer is a fast tokenizer (backed by Rust) from the 🤗 Tokenizers library. Those fast tokenizers are available for almost all models, and we will need some of the special features they have for our preprocessing.<jupyter_code>import transformers
assert isinstance(tokenizer, transformers.PreTrainedTokenizerFast)<jupyter_output><empty_output><jupyter_text>You can check which type of models have a fast tokenizer available and which don't on the [big table of models](https://huggingface.co/transformers/index.html#bigtable). You can directly call this tokenizer on two sentences (one for the question, one for the context):<jupyter_code>tokenizer("What is your name?", "My name is Sylvain.")<jupyter_output><empty_output><jupyter_text>Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later); you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested. Now one specific thing for the preprocessing in question answering is how to deal with very long documents. We usually truncate them in other tasks, when they are longer than the model maximum sentence length, but here, removing part of the context might result in losing the answer we are looking for. To deal with this, we will allow one (long) example in our dataset to give several input features, each of length shorter than the maximum length of the model (or the one we set as a hyper-parameter). Also, just in case the answer lies at the point we split a long context, we allow some overlap between the features we generate, controlled by the hyper-parameter `doc_stride`:<jupyter_code>max_length = 384 # The maximum length of a feature (question and context)
doc_stride = 128 # The authorized overlap between two part of the context when splitting it is needed.<jupyter_output><empty_output><jupyter_text>Let's find one long example in our dataset:<jupyter_code>for i, example in enumerate(datasets["train"]):
if len(tokenizer(example["question"], example["context"])["input_ids"]) > 384:
break
example = datasets["train"][i]<jupyter_output><empty_output><jupyter_text>Without any truncation, we get the following length for the input IDs:<jupyter_code>len(tokenizer(example["question"], example["context"])["input_ids"])<jupyter_output><empty_output><jupyter_text>Now, if we just truncate, we will lose information (and possibly the answer to our question):<jupyter_code>len(tokenizer(example["question"], example["context"], max_length=max_length, truncation="only_second")["input_ids"])<jupyter_output><empty_output><jupyter_text>Note that we never want to truncate the question, only the context, else the `only_second` truncation picked. Now, our tokenizer can automatically return us a list of features capped by a certain maximum length, with the overlap we talked above, we just have to tell it with `return_overflowing_tokens=True` and by passing the stride:<jupyter_code>tokenized_example = tokenizer(
example["question"],
example["context"],
max_length=max_length,
truncation="only_second",
return_overflowing_tokens=True,
stride=doc_stride
)<jupyter_output><empty_output><jupyter_text>Now we don't have one list of `input_ids`, but several:<jupyter_code>[len(x) for x in tokenized_example["input_ids"]]<jupyter_output><empty_output><jupyter_text>And if we decode them, we can see the overlap:<jupyter_code>for x in tokenized_example["input_ids"][:2]:
    print(tokenizer.decode(x))<jupyter_output>[CLS] how many wins does the notre dame men's basketball team have? [SEP] the men's basketball team has over 1, 600 wins, one of only 12 schools who have reached that mark, and have appeared in 28 ncaa tournaments. former player austin carr holds the record for most points scored in a single game of the tournament with 61. although the team has never won the ncaa tournament, they were named by the helms athletic foundation as national champions twice. the team has orchestrated a number of upsets of number one ranked teams, the most notable of which was ending ucla's record 88 - game winning streak in 1974. the team has beaten an additional eight number - one teams, and those nine wins rank second, to ucla's 10, all - time in wins against the top team. the team plays in newly renovated purcell pavilion ( within the edmund p. joyce center ), which reopened for the beginning of the 2009 – 2010 season. the team is coached by mike brey, who, as of the 2014 – 15 season, his fifteenth at notr[...]<jupyter_text>Now this will give us some work to properly treat the answers: we need to find in which of those features the answer actually is, and where exactly in that feature. The models we will use require the start and end positions of these answers in the tokens, so we will also need to map parts of the original context to some tokens. Thankfully, the tokenizer we're using can help us with that by returning an `offset_mapping`:<jupyter_code>tokenized_example = tokenizer(
example["question"],
example["context"],
max_length=max_length,
truncation="only_second",
return_overflowing_tokens=True,
return_offsets_mapping=True,
stride=doc_stride
)
print(tokenized_example["offset_mapping"][0][:100])<jupyter_output>[(0, 0), (0, 3), (4, 8), (9, 13), (14, 18), (19, 22), (23, 28), (29, 33), (34, 37), (37, 38), (38, 39), (40, 50), (51, 55), (56, 60), (60, 61), (0, 0), (0, 3), (4, 7), (7, 8), (8, 9), (10, 20), (21, 25), (26, 29), (30, 34), (35, 36), (36, 37), (37, 40), (41, 45), (45, 46), (47, 50), (51, 53), (54, 58), (59, 61), (62, 69), (70, 73), (74, 78), (79, 86), (87, 91), (92, 96), (96, 97), (98, 101), (102, 106), (107, 115), (116, 118), (119, 121), (122, 126), (127, 138), (138, 139), (140, 146), (147, 153), (154, 160), (161, 165), (166, 171), (172, 175), (176, 182), (183, 186), (187, 191), (192, 198), (199, 205), (206, 208), (209, 210), (211, 217), (218, 222), (223, 225), (226, 229), (230, 240), (241, 245), (246, 248), (248, 249), (250, 258), (259, 262), (263, 267), (268, 271), (272, 277), (278, 281), (282, 285), (286, 290), (291, 301), (301, 302), (303, 307), (308, 312), (313, 318), (319, 321), (322, 325), (326, 330), (330, 331), (332, 340), (341, 351), (352, 354), (355, 363), (364, 373), (374,[...]<jupyter_text>This gives, for each index of our input IDS, the corresponding start and end character in the original text that gave our token. The very first token (`[CLS]`) has (0, 0) because it doesn't correspond to any part of the question/answer, then the second token is the same as the characters 0 to 3 of the question:<jupyter_code>first_token_id = tokenized_example["input_ids"][0][1]
offsets = tokenized_example["offset_mapping"][0][1]
print(tokenizer.convert_ids_to_tokens([first_token_id])[0], example["question"][offsets[0]:offsets[1]])<jupyter_output>how How<jupyter_text>So we can use this mapping to find the position of the start and end tokens of our answer in a given feature. We just have to distinguish which parts of the offsets correspond to the question and which parts correspond to the context; this is where the `sequence_ids` method of our `tokenized_example` can be useful:<jupyter_code>sequence_ids = tokenized_example.sequence_ids()
print(sequence_ids)<jupyter_output>[None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, None, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, [...]<jupyter_text>It returns `None` for the special tokens, then 0 or 1 depending on whether the corresponding token comes from the first sentence past (the question) or the second (the context). Now with all of this, we can find the first and last token of the answer in one of our input feature (or if the answer is not in this feature):<jupyter_code>answers = example["answers"]
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != 1:
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(tokenized_example["input_ids"][0]) - 1
while sequence_ids[token_end_index] != 1:
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
offsets = tokenized_example["offset_mapping"][0]
if (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
# Move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
token_start_index += 1
start_position = token_start_index - 1
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
end_position = token_end_index + 1
print(start_position, end_position)
else:
print("The answer is not in this feature.")<jupyter_output>23 26<jupyter_text>And we can double check that it is indeed the theoretical answer:<jupyter_code>print(tokenizer.decode(tokenized_example["input_ids"][0][start_position: end_position+1]))
print(answers["text"][0])<jupyter_output>over 1, 600
over 1,600<jupyter_text>For this notebook to work with any kind of model, we need to account for the special case where the model expects padding on the left (in which case we switch the order of the question and the context):<jupyter_code>pad_on_right = tokenizer.padding_side == "right"<jupyter_output><empty_output><jupyter_text>Now let's put everything together in one function we will apply to our training set. In the case of impossible answers (the answer is in another feature given by an example with a long context), we set the cls index for both the start and end position. We could also simply discard those examples from the training set if the flag `allow_impossible_answers` is `False`. Since the preprocessing is already complex enough as it is, we've kept it simple for this part.<jupyter_code>def prepare_train_features(examples):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
examples["question"] = [q.lstrip() for q in examples["question"]]
# Tokenize our examples with truncation and padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
tokenized_examples = tokenizer(
examples["question" if pad_on_right else "context"],
examples["context" if pad_on_right else "question"],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_length,
stride=doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length",
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offset_mapping = tokenized_examples.pop("offset_mapping")
# Let's label those examples!
tokenized_examples["start_positions"] = []
tokenized_examples["end_positions"] = []
for i, offsets in enumerate(offset_mapping):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_examples["input_ids"][i]
cls_index = input_ids.index(tokenizer.cls_token_id)
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
answers = examples["answers"][sample_index]
# If no answers are given, set the cls_index as answer.
if len(answers["answer_start"]) == 0:
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Start/end character index of the answer in the text.
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples["start_positions"].append(token_start_index - 1)
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples["end_positions"].append(token_end_index + 1)
return tokenized_examples<jupyter_output><empty_output><jupyter_text>This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:<jupyter_code>features = prepare_train_features(datasets['train'][:5])<jupyter_output><empty_output><jupyter_text>To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command. Since our preprocessing changes the number of samples, we need to remove the old columns when applying it.<jupyter_code>tokenized_datasets = datasets.map(prepare_train_features, batched=True, remove_columns=datasets["train"].column_names)<jupyter_output>Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/squad/plain_text/1.0.0/4c81550d83a2ac7c7ce23783bd8ff36642800e6633c1f18417fb58c3ff50cdd7/cache-a5c71e98733887b0.arrow
Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/squad/plain_text/1.0.0/4c81550d83a2ac7c7ce23783bd8ff36642800e6633c1f18417fb58c3ff50cdd7/cache-14932a8c6aecc96d.arrow<jupyter_text>Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires not using the cached data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files; you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again. Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready for training, we can download the pretrained model and fine-tune it. Since our task is question answering, we use the `AutoModelForQuestionAnswering` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us:<jupyter_code>from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer
model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint)<jupyter_output>Some weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertForQuestionAnswering: ['vocab_transform.weight', 'vocab_transform.bias', 'vocab_layer_norm.weight', 'vocab_layer_norm.bias', 'vocab_projector.weight', 'vocab_projector.bias']
- This IS expected if you are initializing DistilBertForQuestionAnswering from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing DistilBertForQuestionAnswering from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of DistilBertForQuestionAnswering were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['qa_outputs.weight', 'qa_outputs.bias']
You should probably TRAIN this mode[...]<jupyter_text>The warning is telling us we are throwing away some weights (the `vocab_transform` and `vocab_layer_norm` layers) and randomly initializing some others (the `qa_outputs` layer, as shown in the warning above). This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. To instantiate a `Trainer`, we will need to define three more things. The most important is the [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.htmltransformers.TrainingArguments), which is a class that contains all the attributes to customize the training. It requires one folder name, which will be used to save the checkpoints of the model, and all other arguments are optional:<jupyter_code>model_name = model_checkpoint.split("/")[-1]
args = TrainingArguments(
f"{model_name}-finetuned-squad",
evaluation_strategy = "epoch",
learning_rate=2e-5,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
num_train_epochs=3,
weight_decay=0.01,
push_to_hub=True,
    )<jupyter_output><empty_output><jupyter_text>Here we set the evaluation to be done at the end of each epoch, tweak the learning rate, use the `batch_size` defined at the top of the notebook and customize the number of epochs for training, as well as the weight decay. The last argument sets up everything so we can push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally under a name that is different from the name of the repository it will be pushed to, or if you want to push your model under an organization and not your namespace, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"sgugger/bert-finetuned-squad"` or `"huggingface/bert-finetuned-squad"`). Then we will need a data collator that will batch our processed examples together; here the default one will work:<jupyter_code>from transformers import default_data_collator
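# Because `prepare_train_features` already padded every feature to `max_length`, the default data
# collator (which just stacks the already equal-length tensors) is all we need here. If you wanted
# dynamic per-batch padding instead, one possible alternative (a sketch, not used below) would be:
#     from transformers import DataCollatorWithPadding
#     data_collator = DataCollatorWithPadding(tokenizer)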
data_collator = default_data_collator<jupyter_output><empty_output><jupyter_text>We will evaluate our model and compute metrics in the next section (this is a very long operation, so we will only compute the evaluation loss during training).Then we just need to pass all of this along with our datasets to the `Trainer`:<jupyter_code>trainer = Trainer(
model,
args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
data_collator=data_collator,
tokenizer=tokenizer,
    )<jupyter_output><empty_output><jupyter_text>We can now finetune our model by just calling the `train` method:<jupyter_code>trainer.train()<jupyter_output><empty_output><jupyter_text>Since this training is particularly long, let's save the model just in case we need to restart.<jupyter_code>trainer.save_model("test-squad-trained")<jupyter_output><empty_output><jupyter_text>Evaluation Evaluating our model will require a bit more work, as we will need to map the predictions of our model back to parts of the context. The model itself predicts logits for the start and end positions of our answers: if we take a batch from our validation dataloader, here is the output our model gives us:<jupyter_code>import torch
for batch in trainer.get_eval_dataloader():
break
batch = {k: v.to(trainer.args.device) for k, v in batch.items()}
with torch.no_grad():
output = trainer.model(**batch)
output.keys()<jupyter_output><empty_output><jupyter_text>The output of the model is a dict-like object that contains the loss (since we provided labels), the start and end logits. We won't need the loss for our predictions; let's have a look at the logits:<jupyter_code>output.start_logits.shape, output.end_logits.shape<jupyter_output><empty_output><jupyter_text>We have one logit for each feature and each token. The most obvious way to predict an answer for each feature is to take the index for the maximum of the start logits as a start position and the index of the maximum of the end logits as an end position.<jupyter_code>output.start_logits.argmax(dim=-1), output.end_logits.argmax(dim=-1)<jupyter_output><empty_output><jupyter_text>This will work great in a lot of cases, but what if this prediction gives us something impossible: the start position could be greater than the end position, or point to a span of text in the question instead of the answer. In that case, we might want to look at the second best prediction to see if it gives a possible answer and select that instead. However, picking the second best answer is not as easy as picking the best one: is it the second best index in the start logits with the best index in the end logits? Or the best index in the start logits with the second best index in the end logits? And if that second best answer is not possible either, it gets even trickier for the third best answer. To classify our answers, we will use the score obtained by adding the start and end logits. We won't try to order all the possible answers; instead we limit ourselves to a number of them given by a hyper-parameter we call `n_best_size`. We'll pick the best indices in the start and end logits and gather all the answers this predicts. After checking if each one is valid, we will sort them by their score and keep the best one. Here is how we would do this on the first feature in the batch:<jupyter_code>n_best_size = 20
import numpy as np
start_logits = output.start_logits[0].cpu().numpy()
end_logits = output.end_logits[0].cpu().numpy()
# Gather the indices of the best start/end logits:
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
valid_answers = []
for start_index in start_indexes:
for end_index in end_indexes:
if start_index <= end_index: # We need to refine that test to check the answer is inside the context
valid_answers.append(
{
"score": start_logits[start_index] + end_logits[end_index],
"text": "" # We need to find a way to get back the original substring corresponding to the answer in the context
}
)<jupyter_output><empty_output><jupyter_text>And then we can sort the `valid_answers` according to their `score` and only keep the best one. The only point left is how to check a given span is inside the context (and not the question) and how to get back the text inside. To do this, we need to add two things to our validation features:- the ID of the example that generated the feature (since each example can generate several features, as seen before);- the offset mapping that will give us a map from token indices to character positions in the context.That's why we will re-process the validation set with the following function, slightly different from `prepare_train_features`:<jupyter_code>def prepare_validation_features(examples):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
examples["question"] = [q.lstrip() for q in examples["question"]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
tokenized_examples = tokenizer(
examples["question" if pad_on_right else "context"],
examples["context" if pad_on_right else "question"],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_length,
stride=doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length",
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# We keep the example_id that gave us this feature and we will store the offset mappings.
tokenized_examples["example_id"] = []
for i in range(len(tokenized_examples["input_ids"])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples<jupyter_output><empty_output><jupyter_text>And like before, we can apply that function to our validation set easily:<jupyter_code>validation_features = datasets["validation"].map(
prepare_validation_features,
batched=True,
remove_columns=datasets["validation"].column_names
    )<jupyter_output><empty_output><jupyter_text>Now we can grab the predictions for all features by using the `Trainer.predict` method:<jupyter_code>raw_predictions = trainer.predict(validation_features)<jupyter_output><empty_output><jupyter_text>The `Trainer` *hides* the columns that are not used by the model (here `example_id` and `offset_mapping` which we will need for our post-processing), so we set them back:<jupyter_code>validation_features.set_format(type=validation_features.format["type"], columns=list(validation_features.features.keys()))<jupyter_output><empty_output><jupyter_text>We can now refine the test we had before: since we set `None` in the offset mappings when it corresponds to a part of the question, it's easy to check if an answer is fully inside the context. We also eliminate very long answers from our considerations (with a hyper-parameter we can tune):<jupyter_code>max_answer_length = 30
start_logits = output.start_logits[0].cpu().numpy()
end_logits = output.end_logits[0].cpu().numpy()
offset_mapping = validation_features[0]["offset_mapping"]
# The first feature comes from the first example. For the more general case, we will need to match the example_id to
# an example index
context = datasets["validation"][0]["context"]
# Gather the indices of the best start/end logits:
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
valid_answers = []
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
        if start_index <= end_index: # always true at this point, thanks to the length check above
start_char = offset_mapping[start_index][0]
end_char = offset_mapping[end_index][1]
valid_answers.append(
{
"score": start_logits[start_index] + end_logits[end_index],
"text": context[start_char: end_char]
}
)
valid_answers = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[:n_best_size]
valid_answers<jupyter_output><empty_output><jupyter_text>We can compare to the actual ground-truth answer:<jupyter_code>datasets["validation"][0]["answers"]<jupyter_output><empty_output><jupyter_text>Our model picked the right answer as the most likely one! As we mentioned in the code above, this was easy on the first feature because we knew it came from the first example. For the other features, we will need a map between examples and their corresponding features. Also, since one example can give several features, we will need to gather together all the answers in all the features generated by a given example, then pick the best one. The following code builds a map from example index to its corresponding features indices:<jupyter_code>import collections
examples = datasets["validation"]
features = validation_features
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
    features_per_example[example_id_to_index[feature["example_id"]]].append(i)<jupyter_output><empty_output><jupyter_text>We're almost ready for our post-processing function. The last bit to deal with is the impossible answer (when `squad_v2 = True`). The code above only keeps answers that are inside the context; we also need to grab the score for the impossible answer (which has start and end indices corresponding to the index of the CLS token). When one example gives several features, we have to predict the impossible answer when all the features give a high score to the impossible answer (since one feature could predict the impossible answer just because the answer isn't in the part of the context it has access to), which is why the score of the impossible answer for one example is the *minimum* of the scores for the impossible answer in each feature generated by the example. We then predict the impossible answer when that score is greater than the score of the best non-impossible answer. All combined together, this gives us this post-processing function:<jupyter_code>from tqdm.auto import tqdm
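# A small worked illustration of the rule described above, with made-up numbers for one example
# that produced two features:
#     feature_null_scores = [4.0, 1.5]           # CLS-based "no answer" score of each feature
#     min_null_score = min(feature_null_scores)  # 1.5 -> the example-level null score
#     best_answer_score = 3.2                    # best non-null candidate gathered over all features
#     keep_answer = best_answer_score > min_null_score  # True here (3.2 > 1.5), so we keep the span
# The function below follows this logic; the null answer is only used when squad_v2 is True.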
def postprocess_qa_predictions(examples, features, raw_predictions, n_best_size = 20, max_answer_length = 30):
all_start_logits, all_end_logits = raw_predictions
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
predictions = collections.OrderedDict()
# Logging.
print(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_score = None # Only used if squad_v2 is True.
valid_answers = []
context = example["context"]
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
            # This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Update minimum null prediction.
cls_index = features[feature_index]["input_ids"].index(tokenizer.cls_token_id)
feature_null_score = start_logits[cls_index] + end_logits[cls_index]
            if min_null_score is None or min_null_score > feature_null_score:  # keep the smallest null score
min_null_score = feature_null_score
            # Go through all possibilities for the `n_best_size` greatest start and end logits.
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
start_char = offset_mapping[start_index][0]
end_char = offset_mapping[end_index][1]
valid_answers.append(
{
"score": start_logits[start_index] + end_logits[end_index],
"text": context[start_char: end_char]
}
)
if len(valid_answers) > 0:
best_answer = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[0]
else:
# In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
# failure.
best_answer = {"text": "", "score": 0.0}
# Let's pick our final answer: the best one or the null answer (only for squad_v2)
if not squad_v2:
predictions[example["id"]] = best_answer["text"]
else:
answer = best_answer["text"] if best_answer["score"] > min_null_score else ""
predictions[example["id"]] = answer
return predictions<jupyter_output><empty_output><jupyter_text>And we can apply our post-processing function to our raw predictions:<jupyter_code>final_predictions = postprocess_qa_predictions(datasets["validation"], validation_features, raw_predictions.predictions)<jupyter_output>Post-processing 10570 example predictions split into 10784 features.<jupyter_text>Then we can load the metric from the datasets library.<jupyter_code>metric = load_metric("squad_v2" if squad_v2 else "squad")<jupyter_output><empty_output><jupyter_text>Then we can call compute on it. We just need to format predictions and labels a bit as it expects a list of dictionaries and not one big dictionary. In the case of squad_v2, we also have to set a `no_answer_probability` argument (which we set to 0.0 here as we have already set the answer to empty if we picked it).<jupyter_code>if squad_v2:
formatted_predictions = [{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in final_predictions.items()]
else:
formatted_predictions = [{"id": k, "prediction_text": v} for k, v in final_predictions.items()]
references = [{"id": ex["id"], "answers": ex["answers"]} for ex in datasets["validation"]]
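# For reference, each entry is expected to look roughly like this (a hypothetical example):
#     {"id": "56be4db0acb8001400a502ec", "prediction_text": "Denver Broncos"}                              # prediction
#     {"id": "56be4db0acb8001400a502ec", "answers": {"text": ["Denver Broncos"], "answer_start": [177]}}   # reference
# (for squad_v2, predictions additionally carry the "no_answer_probability" key set above)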
metric.compute(predictions=formatted_predictions, references=references)<jupyter_output><empty_output><jupyter_text>You can now upload the result of the training to the Hub, just execute this instruction:<jupyter_code>trainer.push_to_hub()<jupyter_output><empty_output> | notebooks/examples/question_answering.ipynb/0 | {
"file_path": "notebooks/examples/question_answering.ipynb",
"repo_id": "notebooks",
"token_count": 15331
} | 147 |
<jupyter_start><jupyter_text>Time Series Datasets This notebook shows how to create a time series dataset from a csv file in order to then share it on the [🤗 hub](https://huggingface.co/docs/datasets/index). We will use the GluonTS library to read the csv into the appropriate format. We start by installing the libraries:<jupyter_code>! pip install -q datasets gluonts orjson<jupyter_output><empty_output><jupyter_text>GluonTS comes with a pandas DataFrame based dataset, so our strategy will be to read the csv file and process it as a `PandasDataset`. We will then iterate over it and convert it to a 🤗 dataset with the appropriate schema for time series. So let's get started! `PandasDataset` Suppose we are given multiple (10) time series stacked on top of each other in a dataframe with an `item_id` column that distinguishes different series:<jupyter_code>import pandas as pd
url = (
"https://gist.githubusercontent.com/rsnirwan/a8b424085c9f44ef2598da74ce43e7a3"
"/raw/b6fdef21fe1f654787fa0493846c546b7f9c4df2/ts_long.csv"
)
df = pd.read_csv(url, index_col=0, parse_dates=True)
df.head()<jupyter_output><empty_output><jupyter_text>After reading it into a `pd.DataFrame`, we can then convert it into GluonTS's `PandasDataset`:<jupyter_code>from gluonts.dataset.pandas import PandasDataset
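# To make the expected "long" layout explicit, here is a tiny hand-made dataframe with the same
# structure (datetime index, a `target` column and an `item_id` column). It is only an illustration
# and is not used further below:
toy_df = pd.DataFrame(
    {
        "target": [1.0, 2.0, 3.0, 10.0, 20.0, 30.0],
        "item_id": ["A", "A", "A", "B", "B", "B"],
    },
    index=pd.to_datetime(["2021-01-01", "2021-01-02", "2021-01-03"] * 2),
)
toy_df.head()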
ds = PandasDataset.from_long_dataframe(df, target="target", item_id="item_id")<jupyter_output><empty_output><jupyter_text>🤗 Datasets From here we have to map the pandas dataset's `start` field into a timestamp instead of a `pd.Period`. We do this by defining the following class:<jupyter_code>class ProcessStartField():
ts_id = 0
def __call__(self, data):
data["start"] = data["start"].to_timestamp()
data["feat_static_cat"] = [self.ts_id]
self.ts_id += 1
return data
from gluonts.itertools import Map
process_start = ProcessStartField()
list_ds = list(Map(process_start, ds))<jupyter_output><empty_output><jupyter_text>Next we need to define our schema features and create our dataset from this list via the `from_list` function:<jupyter_code>from datasets import Dataset, Features, Value, Sequence
features = Features(
{
"start": Value("timestamp[s]"),
"target": Sequence(Value("float32")),
"feat_static_cat": Sequence(Value("uint64")),
# "feat_static_real": Sequence(Value("float32")),
# "feat_dynamic_real": Sequence(Sequence(Value("uint64"))),
# "feat_dynamic_cat": Sequence(Sequence(Value("uint64"))),
"item_id": Value("string"),
}
)
dataset = Dataset.from_list(list_ds, features=features)<jupyter_output><empty_output> | notebooks/examples/time_series_datasets.ipynb/0 | {
"file_path": "notebooks/examples/time_series_datasets.ipynb",
"repo_id": "notebooks",
"token_count": 975
} | 148 |
import argparse
import logging
import os
import sys
import tensorflow as tf
from datasets import load_dataset
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification, DataCollatorWithPadding, create_optimizer
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Hyperparameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--epochs", type=int, default=3)
parser.add_argument("--train_batch_size", type=int, default=16)
parser.add_argument("--eval_batch_size", type=int, default=8)
parser.add_argument("--model_id", type=str)
parser.add_argument("--learning_rate", type=str, default=3e-5)
# Data, model, and output directories
parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"])
parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"])
parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"])
args, _ = parser.parse_known_args()
# Set up logging
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.getLevelName("INFO"),
handlers=[logging.StreamHandler(sys.stdout)],
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(args.model_id)
# Load DatasetDict
dataset = load_dataset("imdb")
# Preprocess train dataset
def preprocess_function(examples):
return tokenizer(examples["text"], truncation=True)
encoded_dataset = dataset.map(preprocess_function, batched=True)
# define tokenizer_columns
# tokenizer_columns is the list of keys from the dataset that get passed to the TensorFlow model
tokenizer_columns = ["attention_mask", "input_ids"]
# convert to TF datasets
data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf")
encoded_dataset["train"] = encoded_dataset["train"].rename_column("label", "labels")
tf_train_dataset = encoded_dataset["train"].to_tf_dataset(
columns=tokenizer_columns,
label_cols=["labels"],
shuffle=True,
        batch_size=args.train_batch_size,
collate_fn=data_collator,
)
encoded_dataset["test"] = encoded_dataset["test"].rename_column("label", "labels")
tf_validation_dataset = encoded_dataset["test"].to_tf_dataset(
columns=tokenizer_columns,
label_cols=["labels"],
shuffle=False,
        batch_size=args.eval_batch_size,
collate_fn=data_collator,
)
# Prepare model labels - useful in inference API
labels = encoded_dataset["train"].features["labels"].names
num_labels = len(labels)
label2id, id2label = dict(), dict()
for i, label in enumerate(labels):
label2id[label] = str(i)
id2label[str(i)] = label
# download model from model hub
model = TFAutoModelForSequenceClassification.from_pretrained(
args.model_id, num_labels=num_labels, label2id=label2id, id2label=id2label
)
# create Adam optimizer with learning rate scheduling
batches_per_epoch = len(encoded_dataset["train"]) // args.train_batch_size
total_train_steps = int(batches_per_epoch * args.epochs)
optimizer, _ = create_optimizer(init_lr=args.learning_rate, num_warmup_steps=0, num_train_steps=total_train_steps)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# define metric and compile model
metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# Training
logger.info("*** Train ***")
train_results = model.fit(
tf_train_dataset,
epochs=args.epochs,
validation_data=tf_validation_dataset,
)
output_eval_file = os.path.join(args.output_data_dir, "train_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Train results *****")
logger.info(train_results)
for key, value in train_results.history.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
# Save result
model.save_pretrained(args.model_dir)
tokenizer.save_pretrained(args.model_dir)
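    # For reference, a script like this is usually launched from a notebook or local machine with the
    # SageMaker Python SDK, roughly as sketched below (the instance type, role and version values are
    # placeholders, not part of this script):
    #
    #   from sagemaker.huggingface import HuggingFace
    #   huggingface_estimator = HuggingFace(
    #       entry_point="train.py",
    #       source_dir="./scripts",
    #       instance_type="ml.p3.2xlarge",
    #       instance_count=1,
    #       role=role,  # an IAM role with SageMaker permissions
    #       transformers_version="4.26",
    #       tensorflow_version="2.11",  # pick a version pair available as a Hugging Face DLC
    #       py_version="py39",
    #       hyperparameters={"epochs": 3, "train_batch_size": 16, "model_id": "distilbert-base-uncased"},
    #   )
    #   huggingface_estimator.fit()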
| notebooks/sagemaker/02_getting_started_tensorflow/scripts/train.py/0 | {
"file_path": "notebooks/sagemaker/02_getting_started_tensorflow/scripts/train.py",
"repo_id": "notebooks",
"token_count": 1677
} | 149 |
base_job_name: accelerate-sagemaker-1
compute_environment: AMAZON_SAGEMAKER
distributed_type: DATA_PARALLEL
ec2_instance_type: ml.p3.16xlarge
iam_role_name: xxxxx
image_uri: null
mixed_precision: fp16
num_machines: 1
profile: xxxxx
py_version: py38
pytorch_version: 1.10.2
region: us-east-1
transformers_version: 4.17.0
use_cpu: false | notebooks/sagemaker/22_accelerate_sagemaker_examples/src/seq2seq/accelerate_config.yaml/0 | {
"file_path": "notebooks/sagemaker/22_accelerate_sagemaker_examples/src/seq2seq/accelerate_config.yaml",
"repo_id": "notebooks",
"token_count": 138
} | 150 |
<jupyter_start><jupyter_text>Efficient Large Language Model training with LoRA and Hugging Face In this SageMaker example, we are going to learn how to apply [Low-Rank Adaptation of Large Language Models (LoRA)](https://arxiv.org/abs/2106.09685) to fine-tune BLOOMZ (the 7-billion-parameter instruction-tuned version of BLOOM) on a single GPU. We are going to leverage Hugging Face [Transformers](https://huggingface.co/docs/transformers/index), [Accelerate](https://huggingface.co/docs/accelerate/index), and [PEFT](https://github.com/huggingface/peft). You will learn how to: 1. Setup Development Environment 2. Load and prepare the dataset 3. Fine-Tune BLOOM with LoRA and bnb int-8 on Amazon SageMaker 4. Deploy the model to Amazon SageMaker Endpoint Quick intro: PEFT or Parameter Efficient Fine-tuning [PEFT](https://github.com/huggingface/peft), or Parameter Efficient Fine-tuning, is a new open-source library from Hugging Face to enable efficient adaptation of pre-trained language models (PLMs) to various downstream applications without fine-tuning all the model's parameters. PEFT currently includes techniques for: - LoRA: [LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS](https://arxiv.org/pdf/2106.09685.pdf) - Prefix Tuning: [P-Tuning v2: Prompt Tuning Can Be Comparable to Fine-tuning Universally Across Scales and Tasks](https://arxiv.org/pdf/2110.07602.pdf) - P-Tuning: [GPT Understands, Too](https://arxiv.org/pdf/2103.10385.pdf) - Prompt Tuning: [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/pdf/2104.08691.pdf)<jupyter_code>!pip install "transformers==4.26.0" "datasets[s3]==2.9.0" sagemaker py7zr --upgrade --quiet<jupyter_output><empty_output><jupyter_text>If you are going to use SageMaker in a local environment, you need access to an IAM Role with the required permissions for SageMaker. You can find more about it [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html).<jupyter_code>import sagemaker
import boto3
sess = sagemaker.Session()
# sagemaker session bucket -> used for uploading data, models and logs
# sagemaker will automatically create this bucket if it not exists
sagemaker_session_bucket=None
if sagemaker_session_bucket is None and sess is not None:
# set to default bucket if a bucket name is not given
sagemaker_session_bucket = sess.default_bucket()
try:
role = sagemaker.get_execution_role()
except ValueError:
iam = boto3.client('iam')
role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']
sess = sagemaker.Session(default_bucket=sagemaker_session_bucket)
print(f"sagemaker role arn: {role}")
print(f"sagemaker bucket: {sess.default_bucket()}")
print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output><empty_output><jupyter_text>2. Load and prepare the datasetWe will use theย [samsum](https://huggingface.co/datasets/samsum)ย dataset, a collection of about 16k messenger-like conversations with summaries. Conversations were created and written down by linguists fluent in English.```python{ "id": "13818513", "summary": "Amanda baked cookies and will bring Jerry some tomorrow.", "dialogue": "Amanda: I baked cookies. Do you want some?\r\nJerry: Sure!\r\nAmanda: I'll bring you tomorrow :-)"}```To load theย `samsum`ย dataset, we use theย `load_dataset()`ย method from the ๐ค Datasets library.<jupyter_code>from datasets import load_dataset
# Load dataset from the hub
dataset = load_dataset("samsum", split="train")
print(f"Train dataset size: {len(dataset)}")
# Train dataset size: 14732<jupyter_output><empty_output><jupyter_text>To train our model, we need to convert our inputs (text) to token IDs. This is done by a 🤗 Transformers Tokenizer. If you are not sure what this means, check out **[chapter 6](https://huggingface.co/course/chapter6/1?fw=tf)** of the Hugging Face Course.<jupyter_code>from transformers import AutoTokenizer
model_id="bigscience/bloomz-7b1"
# Load tokenizer of BLOOMZ
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.model_max_length = 2048 # overwrite wrong value<jupyter_output><empty_output><jupyter_text>Before we can start training, we need to preprocess our data. Abstractive Summarization is a text-generation task. Our model will take a text as input and generate a summary as output. We want to understand how long our inputs and outputs will be so we can batch our data efficiently. We defined a `prompt_template` which we will use to construct an instruct prompt for better performance of our model. Our `prompt_template` has a "fixed" start and end, and our document is in the middle. This means we need to ensure that the "fixed" template parts + document are not exceeding the max length of the model. We preprocess our dataset before training and save it to disk to then upload it to S3. You could run this step on your local machine or a CPU and upload it to the [Hugging Face Hub](https://huggingface.co/docs/hub/datasets-overview).<jupyter_code>from random import randint
from itertools import chain
from functools import partial
# custom instruct prompt start
prompt_template = f"Summarize the chat dialogue:\n{{dialogue}}\n---\nSummary:\n{{summary}}{{eos_token}}"
# template dataset to add prompt to each sample
def template_dataset(sample):
sample["text"] = prompt_template.format(dialogue=sample["dialogue"],
summary=sample["summary"],
eos_token=tokenizer.eos_token)
return sample
# apply prompt template per sample
dataset = dataset.map(template_dataset, remove_columns=list(dataset.features))
print(dataset[randint(0, len(dataset))]["text"])
# empty list to save remainder from batches to use in next batch
remainder = {"input_ids": [], "attention_mask": []}
def chunk(sample, chunk_length=2048):
# define global remainder variable to save remainder from batches to use in next batch
global remainder
# Concatenate all texts and add remainder from previous batch
concatenated_examples = {k: list(chain(*sample[k])) for k in sample.keys()}
concatenated_examples = {k: remainder[k] + concatenated_examples[k] for k in concatenated_examples.keys()}
# get total number of tokens for batch
batch_total_length = len(concatenated_examples[list(sample.keys())[0]])
# get max number of chunks for batch
if batch_total_length >= chunk_length:
batch_chunk_length = (batch_total_length // chunk_length) * chunk_length
# Split by chunks of max_len.
result = {
k: [t[i : i + chunk_length] for i in range(0, batch_chunk_length, chunk_length)]
for k, t in concatenated_examples.items()
}
# add remainder to global variable for next batch
remainder = {k: concatenated_examples[k][batch_chunk_length:] for k in concatenated_examples.keys()}
# prepare labels
result["labels"] = result["input_ids"].copy()
return result
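# Small worked example of the chunking above (numbers made up for illustration): with
# chunk_length=1536, a batch whose concatenated length is 5000 tokens yields
# (5000 // 1536) * 1536 = 4608 tokens, i.e. 3 chunks of 1536, while the remaining 392 tokens are
# stored in `remainder` and prepended to the next batch instead of being dropped.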
# tokenize and chunk dataset
lm_dataset = dataset.map(
lambda sample: tokenizer(sample["text"]), batched=True, remove_columns=list(dataset.features)
).map(
partial(chunk, chunk_length=1536),
batched=True,
)
# Print total number of samples
print(f"Total number of samples: {len(lm_dataset)}")<jupyter_output><empty_output><jupyter_text>After we processed the datasets we are going to use the new [FileSystem integration](https://huggingface.co/docs/datasets/filesystems) to upload our dataset to S3. We are using the `sess.default_bucket()`, adjust this if you want to store the dataset in a different S3 bucket. We will use the S3 path later in our training script.<jupyter_code># save train_dataset to s3
training_input_path = f's3://{sess.default_bucket()}/processed/samsum-sagemaker/train'
lm_dataset.save_to_disk(training_input_path)
print("uploaded data to:")
print(f"training dataset to: {training_input_path}")<jupyter_output><empty_output><jupyter_text>3. Fine-Tune BLOOM with LoRA and bnb int-8 on Amazon SageMakerIn addition to the LoRA technique, we will use [bitsanbytes LLM.int8()](https://huggingface.co/blog/hf-bitsandbytes-integration) to quantize out frozen LLM to int8. This allows us to reduce the needed memory for BLOOMZ ~4x. We prepared a [run_clm.py](./scripts/run_clm.py), which implements uses PEFT to train our model. If you are interested in how this works check-out [Efficient Large Language Model training with LoRA and Hugging Face](https://www.philschmid.de/fine-tune-flan-t5-peft) blog, where we explain the training script in detail.In order to create a sagemaker training job we need an `HuggingFace` Estimator. The Estimator handles end-to-end Amazon SageMaker training and deployment tasks. The Estimator manages the infrastructure use. SagMaker takes care of starting and managing all the required ec2 instances for us, provides the correct huggingface container, uploads the provided scripts and downloads the data from our S3 bucket into the container at `/opt/ml/input/data`. Then, it starts the training job by running.<jupyter_code>import time
# define Training Job Name
job_name = f'huggingface-peft-{time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())}'
from sagemaker.huggingface import HuggingFace
# hyperparameters, which are passed into the training job
hyperparameters ={
'model_id': model_id, # pre-trained model
'dataset_path': '/opt/ml/input/data/training', # path where sagemaker will save training dataset
'epochs': 3, # number of training epochs
'per_device_train_batch_size': 1, # batch size for training
'lr': 2e-4, # learning rate used during training
}
# create the Estimator
huggingface_estimator = HuggingFace(
entry_point = 'run_clm.py', # train script
source_dir = 'scripts', # directory which includes all the files needed for training
instance_type = 'ml.g5.2xlarge', # instances type used for the training job
instance_count = 1, # the number of instances used for training
base_job_name = job_name, # the name of the training job
role = role, # Iam role used in training job to access AWS ressources, e.g. S3
volume_size = 300, # the size of the EBS volume in GB
transformers_version = '4.26', # the transformers version used in the training job
pytorch_version = '1.13', # the pytorch_version version used in the training job
py_version = 'py39', # the python version used in the training job
hyperparameters = hyperparameters
)<jupyter_output><empty_output><jupyter_text>We can now start our training job, with the `.fit()` method passing our S3 path to the training script.<jupyter_code># define a data input dictonary with our uploaded s3 uris
data = {'training': training_input_path}
# starting the train job with our uploaded datasets as input
huggingface_estimator.fit(data, wait=True)<jupyter_output><empty_output><jupyter_text>In our example, the SageMaker training job took `20632 seconds`, which is about `5.7 hours`. The ml.g5.2xlarge instance we used costs `$1.515 per hour` for on-demand usage. As a result, the total cost for training our fine-tuned BLOOMZ-7B model was only `$8.63`. We could further reduce the training costs by using spot instances. However, there is a possibility this would result in the total training time increasing due to spot instance interruptions. See the SageMaker pricing page for instance pricing details. 4. Deploy the model to Amazon SageMaker Endpoint When using `peft` for training, you normally end up with adapter weights. We added the `merge_and_unload()` method to merge the base model with the adapter to make it easier to deploy the model, since we can then use the `pipelines` feature of the `transformers` library. SageMaker starts the deployment process by creating a SageMaker Endpoint Configuration and a SageMaker Endpoint. The Endpoint Configuration defines the model and the instance type.<jupyter_code>from sagemaker.huggingface import HuggingFaceModel
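# For completeness, merging the LoRA adapter back into the base model (the `merge_and_unload()` step
# mentioned above, which the training script runs before saving) boils down to something like this
# sketch (paths are placeholders):
#
#   from peft import PeftModel
#   from transformers import AutoModelForCausalLM
#
#   base_model = AutoModelForCausalLM.from_pretrained(model_id)
#   peft_model = PeftModel.from_pretrained(base_model, "path/to/adapter")
#   merged_model = peft_model.merge_and_unload()   # plain transformers model, no PEFT wrapper
#   merged_model.save_pretrained("path/to/merged-model")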
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
model_data=huggingface_estimator.model_data,
#model_data="s3://hf-sagemaker-inference/model.tar.gz", # Change to your model path
role=role,
transformers_version="4.26",
pytorch_version="1.13",
py_version="py39",
model_server_workers=1
)<jupyter_output><empty_output><jupyter_text>We can now deploy our model using the `deploy()` on our HuggingFace estimator object, passing in our desired number of instances and instance type.<jupyter_code># deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1,
instance_type= "ml.g5.4xlarge"
    )<jupyter_output><empty_output><jupyter_text>Note: it may take 5-10 min for the SageMaker endpoint to bring your instance online and download your model in order to be ready to accept inference requests. Let's test by using an example from the `test` split.<jupyter_code>from random import randint
from datasets import load_dataset
# Load dataset from the hub
test_dataset = load_dataset("samsum", split="test")
# select a random test sample
sample = test_dataset[randint(0,len(test_dataset))]
# format sample
prompt_template = f"Summarize the chat dialogue:\n{{dialogue}}\n---\nSummary:\n"
fomatted_sample = {
"inputs": prompt_template.format(dialogue=sample["dialogue"]),
"parameters": {
"do_sample": True, # sample output predicted probabilities
"top_p": 0.9, # sampling technique Fan et. al (2018)
"temperature": 0.1, # increasing the likelihood of high probability words and decreasing the likelihood of low probability words
"max_new_tokens": 100, # The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt
}
}
# predict
res = predictor.predict(fomatted_sample)
print(res[0]["generated_text"].split("Summary:")[-1])
# Sample model output: Kirsten and Alex are going bowling this Friday at 7 pm. They will meet up and then go together.<jupyter_output><empty_output><jupyter_text>Now let's compare the model summarized dialog output to the test sample summary.<jupyter_code>print(sample["summary"])
# Test sample summary: Kirsten reminds Alex that the youth group meets this Friday at 7 pm to go bowling.<jupyter_output><empty_output><jupyter_text>Finally, we delete the endpoint again.<jupyter_code>predictor.delete_model()
predictor.delete_endpoint()<jupyter_output><empty_output> | notebooks/sagemaker/24_train_bloom_peft_lora/sagemaker-notebook.ipynb/0 | {
"file_path": "notebooks/sagemaker/24_train_bloom_peft_lora/sagemaker-notebook.ipynb",
"repo_id": "notebooks",
"token_count": 4824
} | 151 |
<jupyter_start><jupyter_text>Deploy Zephyr 7B on AWS Inferentia2 using Amazon SageMaker This tutorial will show how easy it is to deploy Zephyr 7B on AWS Inferentia2 using Amazon SageMaker. Zephyr is a 7B parameter LLM, a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) that was trained on a mix of publicly available, synthetic datasets using [Direct Preference Optimization (DPO)](https://arxiv.org/abs/2305.18290). More details are in the [technical report](https://arxiv.org/abs/2310.16944). The model is released under the Apache 2.0 license, ensuring wide accessibility and use. We are going to show you how to: 1. Setup development environment 2. Retrieve the TGI Neuronx Image 3. Deploy Zephyr 7B to Amazon SageMaker 4. Run inference and chat with the model Let's get started. 1. Setup development environment We are going to use the `sagemaker` python SDK to deploy Zephyr to Amazon SageMaker. We need to make sure to have an AWS account configured and the `sagemaker` python SDK installed.<jupyter_code>!pip install transformers "sagemaker>=2.206.0" --upgrade --quiet<jupyter_output>/bin/pip:6: DeprecationWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html
from pkg_resources import load_entry_point
[31mERROR: sagemaker 2.206.0 has requirement PyYAML~=6.0, but you'll have pyyaml 5.3.1 which is incompatible.[0m<jupyter_text>If you are going to use Sagemaker in a local environment. You need access to an IAM Role with the required permissions for Sagemaker. You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it.<jupyter_code>import sagemaker
import boto3
sess = sagemaker.Session()
# sagemaker session bucket -> used for uploading data, models and logs
# sagemaker will automatically create this bucket if it does not exist
sagemaker_session_bucket=None
if sagemaker_session_bucket is None and sess is not None:
# set to default bucket if a bucket name is not given
sagemaker_session_bucket = sess.default_bucket()
try:
role = sagemaker.get_execution_role()
except ValueError:
iam = boto3.client('iam')
role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']
sess = sagemaker.Session(default_bucket=sagemaker_session_bucket)
print(f"sagemaker role arn: {role}")
print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output>sagemaker.config INFO - Not applying SDK defaults from location: /etc/xdg/sagemaker/config.yaml
sagemaker.config INFO - Not applying SDK defaults from location: /home/ubuntu/.config/sagemaker/config.yaml<jupyter_text>2. Retrieve TGI Neuronx ImageThe new Hugging Face TGI Neuronx DLC can be used to run inference on AWS Inferentia2. To retrieve the URI for the desired Hugging Face TGI Neuronx DLC we can use the `get_huggingface_tgi_neuronx_image_uri` method provided by the `sagemaker` SDK. This method allows us to retrieve the URI for the desired Hugging Face TGI Neuronx DLC based on the specified `backend`, `session`, `region`, and `version`. You can find the available versions [here](https://github.com/aws/deep-learning-containers/releases?q=tgi+AND+neuronx&expanded=true)_Note: At the time of writing this blog post the latest version of the Hugging Face LLM DLC is not yet available via the `get_huggingface_llm_image_uri` method. We are going to use the raw container uri instead._<jupyter_code>from sagemaker.huggingface import get_huggingface_llm_image_uri
# retrieve the llm image uri
llm_image = get_huggingface_llm_image_uri(
"huggingface-neuronx",
version="0.0.17"
)
# print ecr image uri
print(f"llm image uri: {llm_image}")<jupyter_output>llm image uri: 763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-tgi-inference:1.13.1-optimum0.0.17-neuronx-py310-ubuntu22.04<jupyter_text>4. Deploy Zephyr 7B to Amazon SageMakerText Generation Inference (TGI) on Inferentia2 supports popular open LLMs, including Llama, Mistral, and more. You can check the full list of supported models (text-generation) [here](https://huggingface.co/docs/optimum-neuron/package_reference/exportsupported-architectures). In this example, we will deploy [Hugging Face Zephyr](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) to Amazon SageMaker. Zephyr is a 7B parameter LLM fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) that was trained on a mix of publicly available, synthetic datasets using [Direct Preference Optimization (DPO)](https://arxiv.org/abs/2305.18290). You can find more details in the [technical report](https://arxiv.org/abs/2310.16944). Compiling LLMs for Inferenetia2At the time of writing, [AWS Inferentia2 does not support dynamic shapes for inference](https://awsdocs-neuron.readthedocs-hosted.com/en/v2.6.0/general/arch/neuron-features/dynamic-shapes.htmlneuron-dynamic-shapes), which means that we need to specify our sequence length and batch size in advanced. To make it easier for customers to utilize the full power of Inferentia2, we created a [neuron model cache](https://huggingface.co/docs/optimum-neuron/guides/cache_system), which contains pre-compiled configurations for the most popular LLMs. A cached configuration is defined through a model architecture (Mistral), model size (7B), neuron version (2.16), number of inferentia cores (2), batch size (2), and sequence length (2048). This means compiling fine-tuned checkpoints for Mistral 7B with the same configuration will take only a few minutes. Examples of this are [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) and [HuggingFaceH4/zephyr-7b-beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)._**Note:** Currently, TGI can only load compiled checkpoints and models. We are working on an on-the-fly compilation based on the cache. This means that you can pass any model ID from the Hugging face Hub, e.g., `HuggingFaceH4/zephyr-7b-beta` if there is a cached configuration. This should be added in the next release. We update the blog here once released._For the blog we compiled `HuggingFaceH4/zephyr-7b-beta` using the following command and parameters on a `inf2.8xlarge` instance and pushed it to the hub at []():```bash compile model with optimum for batch size 4 and sequence length 2048optimum-cli export neuron -m HuggingFaceH4/zephyr-7b-beta --batch_size 4 --sequence_length 2048 --num_cores 2 --auto_cast_type bf16 ./zephyr-7b-beta-neuron push model to hub [repo_id] [local_path] [path_in_repo]huggingface-cli upload aws-neuron/zephyr-7b-seqlen-2048-bs-4 ./zephyr-7b-beta-neuron ./ --exclude "checkpoint/**" Move tokenizer to neuron model repositorypython -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('HuggingFaceH4/zephyr-7b-beta').push_to_hub('aws-neuron/zephyr-7b-seqlen-2048-bs-4')"```If you are trying to compile an LLM with a configuration that is not yet cached, it can take up to 45 minutes. Deploying TGI Neuronx EndpointBefore deploying the model to Amazon SageMaker, we must define the TGI Neuronx endpoint configuration. 
Due to the current boundaries of Inferentia2, we need to make sure that the following parameters are aligned with the batch size and sequence length used to compile the model:* `MAX_CONCURRENT_REQUESTS`: Equal to the batch size that was used to compile the model.* `MAX_INPUT_LENGTH`: Equal to or lower than the sequence length that was used to compile the model.* `MAX_TOTAL_TOKENS`: Equal to the sequence length that was used to compile the model.* `MAX_BATCH_PREFILL_TOKENS`: Half of the max tokens, i.e. [batch_size * sequence_length] / 2* `MAX_BATCH_TOTAL_TOKENS`: Equal to the max tokens, i.e. [batch_size * sequence_length]In addition, we need to set `HF_MODEL_ID` to the Hugging Face model ID.<jupyter_code>import json
from sagemaker.huggingface import HuggingFaceModel
# sagemaker config & model config
instance_type = "ml.inf2.8xlarge"
health_check_timeout = 900
batch_size = 4
sequence_length = 2048
# Define Model and Endpoint configuration parameter
config = {
'HF_MODEL_ID': "aws-neuron/zephyr-7b-seqlen-2048-bs-4-cores-2",
'MAX_CONCURRENT_REQUESTS': json.dumps(batch_size),
'MAX_INPUT_LENGTH': json.dumps(1512),
'MAX_TOTAL_TOKENS': json.dumps(sequence_length),
'MAX_BATCH_PREFILL_TOKENS': json.dumps(int(sequence_length*batch_size / 2)),
'MAX_BATCH_TOTAL_TOKENS': json.dumps(sequence_length*batch_size),
}
# create HuggingFaceModel with the image uri
llm_model = HuggingFaceModel(
role=role,
image_uri=llm_image,
env=config
)<jupyter_output><empty_output><jupyter_text>After we have created the `HuggingFaceModel` we can deploy it to Amazon SageMaker using the `deploy` method. We will deploy the model with the `ml.inf2.8xlarge` instance type.<jupyter_code># Deploy model to an endpoint
# https://sagemaker.readthedocs.io/en/stable/api/inference/model.html#sagemaker.model.Model.deploy
llm = llm_model.deploy(
initial_instance_count=1,
instance_type=instance_type,
  container_startup_health_check_timeout=health_check_timeout, # 15 minutes to be able to load the model
)<jupyter_output>Your model is not compiled. Please compile your model before using Inferentia.<jupyter_text>SageMaker will create our endpoint and deploy the model to it. This can take 10-15 minutes. 5. Run inference and chat with the modelAfter our endpoint is deployed, we can run inference on it. We will use the `predict` method from the `predictor` to run inference on our endpoint. We can pass different parameters to influence the generation. Parameters can be defined in the `parameters` attribute of the payload. You can find the supported parameters [here](https://www.philschmid.de/sagemaker-llama-llm5-run-inference-and-chat-with-the-model) or in the open API specification of TGI in the [swagger documentation](https://huggingface.github.io/text-generation-inference/)The `HuggingFaceH4/zephyr-7b-beta` is a conversational chat model, meaning we can chat with it using the following prompt format: ```<|system|>\nYou are a friendly.\n<|user|>\nInstruction\n<|assistant|>\n```To avoid drafting the prompt manually, we can use the `apply_chat_template` method from the tokenizer, which expects a list of `messages` in the known OpenAI format and converts it into the correct format for the model. Let's see if Zephyr knows some facts about AWS.<jupyter_code>from transformers import AutoTokenizer
# load the tokenizer
tokenizer = AutoTokenizer.from_pretrained("aws-neuron/zephyr-7b-seqlen-2048-bs-4-cores-2")
# Prompt to generate
messages = [
{"role": "system", "content": "You are the AWS expert"},
{"role": "user", "content": "Can you tell me an interesting fact abou AWS?"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# Generation arguments
payload = {
"do_sample": True,
"top_p": 0.6,
"temperature": 0.9,
"top_k": 50,
"max_new_tokens": 256,
"repetition_penalty": 1.03,
"return_full_text": False,
"stop": ["</s>"]
}
chat = llm.predict({"inputs":prompt, "parameters":payload})
print(chat[0]["generated_text"][len(prompt):])
# Sure, here's an interesting fact about AWS: As of 2021, AWS has more than 200 services in its portfolio, ranging from compute power and storage to databases,<jupyter_output>Sure, here's an interesting fact about AWS: As of 2021, AWS has more than 200 services in its portfolio, ranging from compute power and storage to databases, analytics, and machine learning. This vast array of services allows developers and businesses to build and deploy complex applications and workflows with flexibility and agility, without having to manage the underlying infrastructure. In fact, AWS's extensive service offerings have contributed to its dominance in the cloud computing market, with a market share of over 30% as of 2021.</s><jupyter_text>Awesome, we have successfully deployed Zephyr to Amazon SageMaker on Inferentia2 and chatted with it. 6. Clean upTo clean up, we can delete the model and endpoint.<jupyter_code>llm.delete_model()
llm.delete_endpoint()<jupyter_output><empty_output> | notebooks/sagemaker/29_deploy_llms_on_inferentia2/sagemaker-notebook.ipynb/0 | {
"file_path": "notebooks/sagemaker/29_deploy_llms_on_inferentia2/sagemaker-notebook.ipynb",
"repo_id": "notebooks",
"token_count": 3797
} | 152 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Working with mixed adapter types
Normally, it is not possible to mix different adapter types in 🤗 PEFT. For example, even though it is possible to create a PEFT model that has two different LoRA adapters (that can have different config options), it is not possible to combine a LoRA adapter with a LoHa adapter. However, by using a mixed model, this works as long as the adapter types are compatible.
## Loading different adapter types into a PEFT model
To load different adapter types into a PEFT model, proceed the same as if you were loading two adapters of the same type, but use `PeftMixedModel` instead of `PeftModel`:
```py
from peft import PeftMixedModel
base_model = ... # load the base model, e.g. from transformers
# load first adapter, which will be called "default"
peft_model = PeftMixedModel.from_pretrained(base_model, <path_to_adapter1>)
peft_model.load_adapter(<path_to_adapter2>, adapter_name="other")
peft_model.set_adapter(["default", "other"])
```
The last line is necessary if you want to activate both adapters; otherwise, only the first adapter would be active. Of course, you can add more adapters of different types by calling `add_adapter` repeatedly.
Currently, the main purpose of mixed adapter types is to combine trained adapters for inference. Although it is technically also possible to train a mixed adapter model, this has not been tested and is not recommended.
## Tips
- Not all adapter types can be combined. See `peft.tuners.mixed.COMPATIBLE_TUNER_TYPES` for a list of compatible types. An error will be raised if you are trying to combine incompatible adapter types.
- It is possible to mix multiple adapters of the same type. This can be useful to combine adapters with very different configs.
- If you want to combine a lot of different adapters, it is most performant to add the same types of adapters consecutively. E.g., add LoRA1, LoRA2, LoHa1, LoHa2 in this order, instead of LoRA1, LoHa1, LoRA2, LoHa2. The order will make a difference for the outcome in most cases, but since no order is better a priori, it is best to choose the order that is most performant.
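For example, here is a minimal sketch of the consecutive ordering described in the last tip; the adapter paths are placeholders and the adapter names are chosen freely:
```py
from peft import PeftMixedModel
base_model = ...  # load the base model, e.g. from transformers
# add adapters of the same type back to back: both LoRA adapters first, then both LoHa adapters
peft_model = PeftMixedModel.from_pretrained(base_model, <path_to_lora1>)  # loaded as "default"
peft_model.load_adapter(<path_to_lora2>, adapter_name="lora2")
peft_model.load_adapter(<path_to_loha1>, adapter_name="loha1")
peft_model.load_adapter(<path_to_loha2>, adapter_name="loha2")
peft_model.set_adapter(["default", "lora2", "loha1", "loha2"])
```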
| peft/docs/source/developer_guides/mixed_models.md/0 | {
"file_path": "peft/docs/source/developer_guides/mixed_models.md",
"repo_id": "peft",
"token_count": 697
} | 153 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# P-tuning
[P-tuning](https://hf.co/papers/2103.10385) adds trainable prompt embeddings to the input that is optimized by a prompt encoder to find a better prompt, eliminating the need to manually design prompts. The prompt tokens can be added anywhere in the input sequence, and p-tuning also introduces anchor tokens for improving performance.
The abstract from the paper is:
*While GPTs with traditional fine-tuning fail to achieve strong results on natural language understanding (NLU), we show that GPTs can be better than or comparable to similar-sized BERTs on NLU tasks with a novel method P-tuning -- which employs trainable continuous prompt embeddings. On the knowledge probing (LAMA) benchmark, the best GPT recovers 64\% (P@1) of world knowledge without any additional text provided during test time, which substantially improves the previous best by 20+ percentage points. On the SuperGlue benchmark, GPTs achieve comparable and sometimes better performance to similar-sized BERTs in supervised learning. Importantly, we find that P-tuning also improves BERTs' performance in both few-shot and supervised settings while largely reducing the need for prompt engineering. Consequently, P-tuning outperforms the state-of-the-art approaches on the few-shot SuperGlue benchmark.*.
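For a quick orientation before the API reference below, here is a minimal sketch of applying p-tuning to a sequence classification model; the base model and hyperparameter values are illustrative, not prescriptive:
```py
from transformers import AutoModelForSequenceClassification
from peft import PromptEncoderConfig, get_peft_model
model = AutoModelForSequenceClassification.from_pretrained("roberta-large")
# 20 trainable virtual tokens, optimized through the default MLP prompt encoder
peft_config = PromptEncoderConfig(task_type="SEQ_CLS", num_virtual_tokens=20, encoder_hidden_size=128)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```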
## PromptEncoderConfig
[[autodoc]] tuners.p_tuning.config.PromptEncoderConfig
## PromptEncoder
[[autodoc]] tuners.p_tuning.model.PromptEncoder | peft/docs/source/package_reference/p_tuning.md/0 | {
"file_path": "peft/docs/source/package_reference/p_tuning.md",
"repo_id": "peft",
"token_count": 540
} | 154 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# PEFT configurations and models
The sheer size of today's large pretrained models - which commonly have billions of parameters - presents a significant training challenge because they require more storage space and more computational power to crunch all those calculations. You'll need access to powerful GPUs or TPUs to train these large pretrained models, which is expensive, not widely accessible to everyone, not environmentally friendly, and not very practical. PEFT methods address many of these challenges. There are several types of PEFT methods (soft prompting, matrix decomposition, adapters), but they all focus on the same thing: reducing the number of trainable parameters. This makes it more accessible to train and store large models on consumer hardware.
The PEFT library is designed to help you quickly train large models on free or low-cost GPUs, and in this tutorial, you'll learn how to setup a configuration to apply a PEFT method to a pretrained base model for training. Once the PEFT configuration is setup, you can use any training framework you like (Transformer's [`~transformers.Trainer`] class, [Accelerate](https://hf.co/docs/accelerate), a custom PyTorch training loop).
## PEFT configurations
<Tip>
Learn more about the parameters you can configure for each PEFT method in their respective API reference page.
</Tip>
A configuration stores important parameters that specify how a particular PEFT method should be applied.
For example, take a look at the following [`LoraConfig`](https://huggingface.co/ybelkada/opt-350m-lora/blob/main/adapter_config.json) for applying LoRA and [`PromptEncoderConfig`](https://huggingface.co/smangrul/roberta-large-peft-p-tuning/blob/main/adapter_config.json) for applying p-tuning (these configuration files are already JSON-serialized). Whenever you load a PEFT adapter, it is a good idea to check whether it has an associated adapter_config.json file which is required.
<hfoptions id="config">
<hfoption id="LoraConfig">
```json
{
"base_model_name_or_path": "facebook/opt-350m", #base model to apply LoRA to
"bias": "none",
"fan_in_fan_out": false,
"inference_mode": true,
"init_lora_weights": true,
"layers_pattern": null,
"layers_to_transform": null,
"lora_alpha": 32,
"lora_dropout": 0.05,
"modules_to_save": null,
"peft_type": "LORA", #PEFT method type
"r": 16,
"revision": null,
"target_modules": [
"q_proj", #model modules to apply LoRA to (query and value projection layers)
"v_proj"
],
"task_type": "CAUSAL_LM" #type of task to train model on
}
```
You can create your own configuration for training by initializing a [`LoraConfig`].
```py
from peft import LoraConfig, TaskType
lora_config = LoraConfig(
r=16,
target_modules=["q_proj", "v_proj"],
task_type=TaskType.CAUSAL_LM,
lora_alpha=32,
lora_dropout=0.05
)
```
</hfoption>
<hfoption id="PromptEncoderConfig">
```json
{
"base_model_name_or_path": "roberta-large", #base model to apply p-tuning to
"encoder_dropout": 0.0,
"encoder_hidden_size": 128,
"encoder_num_layers": 2,
"encoder_reparameterization_type": "MLP",
"inference_mode": true,
"num_attention_heads": 16,
"num_layers": 24,
"num_transformer_submodules": 1,
"num_virtual_tokens": 20,
"peft_type": "P_TUNING", #PEFT method type
"task_type": "SEQ_CLS", #type of task to train model on
"token_dim": 1024
}
```
You can create your own configuration for training by initializing a [`PromptEncoderConfig`].
```py
from peft import PromptEncoderConfig, TaskType
p_tuning_config = PromptEncoderConfig(
    encoder_reparameterization_type="MLP",
encoder_hidden_size=128,
num_attention_heads=16,
num_layers=24,
num_transformer_submodules=1,
num_virtual_tokens=20,
token_dim=1024,
task_type=TaskType.SEQ_CLS
)
```
</hfoption>
</hfoptions>
## PEFT models
With a PEFT configuration in hand, you can now apply it to any pretrained model to create a [`PeftModel`]. Choose from any of the state-of-the-art models from the [Transformers](https://hf.co/docs/transformers) library, a custom model, and even new and unsupported transformer architectures.
For this tutorial, load a base [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) model to finetune.
```py
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
```
Use the [`get_peft_model`] function to create a [`PeftModel`] from the base facebook/opt-350m model and the `lora_config` you created earlier.
```py
from peft import get_peft_model
lora_model = get_peft_model(model, lora_config)
lora_model.print_trainable_parameters()
"trainable params: 1,572,864 || all params: 332,769,280 || trainable%: 0.472659014678278"
```
Now you can train the [`PeftModel`] with your preferred training framework! After training, you can save your model locally with [`~PeftModel.save_pretrained`] or upload it to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method.
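For example, here is a rough sketch of training with the Transformers [`~transformers.Trainer`] class; `train_dataset` is a placeholder for your own tokenized dataset and the training arguments are illustrative:
```py
from transformers import Trainer, TrainingArguments
training_args = TrainingArguments(output_dir="opt-350m-lora", per_device_train_batch_size=4, num_train_epochs=1)
trainer = Trainer(
    model=lora_model,
    args=training_args,
    train_dataset=train_dataset,  # placeholder: your own tokenized dataset
)
trainer.train()
```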
```py
# save locally
lora_model.save_pretrained("your-name/opt-350m-lora")
# push to Hub
lora_model.push_to_hub("your-name/opt-350m-lora")
```
To load a [`PeftModel`] for inference, you'll need to provide the [`PeftConfig`] used to create it and the base model it was trained from.
```py
from peft import PeftModel, PeftConfig
config = PeftConfig.from_pretrained("ybelkada/opt-350m-lora")
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora")
```
<Tip>
By default, the [`PeftModel`] is set for inference, but if you'd like to train the adapter some more you can set `is_trainable=True`.
```py
lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora", is_trainable=True)
```
</Tip>
The [`PeftModel.from_pretrained`] method is the most flexible way to load a [`PeftModel`] because it doesn't matter what model framework was used (Transformers, timm, a generic PyTorch model). Other classes, like [`AutoPeftModel`], are just a convenient wrapper around the base [`PeftModel`], and makes it easier to load PEFT models directly from the Hub or locally where the PEFT weights are stored.
```py
from peft import AutoPeftModelForCausalLM
lora_model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
```
Take a look at the [AutoPeftModel](package_reference/auto_class) API reference to learn more about the [`AutoPeftModel`] classes.
## Next steps
With the appropriate [`PeftConfig`], you can apply it to any pretrained model to create a [`PeftModel`] and train large powerful models faster on freely available GPUs! To learn more about PEFT configurations and models, the following guide may be helpful:
* Learn how to configure a PEFT method for models that aren't from Transformers in the [Working with custom models](../developer_guides/custom_models) guide.
| peft/docs/source/tutorial/peft_model_config.md/0 | {
"file_path": "peft/docs/source/tutorial/peft_model_config.md",
"repo_id": "peft",
"token_count": 2415
} | 155 |
<jupyter_start><jupyter_code>from transformers import AutoModelForSeq2SeqLM
from peft import get_peft_config, get_peft_model, get_peft_model_state_dict, PrefixTuningConfig, TaskType
import torch
from datasets import load_dataset
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
from transformers import AutoTokenizer
from torch.utils.data import DataLoader
from transformers import default_data_collator, get_linear_schedule_with_warmup
from tqdm import tqdm
from datasets import load_dataset
device = "cuda"
model_name_or_path = "t5-large"
tokenizer_name_or_path = "t5-large"
checkpoint_name = "financial_sentiment_analysis_prefix_tuning_v1.pt"
text_column = "sentence"
label_column = "text_label"
max_length = 128
lr = 1e-2
num_epochs = 5
batch_size = 8
# creating model
peft_config = PrefixTuningConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, num_virtual_tokens=20)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
model
# loading dataset
dataset = load_dataset("financial_phrasebank", "sentences_allagree")
dataset = dataset["train"].train_test_split(test_size=0.1)
dataset["validation"] = dataset["test"]
del dataset["test"]
classes = dataset["train"].features["label"].names
dataset = dataset.map(
lambda x: {"text_label": [classes[label] for label in x["label"]]},
batched=True,
num_proc=1,
)
dataset["train"][0]
# data preprocessing
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
def preprocess_function(examples):
inputs = examples[text_column]
targets = examples[label_column]
model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt")
labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt")
labels = labels["input_ids"]
labels[labels == tokenizer.pad_token_id] = -100
model_inputs["labels"] = labels
return model_inputs
processed_datasets = dataset.map(
preprocess_function,
batched=True,
num_proc=1,
remove_columns=dataset["train"].column_names,
load_from_cache_file=False,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
# optimizer and lr scheduler
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=(len(train_dataloader) * num_epochs),
)
# training and evaluation
model = model.to(device)
for epoch in range(num_epochs):
model.train()
total_loss = 0
for step, batch in enumerate(tqdm(train_dataloader)):
batch = {k: v.to(device) for k, v in batch.items()}
outputs = model(**batch)
loss = outputs.loss
total_loss += loss.detach().float()
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
eval_loss = 0
eval_preds = []
for step, batch in enumerate(tqdm(eval_dataloader)):
batch = {k: v.to(device) for k, v in batch.items()}
with torch.no_grad():
outputs = model(**batch)
loss = outputs.loss
eval_loss += loss.detach().float()
eval_preds.extend(
tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
)
eval_epoch_loss = eval_loss / len(eval_dataloader)
eval_ppl = torch.exp(eval_epoch_loss)
train_epoch_loss = total_loss / len(train_dataloader)
train_ppl = torch.exp(train_epoch_loss)
print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
# print accuracy
correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["validation"]["text_label"]):
if pred.strip() == true.strip():
correct += 1
total += 1
accuracy = correct / total * 100
print(f"{accuracy=} % on the evaluation dataset")
print(f"{eval_preds[:10]=}")
print(f"{dataset['validation']['text_label'][:10]=}")
# saving model
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"
model.save_pretrained(peft_model_id)
ckpt = f"{peft_model_id}/adapter_model.bin"
!du -h $ckpt
from peft import PeftModel, PeftConfig
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
model.eval()
i = 107
inputs = tokenizer(dataset["validation"][text_column][i], return_tensors="pt")
print(dataset["validation"][text_column][i])
print(inputs)
with torch.no_grad():
outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
print(outputs)
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))<jupyter_output>Acando AB ( ACANB SS ) fell 8.9 percent to 13.35 kronor , the lowest close since Dec. 11 .
{'input_ids': tensor([[ 4292, 232, 32, 3, 5359, 41, 3, 22029, 14972, 3,
4256, 3, 61, 4728, 4848, 1298, 1093, 12, 8808, 2469,
3, 22318, 29, 127, 3, 6, 8, 7402, 885, 437,
4451, 5, 850, 3, 5, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}
tensor([[ 0, 2841, 1]])
['negative'] | peft/examples/conditional_generation/peft_prefix_tuning_seq2seq.ipynb/0 | {
"file_path": "peft/examples/conditional_generation/peft_prefix_tuning_seq2seq.ipynb",
"repo_id": "peft",
"token_count": 2479
} | 156 |
accelerate launch --config_file config.yaml peft_adalora_whisper_large_training.py \
--model_name_or_path "openai/whisper-large-v2" \
--language "Marathi" \
--language_abbr "mr" \
--task "transcribe" \
--dataset_name "mozilla-foundation/common_voice_11_0" \
--push_to_hub \
--preprocessing_num_workers 2 \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 8 \
--dataloader_pin_memory \
--dataloader_num_workers 2 \
--learning_rate 1e-3 \
--weight_decay 1e-4 \
--num_train_epochs 3 \
--gradient_accumulation_steps 1 \
--lr_scheduler_type "linear" \
--num_warmup_steps 50 \
--output_dir "adalora_whisper_large_marathi_multi_adapter" \
--seed 42 \
--load_best_model \
--with_tracking \
--report_to "wandb" \
--hub_token $HUB_TOKEN \
--checkpointing_steps 2000 \
--evaluation_steps 2000 \
--logging_steps 25 \
--use_peft \
--use_adalora \
--init_r 12 \
--target_r 8 \
--tinit 100 \
--tfinal 800 \
--delta_t 10 \
--lora_alpha 32 \
--lora_dropout 0.1 \
--orth_reg_weight 0.5 | peft/examples/int8_training/run_adalora_whisper_int8.sh/0 | {
"file_path": "peft/examples/int8_training/run_adalora_whisper_int8.sh",
"repo_id": "peft",
"token_count": 509
} | 157 |
import json, os
import argparse
from pathlib import Path
from datetime import date
from tabulate import tabulate
MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters
parser = argparse.ArgumentParser()
parser.add_argument(
"--slack_channel_name",
default="peft-ci-daily"
)
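# Aggregate the pytest JSON-line reports (*.log) produced by the scheduled CI runs, build a Slack
# message summarizing the failed tests per log file, and post it to the requested channel when the
# TEST_TYPE environment variable is set.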
def main(slack_channel_name=None):
failed = []
passed = []
group_info = []
total_num_failed = 0
    empty_file = len(list(Path().glob("*.log"))) == 0
total_empty_files = []
for log in Path().glob("*.log"):
section_num_failed = 0
i = 0
with open(log, "r") as f:
for line in f:
line = json.loads(line)
i += 1
if line.get("nodeid", "") != "":
test = line["nodeid"]
if line.get("duration", None) is not None:
duration = f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
else:
passed.append([test, duration, log.name.split('_')[0]])
empty_file = i == 0
group_info.append([str(log), section_num_failed, failed])
total_empty_files.append(empty_file)
os.remove(log)
failed = []
no_error_payload = {
"type": "section",
"text": {
"type": "plain_text",
"text": "๐ There were no failures!" if not any(total_empty_files) else "Something went wrong there is at least one empty file - please check GH action results.",
"emoji": True
}
}
message = ""
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "๐ค Results of the {} PEFT scheduled tests.".format(os.environ.get("TEST_TYPE", "")),
}
},
]
if total_num_failed > 0:
for i, (name, num_failed, failed_tests) in enumerate(group_info):
if num_failed > 0:
if num_failed == 1:
message += f"*{name}: {num_failed} failed test*\n"
else:
message += f"*{name}: {num_failed} failed tests*\n"
failed_table = []
for test in failed_tests:
failed_table.append(test[0].split("::"))
failed_table = tabulate(failed_table, headers=["Test Location", "Test Case", "Test Name"], showindex="always", tablefmt="grid", maxcolwidths=[12, 12, 12])
message += '\n```\n' +failed_table + '\n```'
if total_empty_files[i]:
message += f"\n*{name}: Warning! Empty file - please check the GitHub action job *\n"
print(f'### {message}')
else:
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
if len(message) > MAX_LEN_MESSAGE:
print(f"Truncating long message from {len(message)} to {MAX_LEN_MESSAGE}")
message = message[:MAX_LEN_MESSAGE] + "..."
if len(message) != 0:
md_report = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message
},
}
payload.append(md_report)
action_button = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*"
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/peft/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
},
],
}
payload.append(date_report)
print(payload)
client = WebClient(token=os.environ.get("SLACK_API_TOKEN"))
client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload)
if __name__ == "__main__":
args = parser.parse_args()
main(args.slack_channel_name)
| peft/scripts/log_reports.py/0 | {
"file_path": "peft/scripts/log_reports.py",
"repo_id": "peft",
"token_count": 2483
} | 158 |
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .layer import AdaLoraLayer
class SVDQuantLinear(torch.nn.Module, AdaLoraLayer):
def __init__(
self,
base_layer,
adapter_name,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
AdaLoraLayer.__init__(self, base_layer)
# self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
# for backwards compatibility
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
def forward(self, x: torch.Tensor) -> torch.Tensor:
result = self.quant_linear_module(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
lora_E = self.lora_E[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
ranknum = self.ranknum[active_adapter] + 1e-5
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
if x.dtype != torch.float32:
x = x.float()
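            # SVD-based AdaLoRA delta: x @ (A * E)^T @ B^T, multiplied by the scaling factor and
            # normalized by the adapter's current rank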
output = (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
# TODO: here, the dtype conversion is applied on the *whole expression*,
# not the intermediate result, unlike for SVDLinear8bitLT and
# SVDLinear4bit, is that correct?
if requires_conversion:
output = output.to(expected_dtype)
result += output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "adalora." + rep
| peft/src/peft/tuners/adalora/gptq.py/0 | {
"file_path": "peft/src/peft/tuners/adalora/gptq.py",
"repo_id": "peft",
"token_count": 1180
} | 159 |
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.tuners.prompt_tuning import PromptTuningConfig
from peft.utils import PeftType
class MultitaskPromptTuningInit(str, enum.Enum):
# initialize prompt with text
TEXT = "TEXT"
# initialize prompt with random matrix
RANDOM = "RANDOM"
# average the prefix and column matrices obtained during source training
AVERAGE_SOURCE_TASKS = "AVERAGE_SOURCE_TASKS"
# pick prefix and column matrices for a particular task obtained during source training
EXACT_SOURCE_TASK = "EXACT_SOURCE_TASK"
# only use the prompt embeddings trained during source training
ONLY_SOURCE_SHARED = "ONLY_SOURCE_SHARED"
@dataclass
class MultitaskPromptTuningConfig(PromptTuningConfig):
prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field(
default=MultitaskPromptTuningInit.RANDOM,
metadata={
"help": (
"How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, "
"EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED."
),
},
)
prompt_tuning_init_state_dict_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The path of source state dict. This is required when training the downstream target prompt from "
"the pretrained source prompt"
),
},
)
prompt_tuning_init_task: Optional[int] = field(default=0, metadata={"help": "source task id for initialization"})
num_ranks: Optional[int] = field(default=1, metadata={"help": "ranks"})
num_tasks: Optional[int] = field(default=1, metadata={"help": "number of tasks"})
def __post_init__(self):
self.peft_type = PeftType.MULTITASK_PROMPT_TUNING
| peft/src/peft/tuners/multitask_prompt_tuning/config.py/0 | {
"file_path": "peft/src/peft/tuners/multitask_prompt_tuning/config.py",
"repo_id": "peft",
"token_count": 890
} | 160 |
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on https://github.com/THUDM/P-tuning-v2/blob/main/model/prefix_encoder.py
# with some refactor
import torch
class PrefixEncoder(torch.nn.Module):
r"""
The `torch.nn` model to encode the prefix.
Args:
config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.
Example:
```py
>>> from peft import PrefixEncoder, PrefixTuningConfig
>>> config = PrefixTuningConfig(
... peft_type="PREFIX_TUNING",
... task_type="SEQ_2_SEQ_LM",
... num_virtual_tokens=20,
... token_dim=768,
... num_transformer_submodules=1,
... num_attention_heads=12,
... num_layers=12,
... encoder_hidden_size=768,
... )
>>> prefix_encoder = PrefixEncoder(config)
```
**Attributes**:
- **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.
- **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if
`prefix_projection` is `True`.
- **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.
Input shape: (`batch_size`, `num_virtual_tokens`)
Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)
"""
def __init__(self, config):
super().__init__()
self.prefix_projection = config.prefix_projection
token_dim = config.token_dim
num_layers = config.num_layers
encoder_hidden_size = config.encoder_hidden_size
num_virtual_tokens = config.num_virtual_tokens
if self.prefix_projection and not config.inference_mode:
# Use a two-layer MLP to encode the prefix
self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)
self.transform = torch.nn.Sequential(
torch.nn.Linear(token_dim, encoder_hidden_size),
torch.nn.Tanh(),
torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),
)
else:
self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)
def forward(self, prefix: torch.Tensor):
if self.prefix_projection:
prefix_tokens = self.embedding(prefix)
past_key_values = self.transform(prefix_tokens)
else:
past_key_values = self.embedding(prefix)
return past_key_values
| peft/src/peft/tuners/prefix_tuning/model.py/0 | {
"file_path": "peft/src/peft/tuners/prefix_tuning/model.py",
"repo_id": "peft",
"token_count": 1235
} | 161 |
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
import pytest
import torch
import torch.nn.functional as F
from parameterized import parameterized
from transformers import (
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoTokenizer,
BitsAndBytesConfig,
LlamaForCausalLM,
WhisperForConditionalGeneration,
)
from peft import (
AdaLoraConfig,
AdaptionPromptConfig,
IA3Config,
LoraConfig,
PeftModel,
TaskType,
get_peft_model,
prepare_model_for_kbit_training,
)
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from .testing_utils import require_bitsandbytes, require_torch_gpu, require_torch_multi_gpu
if is_bnb_available():
import bitsandbytes as bnb
from peft.tuners.ia3 import Linear8bitLt as IA3Linear8bitLt
from peft.tuners.lora import Linear8bitLt as LoraLinear8bitLt
if is_bnb_4bit_available():
from peft.tuners.ia3 import Linear4bit as IA3Linear4bit
from peft.tuners.lora import Linear4bit as LoraLinear4bit
@require_torch_gpu
class PeftGPUCommonTests(unittest.TestCase):
r"""
A common tester to run common operations that are performed on GPU such as generation, loading in 8bit, etc.
"""
def setUp(self):
self.seq2seq_model_id = "google/flan-t5-base"
self.causal_lm_model_id = "facebook/opt-350m"
self.audio_model_id = "openai/whisper-large"
if torch.cuda.is_available():
self.device = torch.device("cuda:0")
def tearDown(self):
r"""
Efficient mechanism to free GPU memory after each test. Based on
https://github.com/huggingface/transformers/issues/21094
"""
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
@require_bitsandbytes
@pytest.mark.multi_gpu_tests
@pytest.mark.single_gpu_tests
def test_lora_bnb_8bit_quantization(self):
r"""
Test that tests if the 8bit quantization using LoRA works as expected
"""
whisper_8bit = WhisperForConditionalGeneration.from_pretrained(
self.audio_model_id,
device_map="auto",
load_in_8bit=True,
)
opt_8bit = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
device_map="auto",
load_in_8bit=True,
)
flan_8bit = AutoModelForSeq2SeqLM.from_pretrained(
self.seq2seq_model_id,
device_map="auto",
load_in_8bit=True,
)
flan_lora_config = LoraConfig(
r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="SEQ_2_SEQ_LM"
)
opt_lora_config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none")
flan_8bit = get_peft_model(flan_8bit, flan_lora_config)
self.assertTrue(
isinstance(flan_8bit.base_model.model.encoder.block[0].layer[0].SelfAttention.q, LoraLinear8bitLt)
)
opt_8bit = get_peft_model(opt_8bit, opt_lora_config)
self.assertTrue(
isinstance(opt_8bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear8bitLt)
)
whisper_8bit = get_peft_model(whisper_8bit, config)
self.assertTrue(
isinstance(whisper_8bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear8bitLt)
)
@require_bitsandbytes
@pytest.mark.multi_gpu_tests
@pytest.mark.single_gpu_tests
def test_ia3_bnb_8bit_quantization(self):
r"""
Test that tests if the 8bit quantization using IA3 works as expected
"""
whisper_8bit = WhisperForConditionalGeneration.from_pretrained(
self.audio_model_id,
device_map="auto",
load_in_8bit=True,
)
opt_8bit = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
device_map="auto",
load_in_8bit=True,
)
flan_8bit = AutoModelForSeq2SeqLM.from_pretrained(
self.seq2seq_model_id,
device_map="auto",
load_in_8bit=True,
)
flan_ia3_config = IA3Config(target_modules=["q", "v"], task_type="SEQ_2_SEQ_LM")
opt_ia3_config = IA3Config(
target_modules=["q_proj", "v_proj", "fc2"],
feedforward_modules=["fc2"],
task_type="CAUSAL_LM",
)
config = IA3Config(target_modules=["q_proj", "v_proj", "fc2"], feedforward_modules=["fc2"])
flan_8bit = get_peft_model(flan_8bit, flan_ia3_config)
self.assertTrue(
isinstance(flan_8bit.base_model.model.encoder.block[0].layer[0].SelfAttention.q, IA3Linear8bitLt)
)
opt_8bit = get_peft_model(opt_8bit, opt_ia3_config)
self.assertTrue(
isinstance(opt_8bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, IA3Linear8bitLt)
)
whisper_8bit = get_peft_model(whisper_8bit, config)
self.assertTrue(
isinstance(whisper_8bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, IA3Linear8bitLt)
)
@require_bitsandbytes
@pytest.mark.multi_gpu_tests
@pytest.mark.single_gpu_tests
@parameterized.expand(["4bit", "8bit"])
def test_lora_bnb_quantization_from_pretrained_safetensors(self, quantization):
r"""
Tests that the bnb quantization using LoRA works as expected with safetensors weights.
"""
model_id = "facebook/opt-350m"
peft_model_id = "ybelkada/test-st-lora"
kwargs = {"device_map": "auto"}
if quantization == "4bit":
kwargs["load_in_4bit"] = True
else:
kwargs["load_in_8bit"] = True
model = AutoModelForCausalLM.from_pretrained(model_id, **kwargs)
model = PeftModel.from_pretrained(model, peft_model_id)
model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
# loading a 2nd adapter works, #1239
model.load_adapter(peft_model_id, "adapter2")
model.set_adapter("adapter2")
model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
# check that both adapters are in the same layer
self.assertIn("default", model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A)
self.assertIn("adapter2", model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A)
@require_bitsandbytes
@pytest.mark.multi_gpu_tests
@pytest.mark.single_gpu_tests
@parameterized.expand(["4bit", "8bit"])
def test_adalora_bnb_quantization_from_pretrained_safetensors(self, quantization):
r"""
Tests that the bnb quantization using AdaLora works as expected with safetensors weights.
"""
model_id = "facebook/opt-350m"
kwargs = {"device_map": "auto"}
if quantization == "4bit":
kwargs["load_in_4bit"] = True
else:
kwargs["load_in_8bit"] = True
model = AutoModelForCausalLM.from_pretrained(model_id, **kwargs)
config = AdaLoraConfig(task_type=TaskType.CAUSAL_LM)
peft_model = get_peft_model(model, config)
peft_model = prepare_model_for_kbit_training(peft_model)
peft_model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
with tempfile.TemporaryDirectory() as tmp_dir:
peft_model.save_pretrained(tmp_dir)
model = AutoModelForCausalLM.from_pretrained(model_id, **kwargs)
model = PeftModel.from_pretrained(model, tmp_dir)
model = prepare_model_for_kbit_training(peft_model)
model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
# loading a 2nd adapter works, #1239
model.load_adapter(tmp_dir, "adapter2")
model.set_adapter("adapter2")
model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
# check that both adapters are in the same layer
self.assertIn("default", model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A)
self.assertIn("adapter2", model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A)
@require_bitsandbytes
@pytest.mark.multi_gpu_tests
@pytest.mark.single_gpu_tests
@parameterized.expand(["4bit", "8bit"])
def test_ia3_bnb_quantization_from_pretrained_safetensors(self, quantization):
r"""
        Tests that the bnb quantization using IA³ works as expected with safetensors weights.
"""
model_id = "facebook/opt-350m"
kwargs = {"device_map": "auto"}
if quantization == "4bit":
kwargs["load_in_4bit"] = True
else:
kwargs["load_in_8bit"] = True
model = AutoModelForCausalLM.from_pretrained(model_id, **kwargs)
config = IA3Config(task_type=TaskType.CAUSAL_LM)
peft_model = get_peft_model(model, config)
peft_model = prepare_model_for_kbit_training(peft_model)
peft_model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
with tempfile.TemporaryDirectory() as tmp_dir:
peft_model.save_pretrained(tmp_dir)
model = AutoModelForCausalLM.from_pretrained(model_id, **kwargs)
model = PeftModel.from_pretrained(model, tmp_dir)
model = prepare_model_for_kbit_training(model)
model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
# loading a 2nd adapter works, #1239
model.load_adapter(tmp_dir, "adapter2")
model.set_adapter("adapter2")
model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
# check that both adapters are in the same layer
self.assertIn("default", model.base_model.model.model.decoder.layers[0].self_attn.q_proj.ia3_l)
self.assertIn("adapter2", model.base_model.model.model.decoder.layers[0].self_attn.q_proj.ia3_l)
@pytest.mark.single_gpu_tests
def test_lora_gptq_quantization_from_pretrained_safetensors(self):
r"""
Tests that the autogptq quantization using LoRA works as expected with safetensors weights.
"""
from transformers import GPTQConfig
model_id = "marcsun13/opt-350m-gptq-4bit"
quantization_config = GPTQConfig(bits=4, use_exllama=False)
kwargs = {
"pretrained_model_name_or_path": model_id,
"torch_dtype": torch.float16,
"device_map": "auto",
"quantization_config": quantization_config,
}
model = AutoModelForCausalLM.from_pretrained(**kwargs)
model = prepare_model_for_kbit_training(model)
config = LoraConfig(task_type="CAUSAL_LM")
peft_model = get_peft_model(model, config)
peft_model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
with tempfile.TemporaryDirectory() as tmp_dir:
peft_model.save_pretrained(tmp_dir)
model = AutoModelForCausalLM.from_pretrained(**kwargs)
model = PeftModel.from_pretrained(model, tmp_dir)
model = prepare_model_for_kbit_training(model)
model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
# loading a 2nd adapter works, #1239
model.load_adapter(tmp_dir, "adapter2")
model.set_adapter("adapter2")
model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
# check that both adapters are in the same layer
self.assertIn("default", model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A)
self.assertIn("adapter2", model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A)
@require_bitsandbytes
@pytest.mark.multi_gpu_tests
@pytest.mark.single_gpu_tests
def test_lora_bnb_4bit_quantization(self):
r"""
Test that tests if the 4bit quantization using LoRA works as expected
"""
whisper_4bit = WhisperForConditionalGeneration.from_pretrained(
self.audio_model_id,
device_map="auto",
load_in_4bit=True,
)
opt_4bit = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
device_map="auto",
load_in_4bit=True,
)
flan_4bit = AutoModelForSeq2SeqLM.from_pretrained(
self.seq2seq_model_id,
device_map="auto",
load_in_4bit=True,
)
flan_lora_config = LoraConfig(
r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="SEQ_2_SEQ_LM"
)
opt_lora_config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none")
flan_4bit = get_peft_model(flan_4bit, flan_lora_config)
self.assertTrue(
isinstance(flan_4bit.base_model.model.encoder.block[0].layer[0].SelfAttention.q, LoraLinear4bit)
)
opt_4bit = get_peft_model(opt_4bit, opt_lora_config)
self.assertTrue(isinstance(opt_4bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear4bit))
whisper_4bit = get_peft_model(whisper_4bit, config)
self.assertTrue(
isinstance(whisper_4bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear4bit)
)
@require_bitsandbytes
@pytest.mark.multi_gpu_tests
@pytest.mark.single_gpu_tests
def test_ia3_bnb_4bit_quantization(self):
r"""
Test that tests if the 4bit quantization using IA3 works as expected
"""
whisper_4bit = WhisperForConditionalGeneration.from_pretrained(
self.audio_model_id,
device_map="auto",
load_in_4bit=True,
)
opt_4bit = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
device_map="auto",
load_in_4bit=True,
)
flan_4bit = AutoModelForSeq2SeqLM.from_pretrained(
self.seq2seq_model_id,
device_map="auto",
load_in_4bit=True,
)
flan_ia3_config = IA3Config(target_modules=["q", "v"], task_type="SEQ_2_SEQ_LM")
opt_ia3_config = IA3Config(
target_modules=["q_proj", "v_proj", "fc2"],
feedforward_modules=["fc2"],
task_type="CAUSAL_LM",
)
config = IA3Config(target_modules=["q_proj", "v_proj", "fc2"], feedforward_modules=["fc2"])
flan_4bit = get_peft_model(flan_4bit, flan_ia3_config)
self.assertTrue(
isinstance(flan_4bit.base_model.model.encoder.block[0].layer[0].SelfAttention.q, IA3Linear4bit)
)
opt_4bit = get_peft_model(opt_4bit, opt_ia3_config)
self.assertTrue(isinstance(opt_4bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, IA3Linear4bit))
whisper_4bit = get_peft_model(whisper_4bit, config)
self.assertTrue(
isinstance(whisper_4bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, IA3Linear4bit)
)
@pytest.mark.multi_gpu_tests
@require_torch_multi_gpu
def test_lora_causal_lm_multi_gpu_inference(self):
r"""
Test if LORA can be used for inference on multiple GPUs.
"""
lora_config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, device_map="balanced")
tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id)
self.assertEqual(set(model.hf_device_map.values()), set(range(torch.cuda.device_count())))
model = get_peft_model(model, lora_config)
self.assertTrue(isinstance(model, PeftModel))
dummy_input = "This is a dummy input:"
input_ids = tokenizer(dummy_input, return_tensors="pt").input_ids.to(self.device)
# this should work without any problem
_ = model.generate(input_ids=input_ids)
@require_torch_multi_gpu
@pytest.mark.multi_gpu_tests
@require_bitsandbytes
def test_lora_seq2seq_lm_multi_gpu_inference(self):
r"""
Test if LORA can be used for inference on multiple GPUs - 8bit version.
"""
lora_config = LoraConfig(
r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="SEQ_2_SEQ_LM"
)
model = AutoModelForSeq2SeqLM.from_pretrained(self.seq2seq_model_id, device_map="balanced", load_in_8bit=True)
tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id)
self.assertEqual(set(model.hf_device_map.values()), set(range(torch.cuda.device_count())))
model = get_peft_model(model, lora_config)
self.assertTrue(isinstance(model, PeftModel))
self.assertTrue(isinstance(model.base_model.model.encoder.block[0].layer[0].SelfAttention.q, LoraLinear8bitLt))
dummy_input = "This is a dummy input:"
input_ids = tokenizer(dummy_input, return_tensors="pt").input_ids.to(self.device)
# this should work without any problem
_ = model.generate(input_ids=input_ids)
@require_torch_multi_gpu
@pytest.mark.multi_gpu_tests
@require_bitsandbytes
def test_adaption_prompt_8bit(self):
model = LlamaForCausalLM.from_pretrained(
"HuggingFaceM4/tiny-random-LlamaForCausalLM",
load_in_8bit=True,
torch_dtype=torch.float16,
device_map="auto",
)
model = prepare_model_for_kbit_training(model)
config = AdaptionPromptConfig(
adapter_len=10,
adapter_layers=2,
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(0)
_ = model(random_input)
@require_torch_multi_gpu
@pytest.mark.multi_gpu_tests
@require_bitsandbytes
def test_adaption_prompt_4bit(self):
model = LlamaForCausalLM.from_pretrained(
"HuggingFaceM4/tiny-random-LlamaForCausalLM",
load_in_4bit=True,
torch_dtype=torch.float16,
device_map="auto",
)
model = prepare_model_for_kbit_training(model)
config = AdaptionPromptConfig(
adapter_len=10,
adapter_layers=2,
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(0)
_ = model(random_input)
@require_torch_gpu
@pytest.mark.single_gpu_tests
@require_bitsandbytes
def test_print_4bit_expected(self):
EXPECTED_TRAINABLE_PARAMS = 294912
EXPECTED_ALL_PARAMS = 125534208
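        # With PEFT's default LoRA target modules for OPT (q_proj and v_proj), r=8 and hidden size 768,
        # opt-125m yields 12 layers * 2 modules * 2 * (8 * 768) = 294,912 trainable LoRA parameters.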
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-125m",
load_in_4bit=True,
)
config = LoraConfig(
r=8,
)
model = get_peft_model(model, config)
trainable_params, all_params = model.get_nb_trainable_parameters()
self.assertEqual(trainable_params, EXPECTED_TRAINABLE_PARAMS)
self.assertEqual(all_params, EXPECTED_ALL_PARAMS)
# test with double quant
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
)
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-125m",
quantization_config=bnb_config,
)
config = LoraConfig(
r=8,
)
model = get_peft_model(model, config)
trainable_params, all_params = model.get_nb_trainable_parameters()
self.assertEqual(trainable_params, EXPECTED_TRAINABLE_PARAMS)
self.assertEqual(all_params, EXPECTED_ALL_PARAMS)
@require_torch_gpu
@pytest.mark.single_gpu_tests
@require_bitsandbytes
def test_modules_to_save_grad(self):
model_id = "bigscience/bloomz-560m"
load_in_4bit = True
model = AutoModelForSequenceClassification.from_pretrained(
model_id,
load_in_4bit=load_in_4bit,
torch_dtype=torch.float32,
)
model = prepare_model_for_kbit_training(model)
config = LoraConfig(
r=16,
lora_alpha=16,
lora_dropout=0.05,
bias="none",
task_type="SEQ_CLS",
)
peft_model = get_peft_model(model, config)
lm_head = peft_model.base_model.model.score
original_module = lm_head.original_module
modules_to_save = lm_head.modules_to_save.default
inputs = torch.randn((1024))
o1 = lm_head(inputs)
o1.mean().backward()
self.assertTrue(modules_to_save.weight.requires_grad is True)
self.assertTrue(original_module.weight.grad is None)
self.assertTrue(modules_to_save.weight.grad is not None)
@require_torch_gpu
@pytest.mark.single_gpu_tests
@require_bitsandbytes
def test_8bit_merge_lora(self):
torch.manual_seed(1000)
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-125m",
load_in_8bit=True,
)
random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(model.device)
out_base = F.softmax(model(random_input).logits, dim=-1)
config = LoraConfig(
r=8,
init_lora_weights=False,
)
model = get_peft_model(model, config)
with torch.inference_mode():
out_before_merge = F.softmax(model(random_input).logits, dim=-1)
model.merge_and_unload()
with torch.inference_mode():
out_after_merge = F.softmax(model(random_input).logits, dim=-1)
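        # tolerances are pretty high because some deviations are expected with quantization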
atol = 0.01
rtol = 10
self.assertFalse(torch.allclose(out_base, out_before_merge, atol=atol, rtol=rtol))
self.assertTrue(torch.allclose(out_before_merge, out_after_merge, atol=atol, rtol=rtol))
self.assertTrue(isinstance(model, PeftModel))
self.assertTrue(
isinstance(model.base_model.model.model.decoder.layers[0].self_attn.q_proj, bnb.nn.Linear8bitLt)
)
self.assertTrue(
isinstance(model.base_model.model.model.decoder.layers[0].self_attn.v_proj, bnb.nn.Linear8bitLt)
)
@require_torch_gpu
@pytest.mark.single_gpu_tests
@require_bitsandbytes
def test_8bit_merge_and_disable_lora(self):
torch.manual_seed(1000)
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-125m",
load_in_8bit=True,
)
random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(model.device)
# compare outputs in probability space, because logits can have outliers
# and token ids are not precise enough
out_base = F.softmax(model(random_input).logits, dim=-1)
config = LoraConfig(
r=8,
init_lora_weights=False,
)
model = get_peft_model(model, config)
with torch.inference_mode():
out_before = F.softmax(model(random_input).logits, dim=-1)
model.merge_adapter()
with model.disable_adapter():
with torch.inference_mode():
out_after = F.softmax(model(random_input).logits, dim=-1)
atol = 0.01
rtol = 10
self.assertFalse(torch.allclose(out_base, out_before, atol=atol, rtol=rtol))
self.assertTrue(torch.allclose(out_base, out_after, atol=atol, rtol=rtol))
self.assertTrue(isinstance(model, PeftModel))
self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.q_proj, LoraLinear8bitLt))
self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear8bitLt))
@require_torch_gpu
@pytest.mark.single_gpu_tests
@require_bitsandbytes
def test_4bit_merge_lora(self):
torch.manual_seed(3000)
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=False,
            bnb_4bit_compute_dtype=torch.float32,
)
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-125m",
quantization_config=bnb_config,
torch_dtype=torch.float32,
)
random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(model.device)
# compare outputs in probability space, because logits can have outliers
# and token ids are not precise enough
out_base = F.softmax(model(random_input).logits, dim=-1)
config = LoraConfig(
r=8,
init_lora_weights=False,
)
model = get_peft_model(model, config)
with torch.inference_mode():
out_before_merge = F.softmax(model(random_input).logits, dim=-1)
model.merge_and_unload()
with torch.inference_mode():
out_after_merge = F.softmax(model(random_input).logits, dim=-1)
# tolerances are pretty high because some deviations are expected with quantization
atol = 0.01
rtol = 10
self.assertFalse(torch.allclose(out_base, out_before_merge, atol=atol, rtol=rtol))
self.assertTrue(torch.allclose(out_before_merge, out_after_merge, atol=atol, rtol=rtol))
self.assertTrue(isinstance(model, PeftModel))
self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.q_proj, bnb.nn.Linear4bit))
self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.v_proj, bnb.nn.Linear4bit))
@require_torch_gpu
@pytest.mark.single_gpu_tests
@require_bitsandbytes
def test_4bit_merge_and_disable_lora(self):
torch.manual_seed(3000)
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=False,
            bnb_4bit_compute_dtype=torch.float32,
)
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-125m",
quantization_config=bnb_config,
torch_dtype=torch.float32,
)
random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(model.device)
# compare outputs in probability space, because logits can have outliers
# and token ids are not precise enough
out_base = F.softmax(model(random_input).logits, dim=-1)
config = LoraConfig(
r=8,
init_lora_weights=False,
)
model = get_peft_model(model, config)
with torch.inference_mode():
out_before = F.softmax(model(random_input).logits, dim=-1)
model.merge_adapter()
with model.disable_adapter():
with torch.inference_mode():
out_after = F.softmax(model(random_input).logits, dim=-1)
atol = 0.01
rtol = 10
self.assertFalse(torch.allclose(out_base, out_before, atol=atol, rtol=rtol))
self.assertTrue(torch.allclose(out_base, out_after, atol=atol, rtol=rtol))
self.assertTrue(isinstance(model, PeftModel))
self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.q_proj, LoraLinear4bit))
self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear4bit))
@require_torch_gpu
@pytest.mark.single_gpu_tests
def test_serialization_shared_tensors(self):
model_checkpoint = "roberta-base"
peft_config = LoraConfig(
task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all"
)
model = AutoModelForTokenClassification.from_pretrained(model_checkpoint, num_labels=11).to("cuda")
model = get_peft_model(model, peft_config)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, safe_serialization=True)
| peft/tests/test_common_gpu.py/0 | {
"file_path": "peft/tests/test_common_gpu.py",
"repo_id": "peft",
"token_count": 14065
} | 162 |
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import pickle
import re
import tempfile
from collections import OrderedDict
from dataclasses import replace
import torch
import yaml
from diffusers import StableDiffusionPipeline
from peft import (
AdaLoraConfig,
IA3Config,
LoraConfig,
PeftModel,
PeftType,
PrefixTuningConfig,
PromptEncoderConfig,
PromptLearningConfig,
PromptTuningConfig,
get_peft_model,
get_peft_model_state_dict,
prepare_model_for_int8_training,
)
from peft.tuners.lora import LoraLayer
from peft.utils import _get_submodules, infer_device
from .testing_utils import get_state_dict
CONFIG_TESTING_KWARGS = (
    # IA³
{
"target_modules": None,
"feedforward_modules": None,
},
# LoRA
{
"r": 8,
"lora_alpha": 32,
"target_modules": None,
"lora_dropout": 0.05,
"bias": "none",
},
# prefix tuning
{
"num_virtual_tokens": 10,
},
# prompt encoder
{
"num_virtual_tokens": 10,
"encoder_hidden_size": 32,
},
# prompt tuning
{
"num_virtual_tokens": 10,
},
# AdaLoRA
{
"target_modules": None,
},
)
CLASSES_MAPPING = {
"ia3": (IA3Config, CONFIG_TESTING_KWARGS[0]),
"lora": (LoraConfig, CONFIG_TESTING_KWARGS[1]),
"prefix_tuning": (PrefixTuningConfig, CONFIG_TESTING_KWARGS[2]),
"prompt_encoder": (PromptEncoderConfig, CONFIG_TESTING_KWARGS[3]),
"prompt_tuning": (PromptTuningConfig, CONFIG_TESTING_KWARGS[4]),
"adalora": (AdaLoraConfig, CONFIG_TESTING_KWARGS[5]),
}
# Adapted from https://github.com/huggingface/transformers/blob/48327c57182fdade7f7797d1eaad2d166de5c55b/src/transformers/activations.py#LL166C7-L166C22
class ClassInstantier(OrderedDict):
def __getitem__(self, key, *args, **kwargs):
# check if any of the kwargs is inside the config class kwargs
if any(kwarg in self[key][1] for kwarg in kwargs):
new_config_kwargs = self[key][1].copy()
new_config_kwargs.update(kwargs)
return (self[key][0], new_config_kwargs)
return super().__getitem__(key, *args, **kwargs)
def get_grid_parameters(self, grid_parameters, filter_params_func=None):
r"""
Returns a list of all possible combinations of the parameters in the config classes.
Args:
grid_parameters (`dict`):
A dictionary containing the parameters to be tested. There should be at least the key "model_ids" which
contains a list of model ids to be tested. The other keys should be the name of the config class
post-fixed with "_kwargs" and the value should be a dictionary containing the parameters to be tested
for that config class.
filter_params_func (`callable`, `optional`):
                A function that takes a list of tuples and returns a list of tuples. This function is used to filter
                out the tests that need to be skipped, for example.
Returns:
generated_tests (`list`):
A list of tuples containing the name of the test, the model id, the config class and the config class
kwargs.
"""
generated_tests = []
model_list = grid_parameters["model_ids"]
task_type = grid_parameters["task_type"] if "task_type" in grid_parameters else None
for model_id in model_list:
for key, value in self.items():
if "{}_kwargs".format(key) in grid_parameters:
peft_configs = []
current_peft_config = value[1].copy()
for current_key, current_value in grid_parameters[f"{key}_kwargs"].items():
for kwarg in current_value:
current_peft_config.update({current_key: kwarg})
if task_type is not None:
current_peft_config.update({"task_type": task_type})
peft_configs.append(current_peft_config.copy())
else:
current_peft_config = value[1].copy()
if task_type is not None:
current_peft_config.update({"task_type": task_type})
peft_configs = [current_peft_config]
for peft_config in peft_configs:
generated_tests.append((f"test_{model_id}_{key}", model_id, value[0], peft_config))
if filter_params_func is not None:
generated_tests = filter_params_func(generated_tests)
return generated_tests
PeftTestConfigManager = ClassInstantier(CLASSES_MAPPING)
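# Illustrative sketch of how the manager is typically consumed by the concrete test modules
# (the model id below is hypothetical and only serves as an example):
#
#     grid_parameters = {
#         "model_ids": ["hf-internal-testing/tiny-random-OPTForCausalLM"],
#         "task_type": "CAUSAL_LM",
#         "lora_kwargs": {"init_lora_weights": [False]},
#     }
#     for test_name, model_id, config_cls, config_kwargs in PeftTestConfigManager.get_grid_parameters(grid_parameters):
#         config = config_cls(base_model_name_or_path=model_id, **config_kwargs)
#
# Each registered adapter type contributes one test case per model id, or several when a
# "<key>_kwargs" entry expands the grid for that adapter type.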
class PeftCommonTester:
r"""
A large testing suite for testing common functionality of the PEFT models.
Attributes:
torch_device (`torch.device`):
The device on which the tests will be run.
transformers_class (`transformers.PreTrainedModel`):
The transformers class that is being tested.
"""
torch_device = infer_device()
transformers_class = None
def prepare_inputs_for_common(self):
raise NotImplementedError
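    # Illustrative sketch of a concrete tester (names are hypothetical; the real subclasses live in the
    # individual test modules and mix this class into a unittest.TestCase):
    #
    #     class PeftDecoderModelTester(unittest.TestCase, PeftCommonTester):
    #         transformers_class = AutoModelForCausalLM
    #
    #         def prepare_inputs_for_testing(self):
    #             input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
    #             attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
    #             return {"input_ids": input_ids, "attention_mask": attention_mask}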
def check_modelcard(self, tmp_dirname, model):
# check the generated README.md
filename = os.path.join(tmp_dirname, "README.md")
self.assertTrue(os.path.exists(filename))
with open(filename, "r", encoding="utf-8") as f:
readme = f.read()
metainfo = re.search(r"---\n(.*?)\n---", readme, re.DOTALL).group(1)
dct = yaml.safe_load(metainfo)
self.assertEqual(dct["library_name"], "peft")
if hasattr(model, "config"):
self.assertEqual(dct["base_model"], model.config.to_dict()["_name_or_path"])
else: # a custom model
self.assertTrue("base_model" not in dct)
def check_config_json(self, tmp_dirname, model):
# check the generated config.json
filename = os.path.join(tmp_dirname, "adapter_config.json")
self.assertTrue(os.path.exists(filename))
with open(filename, "r", encoding="utf-8") as f:
config = json.load(f)
if hasattr(model, "config"): # custom models don't have a config attribute
self.assertEqual(config["base_model_name_or_path"], model.config.to_dict()["_name_or_path"])
def _test_model_attr(self, model_id, config_cls, config_kwargs):
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
self.assertTrue(hasattr(model, "save_pretrained"))
self.assertTrue(hasattr(model, "from_pretrained"))
self.assertTrue(hasattr(model, "push_to_hub"))
def _test_adapter_name(self, model_id, config_cls, config_kwargs):
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config, adapter_name="test-adapter")
correctly_converted = False
for n, _ in model.named_parameters():
if "test-adapter" in n:
correctly_converted = True
break
self.assertTrue(correctly_converted)
def _test_prepare_for_training(self, model_id, config_cls, config_kwargs):
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
dummy_input = self.prepare_inputs_for_testing()
dummy_output = model.get_input_embeddings()(dummy_input["input_ids"])
self.assertFalse(dummy_output.requires_grad)
# load with `prepare_model_for_int8_training`
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
model = prepare_model_for_int8_training(model)
for param in model.parameters():
self.assertFalse(param.requires_grad)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
# For backward compatibility
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
dummy_input = self.prepare_inputs_for_testing()
dummy_output = model.get_input_embeddings()(dummy_input["input_ids"])
self.assertTrue(dummy_output.requires_grad)
def _test_save_pretrained(self, model_id, config_cls, config_kwargs, safe_serialization=True):
# ensure that the weights are randomly initialized
if issubclass(config_cls, LoraConfig):
config_kwargs = config_kwargs.copy()
config_kwargs["init_lora_weights"] = False
if issubclass(config_cls, IA3Config):
config_kwargs = config_kwargs.copy()
config_kwargs["init_ia3_weights"] = False
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
with tempfile.TemporaryDirectory() as tmp_dirname:
if safe_serialization:
model.save_pretrained(tmp_dirname)
else:
model.save_pretrained(tmp_dirname, safe_serialization=False)
model_from_pretrained = self.transformers_class.from_pretrained(model_id)
model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)
# check if the state dicts are equal
if issubclass(config_cls, PromptEncoderConfig):
# For prompt encoding, when loading the whole state_dict, there are differences, therefore, only load
# adapter-specific weights for comparison.
# TODO: is this expected?
state_dict = get_peft_model_state_dict(model, unwrap_compiled=True)
state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained, unwrap_compiled=True)
else:
state_dict = get_state_dict(model, unwrap_compiled=True)
state_dict_from_pretrained = get_state_dict(model_from_pretrained, unwrap_compiled=True)
# check if tensors equal
for key in state_dict.keys():
self.assertTrue(
torch.allclose(
state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
)
)
target_adapter_filename = "adapter_model.safetensors" if safe_serialization else "adapter_model.bin"
# check if `adapter_model.safetensors` is present
self.assertTrue(os.path.exists(os.path.join(tmp_dirname, target_adapter_filename)))
# check if `adapter_config.json` is present
self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")))
# check if `model.safetensors` is not present
self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "model.safetensors")))
# check if `config.json` is not present
self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "config.json")))
self.check_modelcard(tmp_dirname, model)
self.check_config_json(tmp_dirname, model)
def _test_save_pretrained_selected_adapters(self, model_id, config_cls, config_kwargs, safe_serialization=True):
if issubclass(config_cls, AdaLoraConfig):
# AdaLora does not support adding more than 1 adapter
return
# ensure that the weights are randomly initialized
if issubclass(config_cls, LoraConfig):
config_kwargs = config_kwargs.copy()
config_kwargs["init_lora_weights"] = False
if issubclass(config_cls, IA3Config):
config_kwargs = config_kwargs.copy()
config_kwargs["init_ia3_weights"] = False
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
new_adapter_config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model.add_adapter("new_adapter", new_adapter_config)
with tempfile.TemporaryDirectory() as tmp_dirname:
if safe_serialization:
model.save_pretrained(tmp_dirname)
else:
model.save_pretrained(tmp_dirname, safe_serialization=False)
model_from_pretrained = self.transformers_class.from_pretrained(model_id)
model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)
new_adapter_dir = os.path.join(tmp_dirname, "new_adapter")
model_from_pretrained.load_adapter(new_adapter_dir, "new_adapter")
# check if the state dicts are equal
if issubclass(config_cls, PromptEncoderConfig):
# For prompt encoding, when loading the whole state_dict, there are differences, therefore, only load
# adapter-specific weights for comparison.
# TODO: is this expected?
state_dict = get_peft_model_state_dict(model, unwrap_compiled=True)
state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained, unwrap_compiled=True)
else:
state_dict = get_state_dict(model, unwrap_compiled=True)
state_dict_from_pretrained = get_state_dict(model_from_pretrained, unwrap_compiled=True)
# check if same keys
self.assertEqual(state_dict.keys(), state_dict_from_pretrained.keys())
# check if tensors equal
for key in state_dict.keys():
self.assertTrue(
torch.allclose(
state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
)
)
target_adapter_filename = "adapter_model.safetensors" if safe_serialization else "adapter_model.bin"
# check if `adapter_model.safetensors` is present
self.assertTrue(os.path.exists(os.path.join(tmp_dirname, target_adapter_filename)))
self.assertTrue(os.path.exists(os.path.join(new_adapter_dir, target_adapter_filename)))
# check if `adapter_config.json` is present
self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")))
self.assertTrue(os.path.exists(os.path.join(new_adapter_dir, "adapter_config.json")))
# check if `model.safetensors` is not present
self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "model.safetensors")))
self.assertFalse(os.path.exists(os.path.join(new_adapter_dir, "model.safetensors")))
# check if `config.json` is not present
self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "config.json")))
self.assertFalse(os.path.exists(os.path.join(new_adapter_dir, "config.json")))
self.check_modelcard(tmp_dirname, model)
self.check_config_json(tmp_dirname, model)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname, selected_adapters=["default"])
model_from_pretrained = self.transformers_class.from_pretrained(model_id)
model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)
self.assertTrue("default" in model_from_pretrained.peft_config.keys())
self.assertTrue("new_adapter" not in model_from_pretrained.peft_config.keys())
def _test_from_pretrained_config_construction(self, model_id, config_cls, config_kwargs):
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(base_model_name_or_path=model_id, **config_kwargs)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model_from_pretrained = self.transformers_class.from_pretrained(model_id)
model_from_pretrained = PeftModel.from_pretrained(
model_from_pretrained, tmp_dirname, is_trainable=False, config=config
)
self.assertTrue(model_from_pretrained.peft_config["default"].inference_mode)
self.assertIs(model_from_pretrained.peft_config["default"], config)
def _test_merge_layers_fp16(self, model_id, config_cls, config_kwargs):
if config_cls not in (LoraConfig, IA3Config):
            # Merge layers only supported for LoRA and IA³
return
if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig):
self.skipTest("Merging GPT2 adapters not supported for IAยณ (yet)")
model = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.float16)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(device="cpu", dtype=torch.float16)
model.eval()
# This should simply work
_ = model.merge_and_unload()
def _test_merge_layers_nan(self, model_id, config_cls, config_kwargs):
if config_cls not in (LoraConfig, IA3Config, AdaLoraConfig):
            # Merge layers only supported for LoRA, IA³ and AdaLoRA
return
if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig):
self.skipTest("Merging GPT2 adapters not supported for IAยณ (yet)")
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
dummy_input = self.prepare_inputs_for_testing()
model.eval()
# This should work
logits_unmerged = model(**dummy_input)[0]
model = model.merge_and_unload()
logits_merged = model(**dummy_input)[0]
self.assertTrue(torch.allclose(logits_unmerged, logits_merged, atol=1e-3, rtol=1e-3))
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
for name, module in model.named_parameters():
if "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name:
module.data[0] = torch.nan
with self.assertRaises(ValueError) as error_context:
model = model.merge_and_unload(safe_merge=True)
self.assertEqual(
str(error_context.exception),
"NaNs detected in the merged weights. The adapter default seems to be broken",
)
for name, module in model.named_parameters():
if "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name:
module.data[0] = torch.inf
with self.assertRaises(ValueError) as error_context:
model = model.merge_and_unload(safe_merge=True)
self.assertEqual(
str(error_context.exception),
"NaNs detected in the merged weights. The adapter default seems to be broken",
)
def _test_merge_layers(self, model_id, config_cls, config_kwargs):
if config_cls not in (LoraConfig, IA3Config):
            # Merge layers only supported for LoRA and IA³
return
if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig):
self.skipTest("Merging GPT2 adapters not supported for IAยณ (yet)")
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
if config.peft_type not in ("IA3", "LORA"):
with self.assertRaises(AttributeError):
model = model.merge_and_unload()
dummy_input = self.prepare_inputs_for_testing()
model.eval()
logits = model(**dummy_input)[0]
model.merge_adapter()
logits_merged = model(**dummy_input)[0]
model.unmerge_adapter()
logits_unmerged = model(**dummy_input)[0]
model = model.merge_and_unload()
logits_merged_unloaded = model(**dummy_input)[0]
atol, rtol = 1e-4, 1e-4
if (config.peft_type == "IA3") and (model_id == "Conv2d"):
            # for some reason, the IA³ Conv2d introduces a larger error
atol, rtol = 0.3, 0.01
self.assertTrue(torch.allclose(logits, logits_merged, atol=atol, rtol=rtol))
self.assertTrue(torch.allclose(logits, logits_unmerged, atol=atol, rtol=rtol))
self.assertTrue(torch.allclose(logits, logits_merged_unloaded, atol=atol, rtol=rtol))
# For this test to work, weights should not be initialized to identity transform (e.g.
# init_lora_weights should be False).
transformers_model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
logits_transformers = transformers_model(**dummy_input)[0]
self.assertFalse(torch.allclose(logits_merged, logits_transformers, atol=1e-10, rtol=1e-10))
# test that the logits are identical after a save-load-roundtrip
if hasattr(model, "save_pretrained"):
# model is a transformers model
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model_from_pretrained = self.transformers_class.from_pretrained(tmp_dirname).to(self.torch_device)
else:
# model is not a transformers model
model_from_pretrained = pickle.loads(pickle.dumps(model))
logits_merged_from_pretrained = model_from_pretrained(**dummy_input)[0]
self.assertTrue(torch.allclose(logits_merged, logits_merged_from_pretrained, atol=atol, rtol=rtol))
def _test_merge_layers_multi(self, model_id, config_cls, config_kwargs):
supported_peft_types = [PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT]
if ("gpt2" in model_id.lower()) and (config_cls == IA3Config):
self.skipTest("Merging GPT2 adapters not supported for IAยณ (yet)")
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
if config.peft_type not in supported_peft_types:
return
model = self.transformers_class.from_pretrained(model_id)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
dummy_input = self.prepare_inputs_for_testing()
model.eval()
with torch.inference_mode():
logits_adapter_1 = model(**dummy_input)[0]
model.add_adapter("adapter-2", config)
model.set_adapter("adapter-2")
model.eval()
with torch.inference_mode():
logits_adapter_2 = model(**dummy_input)[0]
self.assertFalse(torch.allclose(logits_adapter_1, logits_adapter_2, atol=1e-3, rtol=1e-3))
model.set_adapter("default")
with torch.inference_mode():
logits_adapter_1_after_set = model(**dummy_input)[0]
self.assertTrue(torch.allclose(logits_adapter_1_after_set, logits_adapter_1, atol=1e-3, rtol=1e-3))
model_copy = copy.deepcopy(model)
model_copy_2 = copy.deepcopy(model)
model_merged_all = model.merge_and_unload(adapter_names=["adapter-2", "default"])
with torch.inference_mode():
logits_merged_all = model_merged_all(**dummy_input)[0]
self.assertFalse(torch.allclose(logits_merged_all, logits_adapter_2, atol=1e-3, rtol=1e-3))
self.assertFalse(torch.allclose(logits_merged_all, logits_adapter_1, atol=1e-3, rtol=1e-3))
model_merged_adapter_2 = model_copy.merge_and_unload(adapter_names=["adapter-2"])
with torch.inference_mode():
logits_merged_adapter_2 = model_merged_adapter_2(**dummy_input)[0]
self.assertTrue(torch.allclose(logits_merged_adapter_2, logits_adapter_2, atol=1e-3, rtol=1e-3))
model_merged_adapter_default = model_copy_2.merge_and_unload(adapter_names=["default"])
with torch.inference_mode():
logits_merged_adapter_default = model_merged_adapter_default(**dummy_input)[0]
self.assertTrue(torch.allclose(logits_merged_adapter_default, logits_adapter_1, atol=1e-3, rtol=1e-3))
def _test_merge_layers_is_idempotent(self, model_id, config_cls, config_kwargs):
if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig):
self.skipTest("Merging GPT2 adapters not supported for IAยณ (yet)")
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
model.eval()
torch.manual_seed(0)
model.merge_adapter()
logits_0 = model(**self.prepare_inputs_for_testing())[0]
# merging again should not change anything
# also check warning:
with self.assertWarnsRegex(UserWarning, "All adapters are already merged, nothing to do"):
model.merge_adapter()
logits_1 = model(**self.prepare_inputs_for_testing())[0]
self.assertTrue(torch.allclose(logits_0, logits_1, atol=1e-6, rtol=1e-6))
def _test_generate(self, model_id, config_cls, config_kwargs):
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
inputs = self.prepare_inputs_for_testing()
# check if `generate` works
_ = model.generate(**inputs)
def _test_generate_pos_args(self, model_id, config_cls, config_kwargs, raises_err: bool):
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
inputs = self.prepare_inputs_for_testing()
if raises_err:
with self.assertRaises(TypeError):
# check if `generate` raises an error if positional arguments are passed
_ = model.generate(inputs["input_ids"])
else:
# check if `generate` works if positional arguments are passed
_ = model.generate(inputs["input_ids"])
def _test_generate_half_prec(self, model_id, config_cls, config_kwargs):
if config_cls not in (IA3Config, LoraConfig, PrefixTuningConfig):
return
model = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.bfloat16)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
# check if `generate` works
_ = model.generate(input_ids=input_ids, attention_mask=attention_mask)
def _test_prefix_tuning_half_prec_conversion(self, model_id, config_cls, config_kwargs):
if config_cls not in (PrefixTuningConfig,):
return
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = self.transformers_class.from_pretrained(model_id)
model = get_peft_model(model, config)
model = model.half()
self.assertEqual(model.base_model_torch_dtype, torch.float16)
def _test_training(self, model_id, config_cls, config_kwargs):
if config_cls not in (IA3Config, LoraConfig):
return
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
inputs = self.prepare_inputs_for_testing()
# check if `training` works
output = model(**inputs)[0]
loss = output.sum()
loss.backward()
parameter_prefix = "ia3" if config_cls == IA3Config else "lora"
for n, param in model.named_parameters():
if (parameter_prefix in n) or ("modules_to_save" in n):
self.assertIsNotNone(param.grad)
else:
self.assertIsNone(param.grad)
def _test_inference_safetensors(self, model_id, config_cls, config_kwargs):
if config_cls not in (LoraConfig,):
return
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = self.transformers_class.from_pretrained(model_id)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
inputs = self.prepare_inputs_for_testing()
# check if `training` works
output = model(**inputs)[0]
logits = output[0]
loss = output.sum()
loss.backward()
# set to eval mode, since things like dropout can affect the output otherwise
model.eval()
logits = model(**inputs)[0][0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname, safe_serialization=True)
self.assertTrue("adapter_model.safetensors" in os.listdir(tmp_dirname))
self.assertTrue("adapter_model.bin" not in os.listdir(tmp_dirname))
model_from_pretrained = self.transformers_class.from_pretrained(model_id)
model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to(self.torch_device)
logits_from_pretrained = model_from_pretrained(**inputs)[0][0]
self.assertTrue(torch.allclose(logits, logits_from_pretrained, atol=1e-4, rtol=1e-4))
def _test_training_layer_indexing(self, model_id, config_cls, config_kwargs):
if config_cls not in (LoraConfig,):
return
config = config_cls(
base_model_name_or_path=model_id,
layers_to_transform=[0],
**config_kwargs,
)
model = self.transformers_class.from_pretrained(model_id)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
inputs = self.prepare_inputs_for_testing()
# check if `training` works
output = model(**inputs)[0]
logits = output[0]
loss = output.sum()
loss.backward()
nb_trainable = 0
for n, param in model.named_parameters():
if "lora" in n:
self.assertIsNotNone(param.grad)
nb_trainable += 1
else:
self.assertIsNone(param.grad)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model_from_pretrained = self.transformers_class.from_pretrained(model_id)
model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to(self.torch_device)
logits_from_pretrained = model_from_pretrained(**inputs)[0][0]
self.assertTrue(torch.allclose(logits, logits_from_pretrained, atol=1e-4, rtol=1e-4))
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
nb_trainable_all = 0
for n, param in model.named_parameters():
if "lora" in n:
nb_trainable_all += 1
self.assertLess(nb_trainable, nb_trainable_all)
def _test_training_gradient_checkpointing(self, model_id, config_cls, config_kwargs):
if config_cls not in (LoraConfig, IA3Config):
return
model = self.transformers_class.from_pretrained(model_id)
if not getattr(model, "supports_gradient_checkpointing", False):
return
model.gradient_checkpointing_enable()
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
inputs = self.prepare_inputs_for_testing()
# check if `training` works
output = model(**inputs)[0]
loss = output.sum()
loss.backward()
parameter_prefix = "ia3" if config_cls == IA3Config else "lora"
for n, param in model.named_parameters():
if parameter_prefix in n:
self.assertIsNotNone(param.grad)
else:
self.assertIsNone(param.grad)
def _test_peft_model_device_map(self, model_id, config_cls, config_kwargs):
if config_cls not in (LoraConfig,):
return
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = self.transformers_class.from_pretrained(model_id)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model_from_pretrained = self.transformers_class.from_pretrained(model_id)
_ = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname, device_map={"": "cpu"}).to(
self.torch_device
)
def _test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs):
if not issubclass(config_cls, PromptLearningConfig):
return
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
inputs = self.prepare_inputs_for_testing()
# check if `training` works
output = model(**inputs)[0]
loss = output.sum()
loss.backward()
# check that prompt encoder has grads
for param in model.prompt_encoder.parameters():
self.assertIsNotNone(param.grad)
def _test_delete_adapter(self, model_id, config_cls, config_kwargs):
supported_peft_types = [PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT]
# IA3 does not support deleting adapters yet, but it just needs to be added
# AdaLora does not support multiple adapters
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
if config.peft_type not in supported_peft_types:
return
model = self.transformers_class.from_pretrained(model_id)
adapter_to_delete = "delete_me"
model = get_peft_model(model, config)
model.add_adapter(adapter_to_delete, config)
model.set_adapter(adapter_to_delete)
model = model.to(self.torch_device)
model.delete_adapter(adapter_to_delete)
self.assertFalse(adapter_to_delete in model.peft_config)
self.assertEqual(model.active_adapters, ["default"])
key_list = [key for key, _ in model.named_modules()]
for key in key_list:
_, target, _ = _get_submodules(model, key)
attributes_to_check = getattr(target, "adapter_layer_names", []) + getattr(target, "other_param_names", [])
for attr in attributes_to_check:
self.assertFalse(adapter_to_delete in getattr(target, attr))
# check that we can also delete the last remaining adapter
model.delete_adapter("default")
self.assertFalse("default" in model.peft_config)
self.assertEqual(model.active_adapters, [])
input = self.prepare_inputs_for_testing()
# note: we cannot call model(**input) because PeftModel always expects there to be at least one adapter
model.base_model(**input) # should not raise an error
def _test_delete_inactive_adapter(self, model_id, config_cls, config_kwargs):
# same as test_delete_adapter, but this time an inactive adapter is deleted
supported_peft_types = [PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT]
# IA3 does not support deleting adapters yet, but it just needs to be added
# AdaLora does not support multiple adapters
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
if config.peft_type not in supported_peft_types:
return
model = self.transformers_class.from_pretrained(model_id)
adapter_to_delete = "delete_me"
model = get_peft_model(model, config)
model.add_adapter(adapter_to_delete, config)
# "delete_me" is added but not activated
model = model.to(self.torch_device)
model.delete_adapter(adapter_to_delete)
self.assertFalse(adapter_to_delete in model.peft_config)
self.assertEqual(model.active_adapters, ["default"])
key_list = [key for key, _ in model.named_modules()]
for key in key_list:
_, target, _ = _get_submodules(model, key)
attributes_to_check = getattr(target, "adapter_layer_names", []) + getattr(target, "other_param_names", [])
for attr in attributes_to_check:
self.assertFalse(adapter_to_delete in getattr(target, attr))
# check that we can also delete the last remaining adapter
model.delete_adapter("default")
self.assertFalse("default" in model.peft_config)
self.assertEqual(model.active_adapters, [])
input = self.prepare_inputs_for_testing()
# note: we cannot call model(**input) because PeftModel always expects there to be at least one adapter
model.base_model(**input) # should not raise an error
def _test_unload_adapter(self, model_id, config_cls, config_kwargs):
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model = model.to(self.torch_device)
if config.peft_type not in ("LORA", "ADALORA", "IA3"):
with self.assertRaises(AttributeError):
model = model.unload()
else:
dummy_input = self.prepare_inputs_for_testing()
logits_with_adapter = model(**dummy_input)[0]
transformers_model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
logits_transformers = transformers_model(**dummy_input)[0]
model.eval()
model = model.unload()
logits_unload = model(**dummy_input)[0]
self.assertFalse(torch.allclose(logits_with_adapter, logits_unload, atol=1e-10, rtol=1e-10))
self.assertTrue(torch.allclose(logits_transformers, logits_unload, atol=1e-4, rtol=1e-4))
def _test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs):
if issubclass(config_cls, AdaLoraConfig):
# AdaLora does not support adding more than 1 adapter
return
adapter_list = ["adapter1", "adapter_2", "adapter_3"]
weight_list = [0.5, 1.5, 1.5]
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
if not isinstance(config, (LoraConfig)):
return
model = self.transformers_class.from_pretrained(model_id)
model = get_peft_model(model, config, adapter_list[0])
model.add_adapter(adapter_list[1], config)
model.add_adapter(adapter_list[2], replace(config, r=20))
model = model.to(self.torch_device)
# test re-weighting single adapter
model.add_weighted_adapter([adapter_list[0]], [weight_list[0]], "single_adapter_reweighting")
# test svd re-weighting with multiple adapters
model.add_weighted_adapter(adapter_list[1:], weight_list[1:], "multi_adapter_svd_reweighting")
# test cat re-weighting with multiple adapters
model.add_weighted_adapter(
adapter_list[1:], weight_list[1:], "multi_adapter_cat_reweighting", combination_type="cat"
)
# test linear re-weighting with multiple adapters
model.add_weighted_adapter(
adapter_list[:2], weight_list[:2], "multi_adapter_linear_reweighting", combination_type="linear"
)
# test linear re-weighting with multiple adapters with only first adapter having non zero weight
model.add_weighted_adapter(
adapter_list[:2],
[weight_list[0], 0],
"multi_adapter_linear_reweighting_single_enabled",
combination_type="linear",
)
with self.assertRaises(ValueError):
model.add_weighted_adapter(
adapter_list[1:],
weight_list[1:],
"multi_adapter_linear_reweighting_uneven_r",
combination_type="linear",
)
new_adapters = [
"single_adapter_reweighting",
"multi_adapter_svd_reweighting",
"multi_adapter_cat_reweighting",
"multi_adapter_linear_reweighting",
"multi_adapter_linear_reweighting_single_enabled",
]
for new_adapter in new_adapters:
self.assertTrue(new_adapter in model.peft_config)
key_list = [key for key, _ in model.named_modules()]
for key in key_list:
_, target, _ = _get_submodules(model, key)
if isinstance(target, LoraLayer):
for adapter_name in new_adapters:
if "single" in adapter_name:
new_delta_weight = target.get_delta_weight(adapter_name)
weighted_original_delta_weights = target.get_delta_weight(adapter_list[0]) * weight_list[0]
self.assertTrue(
torch.allclose(new_delta_weight, weighted_original_delta_weights, atol=1e-4, rtol=1e-4)
)
elif "svd" in adapter_name:
self.assertTrue(target.r[adapter_name] == 20)
elif "linear" in adapter_name:
self.assertTrue(target.r[adapter_name] == 8)
elif "cat" in adapter_name:
self.assertTrue(target.r[adapter_name] == 28)
dummy_input = self.prepare_inputs_for_testing()
model.eval()
for adapter_name in new_adapters:
# ensuring new adapters pass the forward loop
model.set_adapter(adapter_name)
self.assertTrue(model.active_adapter == adapter_name)
self.assertTrue(model.active_adapters == [adapter_name])
model(**dummy_input)[0]
def _test_disable_adapter(self, model_id, config_cls, config_kwargs):
task_type = config_kwargs.get("task_type")
if (task_type == "SEQ_2_SEQ_LM") and (config_cls in (PromptTuningConfig, PromptEncoderConfig)):
self.skipTest("Seq2Seq + prompt tuning/prompt encoder does not work with disabling adapters")
def get_output(model):
# helper function that works with different model types
torch.manual_seed(0)
if hasattr(model, "generate"):
# let's check the scores, not the output ids, since the latter can easily be identical even if the
# weights are slightly changed
output = model.generate(**input, return_dict_in_generate=True, output_scores=True).scores[0]
# take element 0, as output is a tuple
else:
output = model(**input)
if hasattr(output, "images"): # for SD
import numpy as np
img = output.images[0]
return torch.from_numpy(np.array(img))
return output
# initialize model
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
# output from BASE MODEL
input = self.prepare_inputs_for_testing()
output_before = get_output(model)
# output from PEFT MODEL
if hasattr(self, "instantiate_sd_peft"):
# SD models are instantiated differently
peft_model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs)
else:
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
peft_model = get_peft_model(model, config)
output_peft = get_output(peft_model)
        # first check that the trivial case does not hold, i.e. that PEFT does in fact affect the output; for this
        # to work, init_lora_weights must be False
if isinstance(peft_model, StableDiffusionPipeline):
# for SD, check that most pixels have different values
self.assertTrue((output_before != output_peft).float().mean() > 0.8)
else:
self.assertFalse(torch.allclose(output_before, output_peft))
# output with DISABLED ADAPTER
if isinstance(peft_model, StableDiffusionPipeline):
with peft_model.unet.disable_adapter():
with peft_model.text_encoder.disable_adapter():
output_peft_disabled = get_output(peft_model)
# for SD, very rarely, a pixel can differ
self.assertTrue((output_before != output_peft_disabled).float().mean() < 1e-4)
else:
with peft_model.disable_adapter():
output_peft_disabled = get_output(peft_model)
self.assertTrue(torch.allclose(output_before, output_peft_disabled, atol=1e-6, rtol=1e-6))
# TODO: add tests to check if disabling adapters works after calling merge_adapter
def _test_adding_multiple_adapters_with_bias_raises(self, model_id, config_cls, config_kwargs):
# When trying to add multiple adapters with bias in Lora or AdaLora, an error should be
# raised. Also, the peft model should not be left in a half-initialized state.
if not issubclass(config_cls, (LoraConfig, AdaLoraConfig)):
return
config_kwargs = config_kwargs.copy()
config_kwargs["bias"] = "all"
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = self.transformers_class.from_pretrained(model_id)
model = get_peft_model(model, config, "adapter0")
with self.assertRaises(ValueError):
model.add_adapter("adapter1", replace(config, r=20))
# (superficial) test that the model is not left in a half-initialized state when adding an adapter fails
self.assertFalse("adapter1" in model.peft_config)
self.assertFalse("adapter1" in model.base_model.peft_config)
def _test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs):
# https://github.com/huggingface/peft/issues/727
model = self.transformers_class.from_pretrained(model_id)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config, adapter_name="test-adapter").to(self.torch_device)
dummy_input = self.prepare_inputs_for_testing()
inputs_embeds = model.get_input_embeddings()(dummy_input["input_ids"])
# just check that no error is raised
model.forward(inputs_embeds=inputs_embeds)
| peft/tests/testing_common.py/0 | {
"file_path": "peft/tests/testing_common.py",
"repo_id": "peft",
"token_count": 22859
} | 163 |
# Archived Changes
### Nov 22, 2021
* A number of updated weights and new model defs
* `eca_halonext26ts` - 79.5 @ 256
* `resnet50_gn` (new) - 80.1 @ 224, 81.3 @ 288
* `resnet50` - 80.7 @ 224, 80.9 @ 288 (trained at 176, not replacing current a1 weights as default since these don't scale as well to higher res, [weights](https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1h2_176-001a1197.pth))
* `resnext50_32x4d` - 81.1 @ 224, 82.0 @ 288
* `sebotnet33ts_256` (new) - 81.2 @ 224
* `lamhalobotnet50ts_256` - 81.5 @ 256
* `halonet50ts` - 81.7 @ 256
* `halo2botnet50ts_256` - 82.0 @ 256
* `resnet101` - 82.0 @ 224, 82.8 @ 288
* `resnetv2_101` (new) - 82.1 @ 224, 83.0 @ 288
* `resnet152` - 82.8 @ 224, 83.5 @ 288
* `regnetz_d8` (new) - 83.5 @ 256, 84.0 @ 320
* `regnetz_e8` (new) - 84.5 @ 256, 85.0 @ 320
* `vit_base_patch8_224` (85.8 top-1) & `in21k` variant weights added thanks [Martins Bruveris](https://github.com/martinsbruveris)
* Groundwork in for FX feature extraction thanks to [Alexander Soare](https://github.com/alexander-soare)
  * models updated for tracing compatibility (almost full support with some distilled transformer exceptions)
### Oct 19, 2021
* ResNet strikes back (https://arxiv.org/abs/2110.00476) weights added, plus any extra training components used. Model weights and some more details here (https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-rsb-weights)
* BCE loss and Repeated Augmentation support for RSB paper
* 4 series of ResNet based attention model experiments being added (implemented across byobnet.py/byoanet.py). These include all sorts of attention, from channel attn like SE, ECA to 2D QKV self-attention layers such as Halo, Bottleneck, Lambda. Details here (https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-attn-weights)
* Working implementations of the following 2D self-attention modules (likely to be differences from paper or eventual official impl):
* Halo (https://arxiv.org/abs/2103.12731)
* Bottleneck Transformer (https://arxiv.org/abs/2101.11605)
* LambdaNetworks (https://arxiv.org/abs/2102.08602)
* A RegNetZ series of models with some attention experiments (being added to). These do not follow the paper (https://arxiv.org/abs/2103.06877) in any way other than block architecture, details of official models are not available. See more here (https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-attn-weights)
* ConvMixer (https://openreview.net/forum?id=TVHS5Y4dNvM), CrossVit (https://arxiv.org/abs/2103.14899), and BeiT (https://arxiv.org/abs/2106.08254) architectures + weights added
* freeze/unfreeze helpers by [Alexander Soare](https://github.com/alexander-soare)
### Aug 18, 2021
* Optimizer bonanza!
* Add LAMB and LARS optimizers, incl trust ratio clipping options. Tweaked to work properly in PyTorch XLA (tested on TPUs w/ `timm bits` [branch](https://github.com/rwightman/pytorch-image-models/tree/bits_and_tpu/timm/bits))
* Add MADGRAD from FB research w/ a few tweaks (decoupled decay option, step handling that works with PyTorch XLA)
* Some cleanup on all optimizers and factory. No more `.data`, a bit more consistency, unit tests for all!
* SGDP and AdamP still won't work with PyTorch XLA but others should (have yet to test Adabelief, Adafactor, Adahessian myself).
* EfficientNet-V2 XL TF ported weights added, but they don't validate well in PyTorch (L is better). The pre-processing for the V2 TF training is a bit different and the fine-tuned 21k -> 1k weights are very sensitive and less robust than the 1k weights.
* Added PyTorch trained EfficientNet-V2 'Tiny' w/ GlobalContext attn weights. Only .1-.2 top-1 better than the SE so more of a curiosity for those interested.
### July 12, 2021
* Add XCiT models from [official facebook impl](https://github.com/facebookresearch/xcit). Contributed by [Alexander Soare](https://github.com/alexander-soare)
### July 5-9, 2021
* Add `efficientnetv2_rw_t` weights, a custom 'tiny' 13.6M param variant that is a bit better than (non NoisyStudent) B3 models. Both faster and better accuracy (at same or lower res)
* top-1 82.34 @ 288x288 and 82.54 @ 320x320
* Add [SAM pretrained](https://arxiv.org/abs/2106.01548) in1k weight for ViT B/16 (`vit_base_patch16_sam_224`) and B/32 (`vit_base_patch32_sam_224`) models.
* Add 'Aggregating Nested Transformer' (NesT) w/ weights converted from official [Flax impl](https://github.com/google-research/nested-transformer). Contributed by [Alexander Soare](https://github.com/alexander-soare).
* `jx_nest_base` - 83.534, `jx_nest_small` - 83.120, `jx_nest_tiny` - 81.426
### June 23, 2021
* Reproduce gMLP model training, `gmlp_s16_224` trained to 79.6 top-1, matching [paper](https://arxiv.org/abs/2105.08050). Hparams for this and other recent MLP training [here](https://gist.github.com/rwightman/d6c264a9001f9167e06c209f630b2cc6)
### June 20, 2021
* Release Vision Transformer 'AugReg' weights from [How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers](https://arxiv.org/abs/2106.10270)
* .npz weight loading support added, can load any of the 50K+ weights from the [AugReg series](https://console.cloud.google.com/storage/browser/vit_models/augreg)
* See [example notebook](https://colab.research.google.com/github/google-research/vision_transformer/blob/master/vit_jax_augreg.ipynb) from [official impl](https://github.com/google-research/vision_transformer/) for navigating the augreg weights
* Replaced all default weights w/ best AugReg variant (if possible). All AugReg 21k classifiers work.
* Highlights: `vit_large_patch16_384` (87.1 top-1), `vit_large_r50_s32_384` (86.2 top-1), `vit_base_patch16_384` (86.0 top-1)
* `vit_deit_*` renamed to just `deit_*`
* Remove my old small model, replace with DeiT compatible small w/ AugReg weights
* Add 1st training of my `gmixer_24_224` MLP /w GLU, 78.1 top-1 w/ 25M params.
* Add weights from official ResMLP release (https://github.com/facebookresearch/deit)
* Add `eca_nfnet_l2` weights from my 'lightweight' series. 84.7 top-1 at 384x384.
* Add distilled BiT 50x1 student and 152x2 Teacher weights from [Knowledge distillation: A good teacher is patient and consistent](https://arxiv.org/abs/2106.05237)
* NFNets and ResNetV2-BiT models work w/ Pytorch XLA now
* weight standardization uses F.batch_norm instead of std_mean (std_mean wasn't lowered)
* eps values adjusted, will be slight differences but should be quite close
* Improve test coverage and classifier interface of non-conv (vision transformer and mlp) models
* Cleanup a few classifier / flatten details for models w/ conv classifiers or early global pool
* Please report any regressions, this PR touched quite a few models.
### June 8, 2021
* Add first ResMLP weights, trained in PyTorch XLA on TPU-VM w/ my XLA branch. 24 block variant, 79.2 top-1.
* Add ResNet51-Q model w/ pretrained weights at 82.36 top-1.
* NFNet inspired block layout with quad layer stem and no maxpool
* Same param count (35.7M) and throughput as ResNetRS-50 but +1.5 top-1 @ 224x224 and +2.5 top-1 at 288x288
### May 25, 2021
* Add LeViT, Visformer, Convit (PR by Aman Arora), Twins (PR by paper authors) transformer models
* Cleanup input_size/img_size override handling and testing for all vision transformer models
* Add `efficientnetv2_rw_m` model and weights (started training before official code). 84.8 top-1, 53M params.
### May 14, 2021
* Add EfficientNet-V2 official model defs w/ ported weights from official [Tensorflow/Keras](https://github.com/google/automl/tree/master/efficientnetv2) impl.
* 1k trained variants: `tf_efficientnetv2_s/m/l`
* 21k trained variants: `tf_efficientnetv2_s/m/l_in21k`
* 21k pretrained -> 1k fine-tuned: `tf_efficientnetv2_s/m/l_in21ft1k`
* v2 models w/ v1 scaling: `tf_efficientnetv2_b0` through `b3`
* Rename my prev V2 guess `efficientnet_v2s` -> `efficientnetv2_rw_s`
* Some blank `efficientnetv2_*` models in-place for future native PyTorch training
### May 5, 2021
* Add MLP-Mixer models and port pretrained weights from [Google JAX impl](https://github.com/google-research/vision_transformer/tree/linen)
* Add CaiT models and pretrained weights from [FB](https://github.com/facebookresearch/deit)
* Add ResNet-RS models and weights from [TF](https://github.com/tensorflow/tpu/tree/master/models/official/resnet/resnet_rs). Thanks [Aman Arora](https://github.com/amaarora)
* Add CoaT models and weights. Thanks [Mohammed Rizin](https://github.com/morizin)
* Add new ImageNet-21k weights & finetuned weights for TResNet, MobileNet-V3, ViT models. Thanks [mrT](https://github.com/mrT23)
* Add GhostNet models and weights. Thanks [Kai Han](https://github.com/iamhankai)
* Update ByoaNet attention modules
* Improve SA module inits
* Hack together experimental stand-alone Swin based attn module and `swinnet`
* Consistent '26t' model defs for experiments.
* Add improved Efficientnet-V2S (prelim model def) weights. 83.8 top-1.
* WandB logging support
### April 13, 2021
* Add Swin Transformer models and weights from https://github.com/microsoft/Swin-Transformer
### April 12, 2021
* Add ECA-NFNet-L1 (slimmed down F1 w/ SiLU, 41M params) trained with this code. 84% top-1 @ 320x320. Trained at 256x256.
* Add EfficientNet-V2S model (unverified model definition) weights. 83.3 top-1 @ 288x288. Only trained single res 224. Working on progressive training.
* Add ByoaNet model definition (Bring-your-own-attention) w/ SelfAttention block and corresponding SA/SA-like modules and model defs
* Lambda Networks - https://arxiv.org/abs/2102.08602
* Bottleneck Transformers - https://arxiv.org/abs/2101.11605
* Halo Nets - https://arxiv.org/abs/2103.12731
* Adabelief optimizer contributed by Juntang Zhuang
### April 1, 2021
* Add snazzy `benchmark.py` script for bulk `timm` model benchmarking of train and/or inference
* Add Pooling-based Vision Transformer (PiT) models (from https://github.com/naver-ai/pit)
* Merged distilled variant into main for torchscript compatibility
* Some `timm` cleanup/style tweaks and weights have hub download support
* Cleanup Vision Transformer (ViT) models
* Merge distilled (DeiT) model into main so that torchscript can work
* Support updated weight init (defaults to old still) that closer matches original JAX impl (possibly better training from scratch)
* Separate hybrid model defs into different file and add several new model defs to fiddle with, support patch_size != 1 for hybrids
* Fix fine-tuning num_class changes (PiT and ViT) and pos_embed resizing (Vit) with distilled variants
* nn.Sequential for block stack (does not break downstream compat)
* TnT (Transformer-in-Transformer) models contributed by author (from https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT)
* Add RegNetY-160 weights from DeiT teacher model
* Add new NFNet-L0 w/ SE attn (rename `nfnet_l0b`->`nfnet_l0`) weights 82.75 top-1 @ 288x288
* Some fixes/improvements for TFDS dataset wrapper
### March 7, 2021
* First 0.4.x PyPi release w/ NFNets (& related), ByoB (GPU-Efficient, RepVGG, etc).
* Change feature extraction for pre-activation nets (NFNets, ResNetV2) to return features before activation.
### Feb 18, 2021
* Add pretrained weights and model variants for NFNet-F* models from [DeepMind Haiku impl](https://github.com/deepmind/deepmind-research/tree/master/nfnets).
* Models are prefixed with `dm_`. They require SAME padding conv, skipinit enabled, and activation gains applied in act fn.
  * These models are big; expect to run out of GPU memory. With the GELU activation + other options, they are roughly 1/2 the inference speed of my SiLU PyTorch optimized `s` variants.
* Original model results are based on pre-processing that is not the same as all other models so you'll see different results in the results csv (once updated).
* Matching the original pre-processing as closely as possible I get these results:
* `dm_nfnet_f6` - 86.352
* `dm_nfnet_f5` - 86.100
* `dm_nfnet_f4` - 85.834
* `dm_nfnet_f3` - 85.676
* `dm_nfnet_f2` - 85.178
* `dm_nfnet_f1` - 84.696
* `dm_nfnet_f0` - 83.464
### Feb 16, 2021
* Add Adaptive Gradient Clipping (AGC) as per https://arxiv.org/abs/2102.06171. Integrated w/ PyTorch gradient clipping via a mode arg that defaults to the previous 'norm' mode. For backward arg compat, the clip-grad arg must be specified to enable it when using train.py. A minimal sketch of the AGC computation follows below.
* AGC w/ default clipping factor `--clip-grad .01 --clip-mode agc`
* PyTorch global norm of 1.0 (old behaviour, always norm), `--clip-grad 1.0`
* PyTorch value clipping of 10, `--clip-grad 10. --clip-mode value`
* AGC performance is definitely sensitive to the clipping factor. More experimentation needed to determine good values for smaller batch sizes and optimizers besides those in paper. So far I've found .001-.005 is necessary for stable RMSProp training w/ NFNet/NF-ResNet.
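
For reference, the core of AGC is clipping each gradient by the ratio of its norm to the corresponding parameter's norm rather than by a global threshold. The snippet below is a simplified sketch using whole-tensor norms; the actual implementation in `timm` applies unit-wise norms and may differ in detail.

```python
import torch

def agc_clip_(parameters, clip_factor=0.01, eps=1e-3):
    """Simplified AGC sketch (whole-tensor norms), for illustration only."""
    for p in parameters:
        if p.grad is None:
            continue
        p_norm = p.detach().norm(2).clamp_(min=eps)  # parameter norm, floored for stability
        g_norm = p.grad.detach().norm(2)             # gradient norm
        max_norm = p_norm * clip_factor              # allowed gradient norm relative to the weight
        if g_norm > max_norm:
            p.grad.detach().mul_(max_norm / (g_norm + 1e-6))

# usage: call after loss.backward() and before optimizer.step()
# agc_clip_(model.parameters(), clip_factor=0.01)
```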
### Feb 12, 2021
* Update Normalization-Free nets to include new NFNet-F (https://arxiv.org/abs/2102.06171) model defs
### Feb 10, 2021
* More model archs, incl a flexible ByobNet backbone ('Bring-your-own-blocks')
* GPU-Efficient-Networks (https://github.com/idstcv/GPU-Efficient-Networks), impl in `byobnet.py`
* RepVGG (https://github.com/DingXiaoH/RepVGG), impl in `byobnet.py`
* classic VGG (from torchvision, impl in `vgg`)
* Refinements to normalizer layer arg handling and normalizer+act layer handling in some models
* Default AMP mode changed to native PyTorch AMP instead of APEX. Issues not being fixed with APEX. Native works with `--channels-last` and `--torchscript` model training, APEX does not.
* Fix a few bugs introduced since last pypi release
### Feb 8, 2021
* Add several ResNet weights with ECA attention. 26t & 50t trained @ 256, test @ 320. 269d train @ 256, fine-tune @320, test @ 352.
* `ecaresnet26t` - 79.88 top-1 @ 320x320, 79.08 @ 256x256
* `ecaresnet50t` - 82.35 top-1 @ 320x320, 81.52 @ 256x256
* `ecaresnet269d` - 84.93 top-1 @ 352x352, 84.87 @ 320x320
* Remove separate tiered (`t`) vs tiered_narrow (`tn`) ResNet model defs, all `tn` changed to `t` and `t` models removed (`seresnext26t_32x4d` only model w/ weights that was removed).
* Support model default_cfgs with separate train vs test resolution `test_input_size` and remove extra `_320` suffix ResNet model defs that were just for test.
### Jan 30, 2021
* Add initial "Normalization Free" NF-RegNet-B* and NF-ResNet model definitions based on [paper](https://arxiv.org/abs/2101.08692)
### Jan 25, 2021
* Add ResNetV2 Big Transfer (BiT) models w/ ImageNet-1k and 21k weights from https://github.com/google-research/big_transfer
* Add official R50+ViT-B/16 hybrid models + weights from https://github.com/google-research/vision_transformer
* ImageNet-21k ViT weights are added w/ model defs and representation layer (pre logits) support
* NOTE: ImageNet-21k classifier heads were zero'd in original weights, they are only useful for transfer learning
* Add model defs and weights for DeiT Vision Transformer models from https://github.com/facebookresearch/deit
* Refactor dataset classes into ImageDataset/IterableImageDataset + dataset specific parser classes
* Add Tensorflow-Datasets (TFDS) wrapper to allow use of TFDS image classification sets with train script
* Ex: `train.py /data/tfds --dataset tfds/oxford_iiit_pet --val-split test --model resnet50 -b 256 --amp --num-classes 37 --opt adamw --lr 3e-4 --weight-decay .001 --pretrained -j 2`
* Add improved .tar dataset parser that reads images from .tar, folder of .tar files, or .tar within .tar
* Run validation on full ImageNet-21k directly from tar w/ BiT model: `validate.py /data/fall11_whole.tar --model resnetv2_50x1_bitm_in21k --amp`
* Models in this update should be stable w/ possible exception of ViT/BiT, possibility of some regressions with train/val scripts and dataset handling
### Jan 3, 2021
* Add SE-ResNet-152D weights
* 256x256 val, 0.94 crop top-1 - 83.75
* 320x320 val, 1.0 crop - 84.36
* Update results files
### Dec 18, 2020
* Add ResNet-101D, ResNet-152D, and ResNet-200D weights trained @ 256x256
* 256x256 val, 0.94 crop (top-1) - 101D (82.33), 152D (83.08), 200D (83.25)
* 288x288 val, 1.0 crop - 101D (82.64), 152D (83.48), 200D (83.76)
* 320x320 val, 1.0 crop - 101D (83.00), 152D (83.66), 200D (84.01)
### Dec 7, 2020
* Simplify EMA module (ModelEmaV2), compatible with fully torchscripted models
* Misc fixes for SiLU ONNX export, default_cfg missing from Feature extraction models, Linear layer w/ AMP + torchscript
* PyPi release @ 0.3.2 (needed by EfficientDet)
### Oct 30, 2020
* Test with PyTorch 1.7 and fix a small top-n metric view vs reshape issue.
* Convert newly added 224x224 Vision Transformer weights from official JAX repo. 81.8 top-1 for B/16, 83.1 L/16.
* Support PyTorch 1.7 optimized, native SiLU (aka Swish) activation. Add mapping to 'silu' name, custom swish will eventually be deprecated.
* Fix regression for loading pretrained classifier via direct model entrypoint functions. Didn't impact create_model() factory usage.
* PyPi release @ 0.3.0 version!
### Oct 26, 2020
* Update Vision Transformer models to be compatible with official code release at https://github.com/google-research/vision_transformer
* Add Vision Transformer weights (ImageNet-21k pretrain) for 384x384 base and large models converted from official jax impl
* ViT-B/16 - 84.2
* ViT-B/32 - 81.7
* ViT-L/16 - 85.2
* ViT-L/32 - 81.5
### Oct 21, 2020
* Weights added for Vision Transformer (ViT) models. 77.86 top-1 for 'small' and 79.35 for 'base'. Thanks to [Christof](https://www.kaggle.com/christofhenkel) for training the base model w/ lots of GPUs.
### Oct 13, 2020
* Initial impl of Vision Transformer models. Both patch and hybrid (CNN backbone) variants. Currently trying to train...
* Adafactor and AdaHessian (FP32 only, no AMP) optimizers
* EdgeTPU-M (`efficientnet_em`) model trained in PyTorch, 79.3 top-1
* Pip release, doc updates pending a few more changes...
### Sept 18, 2020
* New ResNet 'D' weights. 72.7 (top-1) ResNet-18-D, 77.1 ResNet-34-D, 80.5 ResNet-50-D
* Added a few untrained defs for other ResNet models (66D, 101D, 152D, 200/200D)
### Sept 3, 2020
* New weights
* Wide-ResNet50 - 81.5 top-1 (vs 78.5 torchvision)
* SEResNeXt50-32x4d - 81.3 top-1 (vs 79.1 cadene)
* Support for native Torch AMP and channels_last memory format added to train/validate scripts (`--channels-last`, `--native-amp` vs `--apex-amp`)
* Models tested with channels_last on latest NGC 20.08 container. AdaptiveAvgPool in attn layers changed to mean((2,3)) to work around bug with NHWC kernel.
### Aug 12, 2020
* New/updated weights from training experiments
* EfficientNet-B3 - 82.1 top-1 (vs 81.6 for official with AA and 81.9 for AdvProp)
* RegNetY-3.2GF - 82.0 top-1 (78.9 from official ver)
* CSPResNet50 - 79.6 top-1 (76.6 from official ver)
* Add CutMix integrated w/ Mixup. See [pull request](https://github.com/rwightman/pytorch-image-models/pull/218) for some usage examples
* Some fixes for using pretrained weights with `in_chans` != 3 on several models.
### Aug 5, 2020
Universal feature extraction, new models, new weights, new test sets.
* All models support the `features_only=True` argument to `create_model`, returning a network that extracts feature maps from the deepest layer at each stride (a minimal usage sketch follows below).
* New models
* CSPResNet, CSPResNeXt, CSPDarkNet, DarkNet
* ReXNet
* (Modified Aligned) Xception41/65/71 (a proper port of TF models)
* New trained weights
* SEResNet50 - 80.3 top-1
* CSPDarkNet53 - 80.1 top-1
* CSPResNeXt50 - 80.0 top-1
* DPN68b - 79.2 top-1
* EfficientNet-Lite0 (non-TF ver) - 75.5 (submitted by [@hal-314](https://github.com/hal-314))
* Add 'real' labels for ImageNet and ImageNet-Renditions test set, see [`results/README.md`](results/README.md)
* Test set ranking/top-n diff script by [@KushajveerSingh](https://github.com/KushajveerSingh)
* Train script and loader/transform tweaks to punch through more aug arguments
* README and documentation overhaul. See initial (WIP) documentation at https://rwightman.github.io/pytorch-image-models/
* adamp and sgdp optimizers added by [@hellbell](https://github.com/hellbell)
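
As a quick illustration of the `features_only` interface described above, the snippet below builds a backbone and prints the shape of each returned feature map. The model name is just an example; the exact number of maps and strides depends on the architecture.

```python
import torch
import timm

# build a backbone that returns intermediate feature maps instead of classification logits
model = timm.create_model('resnet50', features_only=True, pretrained=False)
model.eval()

with torch.no_grad():
    feature_maps = model(torch.randn(1, 3, 224, 224))

for fmap in feature_maps:  # one tensor per output stride
    print(fmap.shape)
```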
### June 11, 2020
Bunch of changes:
* DenseNet models updated with memory efficient addition from torchvision (fixed a bug), blur pooling and deep stem additions
* VoVNet V1 and V2 models added, 39 V2 variant (ese_vovnet_39b) trained to 79.3 top-1
* Activation factory added along with new activations:
* select act at model creation time for more flexibility in using activations compatible with scripting or tracing (ONNX export)
* hard_mish (experimental) added with memory-efficient grad, along with ME hard_swish
  * context mgr for setting exportable/scriptable/no_jit states (a usage sketch follows at the end of this section)
* Norm + Activation combo layers added with initial trial support in DenseNet and VoVNet along with impl of EvoNorm and InplaceAbn wrapper that fit the interface
* Torchscript works for all but two of the model types as long as using Pytorch 1.5+, tests added for this
* Some import cleanup and classifier reset changes, all models will have classifier reset to nn.Identity on `reset_classifier(0)` call
* Prep for 0.1.28 pip release
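
A minimal sketch of the export-state context manager mentioned in the activation factory notes above, used to select scripting-friendly layers at model creation time (the model name is just an example):

```python
import torch
import timm
from timm import set_scriptable

# select TorchScript-compatible activations / layers at model build time
with set_scriptable(True):
    model = timm.create_model('efficientnet_b0', pretrained=False)

scripted = torch.jit.script(model.eval())  # most models should script cleanly in this mode
```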
### May 12, 2020
* Add ResNeSt models (code adapted from https://github.com/zhanghang1989/ResNeSt, paper https://arxiv.org/abs/2004.08955)
### May 3, 2020
* Pruned EfficientNet B1, B2, and B3 (https://arxiv.org/abs/2002.08258) contributed by [Yonathan Aflalo](https://github.com/yoniaflalo)
### May 1, 2020
* Merged a number of excellent contributions to the ResNet model family over the past month
* BlurPool2D and resnetblur models initiated by [Chris Ha](https://github.com/VRandme), I trained resnetblur50 to 79.3.
* TResNet models and SpaceToDepth, AntiAliasDownsampleLayer layers by [mrT23](https://github.com/mrT23)
* ecaresnet (50d, 101d, light) models and two pruned variants using pruning as per (https://arxiv.org/abs/2002.08258) by [Yonathan Aflalo](https://github.com/yoniaflalo)
* 200 pretrained models in total now with updated results csv in results folder
### April 5, 2020
* Add some newly trained MobileNet-V2 models trained with latest h-params, rand augment. They compare quite favourably to EfficientNet-Lite
* 3.5M param MobileNet-V2 100 @ 73%
* 4.5M param MobileNet-V2 110d @ 75%
* 6.1M param MobileNet-V2 140 @ 76.5%
* 5.8M param MobileNet-V2 120d @ 77.3%
### March 18, 2020
* Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
* Add RandAugment trained ResNeXt-50 32x4d weights with 79.8 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams)
### Feb 29, 2020
* New MobileNet-V3 Large weights trained from scratch with this code to 75.77% top-1
* IMPORTANT CHANGE - default weight init changed for all MobilenetV3 / EfficientNet / related models
  * overall results similar to or a bit better than training from scratch on a few smaller models tried
  * performance early in training seems consistently improved, but there is less difference by the end
  * set `fix_group_fanout=False` in `_init_weight_goog` fn if you need to reproduce past behaviour
* Experimental LR noise feature added; applies a random perturbation to the LR each epoch within a specified range of training
### Feb 18, 2020
* Big refactor of model layers and addition of several attention mechanisms. Several additions motivated by 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268):
* Move layer/module impl into `layers` subfolder/module of `models` and organize in a more granular fashion
* ResNet downsample paths now properly support dilation (output stride != 32) for avg_pool ('D' variant) and 3x3 (SENets) networks
* Add Selective Kernel Nets on top of ResNet base, pretrained weights
* skresnet18 - 73% top-1
* skresnet34 - 76.9% top-1
* skresnext50_32x4d (equiv to SKNet50) - 80.2% top-1
* ECA and CECA (circular padding) attention layer contributed by [Chris Ha](https://github.com/VRandme)
* CBAM attention experiment (not the best results so far, may remove)
* Attention factory to allow dynamically selecting one of SE, ECA, CBAM in the `.se` position for all ResNets
* Add DropBlock and DropPath (formerly DropConnect for EfficientNet/MobileNetv3) support to all ResNet variants
* Full dataset results updated that incl NoisyStudent weights and 2 of the 3 SK weights
### Feb 12, 2020
* Add EfficientNet-L2 and B0-B7 NoisyStudent weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet)
### Feb 6, 2020
* Add RandAugment trained EfficientNet-ES (EdgeTPU-Small) weights with 78.1 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams)
### Feb 1/2, 2020
* Port new EfficientNet-B8 (RandAugment) weights, these are different than the B8 AdvProp, different input normalization.
* Update results csv files on all models for ImageNet validation and three other test sets
* Push PyPi package update
### Jan 31, 2020
* Update ResNet50 weights with a new 79.038 result from further JSD / AugMix experiments. Full command line for reproduction in training section below.
### Jan 11/12, 2020
* Master may be a bit unstable wrt training; these changes have been tested but not all combos
* Implementations of AugMix added to existing RA and AA, including numerous supporting pieces like the JSD loss (Jensen-Shannon divergence + CE) and AugMixDataset (a sketch of the loss follows below)
* SplitBatchNorm adaptation layer added for implementing Auxiliary BN as per AdvProp paper
* ResNet-50 AugMix trained model w/ 79% top-1 added
* `seresnext26tn_32x4d` - 77.99 top-1, 93.75 top-5 added to tiered experiment, higher img/s than 't' and 'd'
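
For reference, a minimal sketch of an AugMix-style JSD + CE loss over one clean and two augmented views. This illustrates the idea only; timm's own `JsdCrossEntropy` may differ in weighting and defaults.

```python
import torch
import torch.nn.functional as F

def jsd_cross_entropy(logits_clean, logits_aug1, logits_aug2, target, alpha=12.0):
    # cross-entropy on the clean view only
    ce = F.cross_entropy(logits_clean, target)
    # Jensen-Shannon consistency across the three views
    p_clean, p_aug1, p_aug2 = (F.softmax(l, dim=1) for l in (logits_clean, logits_aug1, logits_aug2))
    p_mix = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1.).log()
    jsd = (F.kl_div(p_mix, p_clean, reduction='batchmean') +
           F.kl_div(p_mix, p_aug1, reduction='batchmean') +
           F.kl_div(p_mix, p_aug2, reduction='batchmean')) / 3.
    return ce + alpha * jsd
```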
### Jan 3, 2020
* Add RandAugment trained EfficientNet-B0 weight with 77.7 top-1. Trained by [Michael Klachko](https://github.com/michaelklachko) with this code and recent hparams (see Training section)
* Add `avg_checkpoints.py` script for post training weight averaging and update all scripts with header docstrings and shebangs.
### Dec 30, 2019
* Merge [Dushyant Mehta's](https://github.com/mehtadushy) PR for SelecSLS (Selective Short and Long Range Skip Connections) networks. Good GPU memory consumption and throughput. Original: https://github.com/mehtadushy/SelecSLS-Pytorch
### Dec 28, 2019
* Add new model weights and training hparams (see Training Hparams section)
* `efficientnet_b3` - 81.5 top-1, 95.7 top-5 at default res/crop, 81.9, 95.8 at 320x320 1.0 crop-pct
* trained with RandAugment, ended up with an interesting but less than perfect result (see training section)
* `seresnext26d_32x4d`- 77.6 top-1, 93.6 top-5
* deep stem (32, 32, 64), avgpool downsample
  * stem/downsample from bag-of-tricks paper
* `seresnext26t_32x4d`- 78.0 top-1, 93.7 top-5
* deep tiered stem (24, 48, 64), avgpool downsample (a modified 'D' variant)
* stem sizing mods from Jeremy Howard and fastai devs discussing ResNet architecture experiments
### Dec 23, 2019
* Add RandAugment trained MixNet-XL weights with 80.48 top-1.
* `--dist-bn` argument added to train.py, will distribute BN stats between nodes after each train epoch, before eval
### Dec 4, 2019
* Added weights from the first training from scratch of an EfficientNet (B2) with my new RandAugment implementation. Much better than my previous B2 and very close to the official AdvProp ones (80.4 top-1, 95.08 top-5).
### Nov 29, 2019
* Brought EfficientNet and MobileNetV3 up to date with my https://github.com/rwightman/gen-efficientnet-pytorch code. Torchscript and ONNX export compat excluded.
* AdvProp weights added
* Official TF MobileNetv3 weights added
* EfficientNet and MobileNetV3 hook based 'feature extraction' classes added. Will serve as basis for using models as backbones in obj detection/segmentation tasks. Lots more to be done here...
* HRNet classification models and weights added from https://github.com/HRNet/HRNet-Image-Classification
* Consistency in global pooling, `reset_classifier`, and `forward_features` across models
* `forward_features` always returns unpooled feature maps now
* Reasonable chance I broke something... let me know
### Nov 22, 2019
* Add ImageNet training RandAugment implementation alongside AutoAugment. PyTorch Transform compatible format, using PIL. Currently training two EfficientNet models from scratch with promising results... will update.
* `drop-connect` cmd line arg finally added to `train.py`, no need to hack model fns. Works for efficientnet/mobilenetv3 based models, ignored otherwise. | pytorch-image-models/docs/archived_changes.md/0 | {
"file_path": "pytorch-image-models/docs/archived_changes.md",
"repo_id": "pytorch-image-models",
"token_count": 9335
} | 164 |
# Deep Layer Aggregation
Extending "shallow" skip connections, **Deep Layer Aggregation (DLA)** incorporates more depth and sharing. The authors introduce two structures for deep layer aggregation (DLA): iterative deep aggregation (IDA) and hierarchical deep aggregation (HDA). These structures are expressed through an architectural framework, independent of the choice of backbone, for compatibility with current and future networks.
IDA focuses on fusing resolutions and scales while HDA focuses on merging features from all modules and channels. IDA follows the base hierarchy to refine resolution and aggregate scale stage-by-stage. HDA assembles its own hierarchy of tree-structured connections that cross and merge stages to aggregate different levels of representation.
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{yu2019deep,
title={Deep Layer Aggregation},
author={Fisher Yu and Dequan Wang and Evan Shelhamer and Trevor Darrell},
year={2019},
eprint={1707.06484},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: DLA
Paper:
Title: Deep Layer Aggregation
URL: https://paperswithcode.com/paper/deep-layer-aggregation
Models:
- Name: dla102
In Collection: DLA
Metadata:
FLOPs: 7192952808
Parameters: 33270000
File Size: 135290579
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L410
Weights: http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.03%
Top 5 Accuracy: 93.95%
- Name: dla102x
In Collection: DLA
Metadata:
FLOPs: 5886821352
Parameters: 26310000
File Size: 107552695
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102x
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L418
Weights: http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.51%
Top 5 Accuracy: 94.23%
- Name: dla102x2
In Collection: DLA
Metadata:
FLOPs: 9343847400
Parameters: 41280000
File Size: 167645295
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102x2
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L426
Weights: http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.44%
Top 5 Accuracy: 94.65%
- Name: dla169
In Collection: DLA
Metadata:
FLOPs: 11598004200
Parameters: 53390000
File Size: 216547113
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla169
LR: 0.1
Epochs: 120
Layers: 169
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L434
Weights: http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.69%
Top 5 Accuracy: 94.33%
- Name: dla34
In Collection: DLA
Metadata:
FLOPs: 3070105576
Parameters: 15740000
File Size: 63228658
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla34
LR: 0.1
Epochs: 120
    Layers: 34
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L362
Weights: http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.62%
Top 5 Accuracy: 92.06%
- Name: dla46_c
In Collection: DLA
Metadata:
FLOPs: 583277288
Parameters: 1300000
File Size: 5307963
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla46_c
LR: 0.1
Epochs: 120
Layers: 46
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L369
Weights: http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 64.87%
Top 5 Accuracy: 86.29%
- Name: dla46x_c
In Collection: DLA
Metadata:
FLOPs: 544052200
Parameters: 1070000
File Size: 4387641
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla46x_c
LR: 0.1
Epochs: 120
Layers: 46
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L378
Weights: http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 65.98%
Top 5 Accuracy: 86.99%
- Name: dla60
In Collection: DLA
Metadata:
FLOPs: 4256251880
Parameters: 22040000
File Size: 89560235
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60
LR: 0.1
Epochs: 120
Layers: 60
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L394
Weights: http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.04%
Top 5 Accuracy: 93.32%
- Name: dla60_res2net
In Collection: DLA
Metadata:
FLOPs: 4147578504
Parameters: 20850000
File Size: 84886593
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60_res2net
Layers: 60
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L346
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.46%
Top 5 Accuracy: 94.21%
- Name: dla60_res2next
In Collection: DLA
Metadata:
FLOPs: 3485335272
Parameters: 17030000
File Size: 69639245
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60_res2next
Layers: 60
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L354
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.44%
Top 5 Accuracy: 94.16%
- Name: dla60x
In Collection: DLA
Metadata:
FLOPs: 3544204264
Parameters: 17350000
File Size: 70883139
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60x
LR: 0.1
Epochs: 120
Layers: 60
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L402
Weights: http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.25%
Top 5 Accuracy: 94.02%
- Name: dla60x_c
In Collection: DLA
Metadata:
FLOPs: 593325032
Parameters: 1320000
File Size: 5454396
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60x_c
LR: 0.1
Epochs: 120
Layers: 60
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L386
Weights: http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 67.91%
Top 5 Accuracy: 88.42%
-->
| pytorch-image-models/docs/models/.templates/models/dla.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/dla.md",
"repo_id": "pytorch-image-models",
"token_count": 5955
} | 165 |
# Inception ResNet v2
**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture).
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{szegedy2016inceptionv4,
title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning},
author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi},
year={2016},
eprint={1602.07261},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Inception ResNet v2
Paper:
Title: Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning
URL: https://paperswithcode.com/paper/inception-v4-inception-resnet-and-the-impact
Models:
- Name: inception_resnet_v2
In Collection: Inception ResNet v2
Metadata:
FLOPs: 16959133120
Parameters: 55850000
File Size: 223774238
Architecture:
- Average Pooling
- Dropout
- Inception-ResNet-v2 Reduction-B
- Inception-ResNet-v2-A
- Inception-ResNet-v2-B
- Inception-ResNet-v2-C
- Reduction-A
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 20x NVIDIA Kepler GPUs
ID: inception_resnet_v2
LR: 0.045
Dropout: 0.2
Crop Pct: '0.897'
Momentum: 0.9
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_resnet_v2.py#L343
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 0.95%
Top 5 Accuracy: 17.29%
-->
| pytorch-image-models/docs/models/.templates/models/inception-resnet-v2.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/inception-resnet-v2.md",
"repo_id": "pytorch-image-models",
"token_count": 864
} | 166 |
# Res2NeXt
**Res2NeXt** is an image model that employs a variation on [ResNeXt](https://paperswithcode.com/method/resnext) bottleneck residual blocks. The motivation is to be able to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within one single residual block. This represents multi-scale features at a granular level and increases the range of receptive fields for each network layer.
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@article{Gao_2021,
title={Res2Net: A New Multi-Scale Backbone Architecture},
volume={43},
ISSN={1939-3539},
url={http://dx.doi.org/10.1109/TPAMI.2019.2938758},
DOI={10.1109/tpami.2019.2938758},
number={2},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
publisher={Institute of Electrical and Electronics Engineers (IEEE)},
author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
year={2021},
month={Feb},
   pages={652-662}
}
```
<!--
Type: model-index
Collections:
- Name: Res2NeXt
Paper:
Title: 'Res2Net: A New Multi-scale Backbone Architecture'
URL: https://paperswithcode.com/paper/res2net-a-new-multi-scale-backbone
Models:
- Name: res2next50
In Collection: Res2NeXt
Metadata:
FLOPs: 5396798208
Parameters: 24670000
File Size: 99019592
Architecture:
- Batch Normalization
- Convolution
- Global Average Pooling
- ReLU
- Res2NeXt Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x Titan Xp GPUs
ID: res2next50
LR: 0.1
Epochs: 100
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L207
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.24%
Top 5 Accuracy: 93.91%
-->
| pytorch-image-models/docs/models/.templates/models/res2next.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/res2next.md",
"repo_id": "pytorch-image-models",
"token_count": 905
} | 167 |
# (Tensorflow) EfficientNet CondConv
**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scale network width, depth, and resolution in a principled way.
The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to squeeze-and-excitation blocks.
This collection of models amends EfficientNet by adding [CondConv](https://paperswithcode.com/method/condconv) convolutions.
The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1904-04971,
author = {Brandon Yang and
Gabriel Bender and
Quoc V. Le and
Jiquan Ngiam},
title = {Soft Conditional Computation},
journal = {CoRR},
volume = {abs/1904.04971},
year = {2019},
url = {http://arxiv.org/abs/1904.04971},
archivePrefix = {arXiv},
eprint = {1904.04971},
timestamp = {Thu, 25 Apr 2019 13:55:01 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1904-04971.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: TF EfficientNet CondConv
Paper:
Title: 'CondConv: Conditionally Parameterized Convolutions for Efficient Inference'
URL: https://paperswithcode.com/paper/soft-conditional-computation
Models:
- Name: tf_efficientnet_cc_b0_4e
In Collection: TF EfficientNet CondConv
Metadata:
FLOPs: 224153788
Parameters: 13310000
File Size: 53490940
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- CondConv
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_cc_b0_4e
LR: 0.256
Epochs: 350
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 2048
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1561
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.32%
Top 5 Accuracy: 93.32%
- Name: tf_efficientnet_cc_b0_8e
In Collection: TF EfficientNet CondConv
Metadata:
FLOPs: 224158524
Parameters: 24010000
File Size: 96287616
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- CondConv
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_cc_b0_8e
LR: 0.256
Epochs: 350
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 2048
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1572
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.91%
Top 5 Accuracy: 93.65%
- Name: tf_efficientnet_cc_b1_8e
In Collection: TF EfficientNet CondConv
Metadata:
FLOPs: 370427824
Parameters: 39720000
File Size: 159206198
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- CondConv
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_cc_b1_8e
LR: 0.256
Epochs: 350
Crop Pct: '0.882'
Momentum: 0.9
Batch Size: 2048
Image Size: '240'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1584
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.33%
Top 5 Accuracy: 94.37%
-->
| pytorch-image-models/docs/models/.templates/models/tf-efficientnet-condconv.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/tf-efficientnet-condconv.md",
"repo_id": "pytorch-image-models",
"token_count": 2457
} | 168 |
""" ONNX-runtime validation script
This script was created to verify accuracy and performance of exported ONNX
models running with the onnxruntime. It utilizes the PyTorch dataloader/processing
pipeline for a fair comparison against the originals.
Copyright 2020 Ross Wightman
"""
import argparse
import numpy as np
import onnxruntime
from timm.data import create_loader, resolve_data_config, create_dataset
from timm.utils import AverageMeter
import time
parser = argparse.ArgumentParser(description='ONNX Validation')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--onnx-input', default='', type=str, metavar='PATH',
help='path to onnx model/weights file')
parser.add_argument('--onnx-output-opt', default='', type=str, metavar='PATH',
help='path to output optimized onnx graph')
parser.add_argument('--profile', action='store_true', default=False,
help='Enable profiler output.')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT',
help='Override default crop pct of 0.875')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
def main():
args = parser.parse_args()
args.gpu_id = 0
# Set graph optimization level
sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
if args.profile:
sess_options.enable_profiling = True
if args.onnx_output_opt:
sess_options.optimized_model_filepath = args.onnx_output_opt
session = onnxruntime.InferenceSession(args.onnx_input, sess_options)
data_config = resolve_data_config(vars(args))
loader = create_loader(
create_dataset('', args.data),
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=False,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=data_config['crop_pct']
)
input_name = session.get_inputs()[0].name
batch_time = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
for i, (input, target) in enumerate(loader):
# run the net and return prediction
output = session.run([], {input_name: input.data.numpy()})
output = output[0]
# measure accuracy and record loss
prec1, prec5 = accuracy_np(output, target.numpy())
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print(
f'Test: [{i}/{len(loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {input.size(0) / batch_time.avg:.3f}/s, '
f'{100 * batch_time.avg / input.size(0):.3f} ms/sample) \t'
f'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
f'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'
)
print(f' * Prec@1 {top1.avg:.3f} ({100-top1.avg:.3f}) Prec@5 {top5.avg:.3f} ({100.-top5.avg:.3f})')
def accuracy_np(output, target):
max_indices = np.argsort(output, axis=1)[:, ::-1]
top5 = 100 * np.equal(max_indices[:, :5], target[:, np.newaxis]).sum(axis=1).mean()
top1 = 100 * np.equal(max_indices[:, 0], target).mean()
return top1, top5
if __name__ == '__main__':
main()
| pytorch-image-models/onnx_validate.py/0 | {
"file_path": "pytorch-image-models/onnx_validate.py",
"repo_id": "pytorch-image-models",
"token_count": 1960
} | 169 |
"""Run tests for all models
Tests that run on CI should have a specific marker, e.g. @pytest.mark.base. This
marker is used to parallelize the CI runs, with one runner for each marker.
If new tests are added, ensure that they use one of the existing markers
(documented in pyproject.toml > pytest > markers) or that a new marker is added
for this set of tests. If using a new marker, adjust the test matrix in
.github/workflows/tests.yml to run tests with this new marker, otherwise the
tests will be skipped on CI.
"""
import pytest
import torch
import platform
import os
import fnmatch
_IS_MAC = platform.system() == 'Darwin'
try:
from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names, NodePathTracer
has_fx_feature_extraction = True
except ImportError:
has_fx_feature_extraction = False
import timm
from timm import list_models, create_model, set_scriptable, get_pretrained_cfg_value
from timm.layers import Format, get_spatial_dim, get_channel_dim
from timm.models import get_notrace_modules, get_notrace_functions
import importlib
import os
torch_backend = os.environ.get('TORCH_BACKEND')
if torch_backend is not None:
importlib.import_module(torch_backend)
torch_device = os.environ.get('TORCH_DEVICE', 'cpu')
timeout = os.environ.get('TIMEOUT')
timeout120 = int(timeout) if timeout else 120
timeout300 = int(timeout) if timeout else 300
if hasattr(torch._C, '_jit_set_profiling_executor'):
# legacy executor is too slow to compile large models for unit tests
# no need for the fusion performance here
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(False)
# transformer models don't support many of the spatial / feature based model functionalities
NON_STD_FILTERS = [
'vit_*', 'tnt_*', 'pit_*', 'coat_*', 'cait_*', '*mixer_*', 'gmlp_*', 'resmlp_*', 'twins_*',
'convit_*', 'levit*', 'visformer*', 'deit*', 'jx_nest_*', 'nest_*', 'xcit_*', 'crossvit_*', 'beit*',
'poolformer_*', 'volo_*', 'sequencer2d_*', 'pvt_v2*', 'mvitv2*', 'gcvit*', 'efficientformer*',
'eva_*', 'flexivit*', 'eva02*', 'samvit_*', 'efficientvit_m*', 'tiny_vit_*'
]
NUM_NON_STD = len(NON_STD_FILTERS)
# exclude models that cause specific test failures
if 'GITHUB_ACTIONS' in os.environ:
# GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models
EXCLUDE_FILTERS = [
'*efficientnet_l2*', '*resnext101_32x48d', '*in21k', '*152x4_bitm', '*101x3_bitm', '*50x3_bitm',
'*nfnet_f3*', '*nfnet_f4*', '*nfnet_f5*', '*nfnet_f6*', '*nfnet_f7*', '*efficientnetv2_xl*',
'*resnetrs350*', '*resnetrs420*', 'xcit_large_24_p8*', '*huge*', '*giant*', '*gigantic*',
'*enormous*', 'maxvit_xlarge*', 'regnet*1280', 'regnet*2560']
NON_STD_EXCLUDE_FILTERS = ['*huge*', '*giant*', '*gigantic*', '*enormous*']
else:
EXCLUDE_FILTERS = ['*enormous*']
NON_STD_EXCLUDE_FILTERS = ['*gigantic*', '*enormous*']
EXCLUDE_JIT_FILTERS = []
TARGET_FWD_SIZE = MAX_FWD_SIZE = 384
TARGET_BWD_SIZE = 128
MAX_BWD_SIZE = 320
MAX_FWD_OUT_SIZE = 448
TARGET_JIT_SIZE = 128
MAX_JIT_SIZE = 320
TARGET_FFEAT_SIZE = 96
MAX_FFEAT_SIZE = 256
TARGET_FWD_FX_SIZE = 128
MAX_FWD_FX_SIZE = 256
TARGET_BWD_FX_SIZE = 128
MAX_BWD_FX_SIZE = 224
def _get_input_size(model=None, model_name='', target=None):
if model is None:
assert model_name, "One of model or model_name must be provided"
input_size = get_pretrained_cfg_value(model_name, 'input_size')
fixed_input_size = get_pretrained_cfg_value(model_name, 'fixed_input_size')
min_input_size = get_pretrained_cfg_value(model_name, 'min_input_size')
else:
default_cfg = model.default_cfg
input_size = default_cfg['input_size']
fixed_input_size = default_cfg.get('fixed_input_size', None)
min_input_size = default_cfg.get('min_input_size', None)
assert input_size is not None
if fixed_input_size:
return input_size
if min_input_size:
if target and max(input_size) > target:
input_size = min_input_size
else:
if target and max(input_size) > target:
input_size = tuple([min(x, target) for x in input_size])
return input_size
@pytest.mark.base
@pytest.mark.timeout(timeout120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward(model_name, batch_size):
"""Run a single forward pass with each model"""
model = create_model(model_name, pretrained=False)
model.eval()
input_size = _get_input_size(model=model, target=TARGET_FWD_SIZE)
if max(input_size) > MAX_FWD_SIZE:
pytest.skip("Fixed input size model > limit.")
inputs = torch.randn((batch_size, *input_size))
inputs = inputs.to(torch_device)
model.to(torch_device)
outputs = model(inputs)
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
@pytest.mark.base
@pytest.mark.timeout(timeout120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [2])
def test_model_backward(model_name, batch_size):
"""Run a single forward pass with each model"""
input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_SIZE)
if max(input_size) > MAX_BWD_SIZE:
pytest.skip("Fixed input size model > limit.")
model = create_model(model_name, pretrained=False, num_classes=42)
num_params = sum([x.numel() for x in model.parameters()])
model.train()
inputs = torch.randn((batch_size, *input_size))
inputs = inputs.to(torch_device)
model.to(torch_device)
outputs = model(inputs)
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
outputs.mean().backward()
for n, x in model.named_parameters():
assert x.grad is not None, f'No gradient for {n}'
num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None])
assert outputs.shape[-1] == 42
assert num_params == num_grad, 'Some parameters are missing gradients'
assert not torch.isnan(outputs).any(), 'Output included NaNs'
@pytest.mark.cfg
@pytest.mark.timeout(timeout300)
@pytest.mark.parametrize('model_name', list_models(
exclude_filters=EXCLUDE_FILTERS + NON_STD_FILTERS, include_tags=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_default_cfgs(model_name, batch_size):
"""Run a single forward pass with each model"""
model = create_model(model_name, pretrained=False)
model.eval()
model.to(torch_device)
state_dict = model.state_dict()
cfg = model.default_cfg
pool_size = cfg['pool_size']
input_size = model.default_cfg['input_size']
output_fmt = getattr(model, 'output_fmt', 'NCHW')
spatial_axis = get_spatial_dim(output_fmt)
assert len(spatial_axis) == 2 # TODO add 1D sequence support
feat_axis = get_channel_dim(output_fmt)
if all([x <= MAX_FWD_OUT_SIZE for x in input_size]) and \
not any([fnmatch.fnmatch(model_name, x) for x in EXCLUDE_FILTERS]):
# output sizes only checked if default res <= 448 * 448 to keep resource down
input_size = tuple([min(x, MAX_FWD_OUT_SIZE) for x in input_size])
input_tensor = torch.randn((batch_size, *input_size), device=torch_device)
# test forward_features (always unpooled)
outputs = model.forward_features(input_tensor)
assert outputs.shape[spatial_axis[0]] == pool_size[0], 'unpooled feature shape != config'
assert outputs.shape[spatial_axis[1]] == pool_size[1], 'unpooled feature shape != config'
if not isinstance(model, (timm.models.MobileNetV3, timm.models.GhostNet, timm.models.RepGhostNet, timm.models.VGG)):
assert outputs.shape[feat_axis] == model.num_features
        # test forward after deleting the classifier, output should be pooled, size(-1) == model.num_features
model.reset_classifier(0)
model.to(torch_device)
outputs = model.forward(input_tensor)
assert len(outputs.shape) == 2
assert outputs.shape[1] == model.num_features
# test model forward without pooling and classifier
model.reset_classifier(0, '') # reset classifier and set global pooling to pass-through
model.to(torch_device)
outputs = model.forward(input_tensor)
assert len(outputs.shape) == 4
if not isinstance(model, (timm.models.MobileNetV3, timm.models.GhostNet, timm.models.RepGhostNet, timm.models.VGG)):
# mobilenetv3/ghostnet/repghostnet/vgg forward_features vs removed pooling differ due to location or lack of GAP
assert outputs.shape[spatial_axis[0]] == pool_size[0] and outputs.shape[spatial_axis[1]] == pool_size[1]
if 'pruned' not in model_name: # FIXME better pruned model handling
# test classifier + global pool deletion via __init__
model = create_model(model_name, pretrained=False, num_classes=0, global_pool='').eval()
model.to(torch_device)
outputs = model.forward(input_tensor)
assert len(outputs.shape) == 4
if not isinstance(model, (timm.models.MobileNetV3, timm.models.GhostNet, timm.models.RepGhostNet, timm.models.VGG)):
assert outputs.shape[spatial_axis[0]] == pool_size[0] and outputs.shape[spatial_axis[1]] == pool_size[1]
# check classifier name matches default_cfg
if cfg.get('num_classes', None):
classifier = cfg['classifier']
if not isinstance(classifier, (tuple, list)):
classifier = classifier,
for c in classifier:
assert c + ".weight" in state_dict.keys(), f'{c} not in model params'
# check first conv(s) names match default_cfg
first_conv = cfg['first_conv']
if isinstance(first_conv, str):
first_conv = (first_conv,)
assert isinstance(first_conv, (tuple, list))
for fc in first_conv:
assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params'
@pytest.mark.cfg
@pytest.mark.timeout(timeout300)
@pytest.mark.parametrize('model_name', list_models(filter=NON_STD_FILTERS, exclude_filters=NON_STD_EXCLUDE_FILTERS, include_tags=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_default_cfgs_non_std(model_name, batch_size):
"""Run a single forward pass with each model"""
model = create_model(model_name, pretrained=False)
model.eval()
model.to(torch_device)
state_dict = model.state_dict()
cfg = model.default_cfg
input_size = _get_input_size(model=model)
if max(input_size) > 320: # FIXME const
pytest.skip("Fixed input size model > limit.")
input_tensor = torch.randn((batch_size, *input_size), device=torch_device)
feat_dim = getattr(model, 'feature_dim', None)
outputs = model.forward_features(input_tensor)
if isinstance(outputs, (tuple, list)):
# cannot currently verify multi-tensor output.
pass
else:
if feat_dim is None:
feat_dim = -1 if outputs.ndim == 3 else 1
assert outputs.shape[feat_dim] == model.num_features
# test forward after deleting the classifier, output should be pooled, size(-1) == model.num_features
model.reset_classifier(0)
model.to(torch_device)
outputs = model.forward(input_tensor)
if isinstance(outputs, (tuple, list)):
outputs = outputs[0]
if feat_dim is None:
feat_dim = -1 if outputs.ndim == 3 else 1
assert outputs.shape[feat_dim] == model.num_features, 'pooled num_features != config'
model = create_model(model_name, pretrained=False, num_classes=0).eval()
model.to(torch_device)
outputs = model.forward(input_tensor)
if isinstance(outputs, (tuple, list)):
outputs = outputs[0]
if feat_dim is None:
feat_dim = -1 if outputs.ndim == 3 else 1
assert outputs.shape[feat_dim] == model.num_features
# check classifier name matches default_cfg
if cfg.get('num_classes', None):
classifier = cfg['classifier']
if not isinstance(classifier, (tuple, list)):
classifier = classifier,
for c in classifier:
assert c + ".weight" in state_dict.keys(), f'{c} not in model params'
# check first conv(s) names match default_cfg
first_conv = cfg['first_conv']
if isinstance(first_conv, str):
first_conv = (first_conv,)
assert isinstance(first_conv, (tuple, list))
for fc in first_conv:
assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params'
if 'GITHUB_ACTIONS' not in os.environ:
@pytest.mark.timeout(240)
@pytest.mark.parametrize('model_name', list_models(pretrained=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_load_pretrained(model_name, batch_size):
"""Create that pretrained weights load, verify support for in_chans != 3 while doing so."""
in_chans = 3 if 'pruned' in model_name else 1 # pruning not currently supported with in_chans change
create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=5)
create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=0)
@pytest.mark.timeout(240)
@pytest.mark.parametrize('model_name', list_models(pretrained=True, exclude_filters=NON_STD_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_features_pretrained(model_name, batch_size):
"""Create that pretrained weights load when features_only==True."""
create_model(model_name, pretrained=True, features_only=True)
@pytest.mark.torchscript
@pytest.mark.timeout(timeout120)
@pytest.mark.parametrize(
'model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_torchscript(model_name, batch_size):
"""Run a single forward pass with each model"""
input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE)
if max(input_size) > MAX_JIT_SIZE:
pytest.skip("Fixed input size model > limit.")
with set_scriptable(True):
model = create_model(model_name, pretrained=False)
model.eval()
model = torch.jit.script(model)
model.to(torch_device)
outputs = model(torch.randn((batch_size, *input_size)))
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
EXCLUDE_FEAT_FILTERS = [
'*pruned*', # hopefully fix at some point
] + NON_STD_FILTERS
if 'GITHUB_ACTIONS' in os.environ: # and 'Linux' in platform.system():
# GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models
EXCLUDE_FEAT_FILTERS += ['*resnext101_32x32d', '*resnext101_32x16d']
@pytest.mark.features
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FEAT_FILTERS, include_tags=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_features(model_name, batch_size):
"""Run a single forward pass with each model in feature extraction mode"""
model = create_model(model_name, pretrained=False, features_only=True)
model.eval()
expected_channels = model.feature_info.channels()
expected_reduction = model.feature_info.reduction()
assert len(expected_channels) >= 4 # all models here should have at least 4 feature levels by default, some 5 or 6
input_size = _get_input_size(model=model, target=TARGET_FFEAT_SIZE)
if max(input_size) > MAX_FFEAT_SIZE:
pytest.skip("Fixed input size model > limit.")
output_fmt = getattr(model, 'output_fmt', 'NCHW')
feat_axis = get_channel_dim(output_fmt)
spatial_axis = get_spatial_dim(output_fmt)
import math
outputs = model(torch.randn((batch_size, *input_size)))
assert len(expected_channels) == len(outputs)
spatial_size = input_size[-2:]
for e, r, o in zip(expected_channels, expected_reduction, outputs):
assert e == o.shape[feat_axis]
assert o.shape[spatial_axis[0]] <= math.ceil(spatial_size[0] / r) + 1
assert o.shape[spatial_axis[1]] <= math.ceil(spatial_size[1] / r) + 1
assert o.shape[0] == batch_size
assert not torch.isnan(o).any()
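# Usage sketch (illustrative, mirrors the assertions above):
#   m = create_model('resnet18', pretrained=False, features_only=True)
#   feats = m(torch.randn(1, 3, 224, 224))              # list of feature maps, one per feature level
#   assert [f.shape[1] for f in feats] == m.feature_info.channels()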
def _create_fx_model(model, train=False):
# This block of code does a bit of juggling to handle any case where there are multiple outputs in train mode
# So we trace once and look at the graph, and get the indices of the nodes that lead into the original fx output
# node. Then we use those indices to select from train_nodes returned by torchvision get_graph_node_names
tracer_kwargs = dict(
leaf_modules=get_notrace_modules(),
autowrap_functions=get_notrace_functions(),
#enable_cpatching=True,
param_shapes_constant=True
)
train_nodes, eval_nodes = get_graph_node_names(model, tracer_kwargs=tracer_kwargs)
eval_return_nodes = [eval_nodes[-1]]
train_return_nodes = [train_nodes[-1]]
if train:
tracer = NodePathTracer(**tracer_kwargs)
graph = tracer.trace(model)
graph_nodes = list(reversed(graph.nodes))
output_node_names = [n.name for n in graph_nodes[0]._input_nodes.keys()]
graph_node_names = [n.name for n in graph_nodes]
output_node_indices = [-graph_node_names.index(node_name) for node_name in output_node_names]
train_return_nodes = [train_nodes[ix] for ix in output_node_indices]
fx_model = create_feature_extractor(
model,
train_return_nodes=train_return_nodes,
eval_return_nodes=eval_return_nodes,
tracer_kwargs=tracer_kwargs,
)
return fx_model
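# Usage sketch (illustrative, assumes FX feature extraction is available):
#   m = create_model('resnet18', pretrained=False).eval()
#   fx_m = _create_fx_model(m)                           # GraphModule returning a dict of node outputs
#   out = tuple(fx_m(torch.randn(1, 3, 224, 224)).values())[0]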
EXCLUDE_FX_FILTERS = ['vit_gi*']
# not enough memory to run fx on more models than other tests
if 'GITHUB_ACTIONS' in os.environ:
EXCLUDE_FX_FILTERS += [
'beit_large*',
'mixer_l*',
'*nfnet_f2*',
'*resnext101_32x32d',
'resnetv2_152x2*',
'resmlp_big*',
'resnetrs270',
'swin_large*',
'vgg*',
'vit_large*',
'vit_base_patch8*',
'xcit_large*',
]
@pytest.mark.fxforward
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_fx(model_name, batch_size):
"""
Symbolically trace each model and run single forward pass through the resulting GraphModule
Also check that the output of a forward pass through the GraphModule is the same as that from the original Module
"""
if not has_fx_feature_extraction:
pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.")
model = create_model(model_name, pretrained=False)
model.eval()
input_size = _get_input_size(model=model, target=TARGET_FWD_FX_SIZE)
if max(input_size) > MAX_FWD_FX_SIZE:
pytest.skip("Fixed input size model > limit.")
with torch.no_grad():
inputs = torch.randn((batch_size, *input_size))
outputs = model(inputs)
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
model = _create_fx_model(model)
fx_outputs = tuple(model(inputs).values())
if isinstance(fx_outputs, tuple):
fx_outputs = torch.cat(fx_outputs)
assert torch.all(fx_outputs == outputs)
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
@pytest.mark.fxbackward
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(
exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [2])
def test_model_backward_fx(model_name, batch_size):
"""Symbolically trace each model and run single backward pass through the resulting GraphModule"""
if not has_fx_feature_extraction:
pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.")
input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_FX_SIZE)
if max(input_size) > MAX_BWD_FX_SIZE:
pytest.skip("Fixed input size model > limit.")
model = create_model(model_name, pretrained=False, num_classes=42)
model.train()
num_params = sum([x.numel() for x in model.parameters()])
if 'GITHUB_ACTIONS' in os.environ and num_params > 100e6:
pytest.skip("Skipping FX backward test on model with more than 100M params.")
model = _create_fx_model(model, train=True)
outputs = tuple(model(torch.randn((batch_size, *input_size))).values())
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
outputs.mean().backward()
for n, x in model.named_parameters():
assert x.grad is not None, f'No gradient for {n}'
num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None])
assert outputs.shape[-1] == 42
assert num_params == num_grad, 'Some parameters are missing gradients'
assert not torch.isnan(outputs).any(), 'Output included NaNs'
if 'GITHUB_ACTIONS' not in os.environ:
# FIXME this test is causing GitHub actions to run out of RAM and abruptly kill the test process
# reason: model is scripted after fx tracing, but beit has torch.jit.is_scripting() control flow
EXCLUDE_FX_JIT_FILTERS = [
'deit_*_distilled_patch16_224',
'levit*',
'pit_*_distilled_224',
] + EXCLUDE_FX_FILTERS
@pytest.mark.timeout(120)
@pytest.mark.parametrize(
'model_name', list_models(
exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS + EXCLUDE_FX_JIT_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_fx_torchscript(model_name, batch_size):
"""Symbolically trace each model, script it, and run single forward pass"""
if not has_fx_feature_extraction:
pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.")
input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE)
if max(input_size) > MAX_JIT_SIZE:
pytest.skip("Fixed input size model > limit.")
with set_scriptable(True):
model = create_model(model_name, pretrained=False)
model.eval()
model = torch.jit.script(_create_fx_model(model))
with torch.no_grad():
outputs = tuple(model(torch.randn((batch_size, *input_size))).values())
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
| pytorch-image-models/tests/test_models.py/0 | {
"file_path": "pytorch-image-models/tests/test_models.py",
"repo_id": "pytorch-image-models",
"token_count": 9191
} | 170 |
""" Loader Factory, Fast Collate, CUDA Prefetcher
Prefetcher and Fast Collate inspired by NVIDIA APEX example at
https://github.com/NVIDIA/apex/commit/d5e2bb4bdeedd27b1dfaf5bb2b24d6c000dee9be#diff-cf86c282ff7fba81fad27a559379d5bf
Hacked together by / Copyright 2019, Ross Wightman
"""
import logging
import random
from contextlib import suppress
from functools import partial
from itertools import repeat
from typing import Callable, Optional, Tuple, Union
import torch
import torch.utils.data
import numpy as np
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .dataset import IterableImageDataset, ImageDataset
from .distributed_sampler import OrderedDistributedSampler, RepeatAugSampler
from .random_erasing import RandomErasing
from .mixup import FastCollateMixup
from .transforms_factory import create_transform
_logger = logging.getLogger(__name__)
def fast_collate(batch):
""" A fast collation function optimized for uint8 images (np array or torch) and int64 targets (labels)"""
assert isinstance(batch[0], tuple)
batch_size = len(batch)
if isinstance(batch[0][0], tuple):
# This branch 'deinterleaves' and flattens tuples of input tensors into one tensor ordered by position
# such that the tuple elements at position n all end up in the nth chunk of a torch.split(tensor, batch_size)
inner_tuple_size = len(batch[0][0])
flattened_batch_size = batch_size * inner_tuple_size
targets = torch.zeros(flattened_batch_size, dtype=torch.int64)
tensor = torch.zeros((flattened_batch_size, *batch[0][0][0].shape), dtype=torch.uint8)
for i in range(batch_size):
assert len(batch[i][0]) == inner_tuple_size # all input tensor tuples must be same length
for j in range(inner_tuple_size):
targets[i + j * batch_size] = batch[i][1]
tensor[i + j * batch_size] += torch.from_numpy(batch[i][0][j])
return tensor, targets
elif isinstance(batch[0][0], np.ndarray):
targets = torch.tensor([b[1] for b in batch], dtype=torch.int64)
assert len(targets) == batch_size
tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
for i in range(batch_size):
tensor[i] += torch.from_numpy(batch[i][0])
return tensor, targets
elif isinstance(batch[0][0], torch.Tensor):
targets = torch.tensor([b[1] for b in batch], dtype=torch.int64)
assert len(targets) == batch_size
tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
for i in range(batch_size):
tensor[i].copy_(batch[i][0])
return tensor, targets
else:
assert False
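# Example (illustrative): collating two (uint8 CHW ndarray, int label) samples
#   batch = [(np.zeros((3, 224, 224), dtype=np.uint8), 0), (np.ones((3, 224, 224), dtype=np.uint8), 1)]
#   tensor, targets = fast_collate(batch)   # tensor: uint8 (2, 3, 224, 224), targets: int64 (2,)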
def adapt_to_chs(x, n):
if not isinstance(x, (tuple, list)):
x = tuple(repeat(x, n))
elif len(x) != n:
x_mean = np.mean(x).item()
x = (x_mean,) * n
_logger.warning(f'Pretrained mean/std different shape than model, using avg value {x}.')
else:
assert len(x) == n, 'normalization stats must match image channels'
return x
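# Example (illustrative): scalar stats are repeated, mismatched lengths fall back to the mean value
#   adapt_to_chs(0.5, 3)              -> (0.5, 0.5, 0.5)
#   adapt_to_chs((0.4, 0.5, 0.6), 1)  -> (0.5,) with a warning logged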
class PrefetchLoader:
def __init__(
self,
loader,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
channels=3,
device=torch.device('cuda'),
img_dtype=torch.float32,
fp16=False,
re_prob=0.,
re_mode='const',
re_count=1,
re_num_splits=0):
mean = adapt_to_chs(mean, channels)
std = adapt_to_chs(std, channels)
normalization_shape = (1, channels, 1, 1)
self.loader = loader
self.device = device
if fp16:
# fp16 arg is deprecated, but will override dtype arg if set for bwd compat
img_dtype = torch.float16
self.img_dtype = img_dtype
self.mean = torch.tensor(
[x * 255 for x in mean], device=device, dtype=img_dtype).view(normalization_shape)
self.std = torch.tensor(
[x * 255 for x in std], device=device, dtype=img_dtype).view(normalization_shape)
if re_prob > 0.:
self.random_erasing = RandomErasing(
probability=re_prob,
mode=re_mode,
max_count=re_count,
num_splits=re_num_splits,
device=device,
)
else:
self.random_erasing = None
self.is_cuda = torch.cuda.is_available() and device.type == 'cuda'
def __iter__(self):
first = True
if self.is_cuda:
stream = torch.cuda.Stream()
stream_context = partial(torch.cuda.stream, stream=stream)
else:
stream = None
stream_context = suppress
for next_input, next_target in self.loader:
with stream_context():
next_input = next_input.to(device=self.device, non_blocking=True)
next_target = next_target.to(device=self.device, non_blocking=True)
next_input = next_input.to(self.img_dtype).sub_(self.mean).div_(self.std)
if self.random_erasing is not None:
next_input = self.random_erasing(next_input)
if not first:
yield input, target
else:
first = False
if stream is not None:
torch.cuda.current_stream().wait_stream(stream)
input = next_input
target = next_target
yield input, target
def __len__(self):
return len(self.loader)
@property
def sampler(self):
return self.loader.sampler
@property
def dataset(self):
return self.loader.dataset
@property
def mixup_enabled(self):
if isinstance(self.loader.collate_fn, FastCollateMixup):
return self.loader.collate_fn.mixup_enabled
else:
return False
@mixup_enabled.setter
def mixup_enabled(self, x):
if isinstance(self.loader.collate_fn, FastCollateMixup):
self.loader.collate_fn.mixup_enabled = x
def _worker_init(worker_id, worker_seeding='all'):
worker_info = torch.utils.data.get_worker_info()
assert worker_info.id == worker_id
if isinstance(worker_seeding, Callable):
seed = worker_seeding(worker_info)
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed % (2 ** 32 - 1))
else:
assert worker_seeding in ('all', 'part')
# random / torch seed already called in dataloader iter class w/ worker_info.seed
# to reproduce some old results (same seed + hparam combo), partial seeding is required (skip numpy re-seed)
if worker_seeding == 'all':
np.random.seed(worker_info.seed % (2 ** 32 - 1))
def create_loader(
dataset: Union[ImageDataset, IterableImageDataset],
input_size: Union[int, Tuple[int, int], Tuple[int, int, int]],
batch_size: int,
is_training: bool = False,
no_aug: bool = False,
re_prob: float = 0.,
re_mode: str = 'const',
re_count: int = 1,
re_split: bool = False,
train_crop_mode: Optional[str] = None,
scale: Optional[Tuple[float, float]] = None,
ratio: Optional[Tuple[float, float]] = None,
hflip: float = 0.5,
vflip: float = 0.,
color_jitter: float = 0.4,
color_jitter_prob: Optional[float] = None,
grayscale_prob: float = 0.,
gaussian_blur_prob: float = 0.,
auto_augment: Optional[str] = None,
num_aug_repeats: int = 0,
num_aug_splits: int = 0,
interpolation: str = 'bilinear',
mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN,
std: Tuple[float, ...] = IMAGENET_DEFAULT_STD,
num_workers: int = 1,
distributed: bool = False,
crop_pct: Optional[float] = None,
crop_mode: Optional[str] = None,
crop_border_pixels: Optional[int] = None,
collate_fn: Optional[Callable] = None,
pin_memory: bool = False,
fp16: bool = False, # deprecated, use img_dtype
img_dtype: torch.dtype = torch.float32,
device: torch.device = torch.device('cuda'),
use_prefetcher: bool = True,
use_multi_epochs_loader: bool = False,
persistent_workers: bool = True,
worker_seeding: str = 'all',
tf_preprocessing: bool = False,
):
"""
Args:
dataset: The image dataset to load.
input_size: Target input size (channels, height, width) tuple or size scalar.
batch_size: Number of samples in a batch.
is_training: Return training (random) transforms.
no_aug: Disable augmentation for training (useful for debug).
re_prob: Random erasing probability.
re_mode: Random erasing fill mode.
re_count: Number of random erasing regions.
re_split: Control split of random erasing across batch size.
scale: Random resize scale range (crop area, < 1.0 => zoom in).
ratio: Random aspect ratio range (crop ratio for RRC, ratio adjustment factor for RKR).
hflip: Horizontal flip probability.
vflip: Vertical flip probability.
color_jitter: Random color jitter component factors (brightness, contrast, saturation, hue).
Scalar is applied as (scalar,) * 3 (no hue).
color_jitter_prob: Apply color jitter with this probability if not None (for SimCLR-like aug).
grayscale_prob: Probability of converting image to grayscale (for SimCLR-like aug).
gaussian_blur_prob: Probability of applying gaussian blur (for SimCLR-like aug).
auto_augment: Auto augment configuration string (see auto_augment.py).
num_aug_repeats: Enable special sampler to repeat same augmentation across distributed GPUs.
num_aug_splits: Enable mode where augmentations can be split across the batch.
interpolation: Image interpolation mode.
mean: Image normalization mean.
std: Image normalization standard deviation.
num_workers: Num worker processes per DataLoader.
distributed: Enable dataloading for distributed training.
crop_pct: Inference crop percentage (output size / resize size).
crop_mode: Inference crop mode. One of ['squash', 'border', 'center']. Defaults to 'center' when None.
crop_border_pixels: Inference crop border of specified # pixels around edge of original image.
collate_fn: Override default collate_fn.
pin_memory: Pin memory for device transfer.
fp16: Deprecated argument for half-precision input dtype. Use img_dtype.
img_dtype: Data type for input image.
device: Device to transfer inputs and targets to.
use_prefetcher: Use efficient pre-fetcher to load samples onto device.
use_multi_epochs_loader: Use MultiEpochsDataLoader to reuse the underlying iterator (and worker processes) across epochs.
persistent_workers: Enable persistent worker processes.
worker_seeding: Control worker random seeding at init.
tf_preprocessing: Use TF 1.0 inference preprocessing for testing model ports.
Returns:
DataLoader
"""
re_num_splits = 0
if re_split:
# apply RE to second half of batch if no aug split otherwise line up with aug split
re_num_splits = num_aug_splits or 2
dataset.transform = create_transform(
input_size,
is_training=is_training,
no_aug=no_aug,
train_crop_mode=train_crop_mode,
scale=scale,
ratio=ratio,
hflip=hflip,
vflip=vflip,
color_jitter=color_jitter,
color_jitter_prob=color_jitter_prob,
grayscale_prob=grayscale_prob,
gaussian_blur_prob=gaussian_blur_prob,
auto_augment=auto_augment,
interpolation=interpolation,
mean=mean,
std=std,
crop_pct=crop_pct,
crop_mode=crop_mode,
crop_border_pixels=crop_border_pixels,
re_prob=re_prob,
re_mode=re_mode,
re_count=re_count,
re_num_splits=re_num_splits,
tf_preprocessing=tf_preprocessing,
use_prefetcher=use_prefetcher,
separate=num_aug_splits > 0,
)
if isinstance(dataset, IterableImageDataset):
# give Iterable datasets early knowledge of num_workers so that sample estimates
# are correct before worker processes are launched
dataset.set_loader_cfg(num_workers=num_workers)
sampler = None
if distributed and not isinstance(dataset, torch.utils.data.IterableDataset):
if is_training:
if num_aug_repeats:
sampler = RepeatAugSampler(dataset, num_repeats=num_aug_repeats)
else:
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
# This will add extra duplicate entries to result in equal num
# of samples per-process, will slightly alter validation results
sampler = OrderedDistributedSampler(dataset)
else:
assert num_aug_repeats == 0, "RepeatAugment not currently supported in non-distributed or IterableDataset use"
if collate_fn is None:
collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate
loader_class = torch.utils.data.DataLoader
if use_multi_epochs_loader:
loader_class = MultiEpochsDataLoader
loader_args = dict(
batch_size=batch_size,
shuffle=not isinstance(dataset, torch.utils.data.IterableDataset) and sampler is None and is_training,
num_workers=num_workers,
sampler=sampler,
collate_fn=collate_fn,
pin_memory=pin_memory,
drop_last=is_training,
worker_init_fn=partial(_worker_init, worker_seeding=worker_seeding),
persistent_workers=persistent_workers
)
try:
loader = loader_class(dataset, **loader_args)
except TypeError as e:
loader_args.pop('persistent_workers') # only in Pytorch 1.7+
loader = loader_class(dataset, **loader_args)
if use_prefetcher:
prefetch_re_prob = re_prob if is_training and not no_aug else 0.
loader = PrefetchLoader(
loader,
mean=mean,
std=std,
channels=input_size[0],
device=device,
fp16=fp16, # deprecated, use img_dtype
img_dtype=img_dtype,
re_prob=prefetch_re_prob,
re_mode=re_mode,
re_count=re_count,
re_num_splits=re_num_splits
)
return loader
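# Usage sketch (illustrative, assumes `ds` is a timm ImageDataset):
#   loader = create_loader(ds, input_size=(3, 224, 224), batch_size=64, is_training=True,
#                          use_prefetcher=torch.cuda.is_available())
#   for images, targets in loader:
#       ...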
class MultiEpochsDataLoader(torch.utils.data.DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._DataLoader__initialized = False
if self.batch_sampler is None:
self.sampler = _RepeatSampler(self.sampler)
else:
self.batch_sampler = _RepeatSampler(self.batch_sampler)
self._DataLoader__initialized = True
self.iterator = super().__iter__()
def __len__(self):
return len(self.sampler) if self.batch_sampler is None else len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
| pytorch-image-models/timm/data/loader.py/0 | {
"file_path": "pytorch-image-models/timm/data/loader.py",
"repo_id": "pytorch-image-models",
"token_count": 6793
} | 171 |
""" Real labels evaluator for ImageNet
Paper: `Are we done with ImageNet?` - https://arxiv.org/abs/2006.07159
Based on Numpy example at https://github.com/google-research/reassessed-imagenet
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
import json
import numpy as np
import pkgutil
class RealLabelsImagenet:
def __init__(self, filenames, real_json=None, topk=(1, 5)):
if real_json is not None:
with open(real_json) as real_labels:
real_labels = json.load(real_labels)
else:
real_labels = json.loads(
pkgutil.get_data(__name__, os.path.join('_info', 'imagenet_real_labels.json')).decode('utf-8'))
real_labels = {f'ILSVRC2012_val_{i + 1:08d}.JPEG': labels for i, labels in enumerate(real_labels)}
self.real_labels = real_labels
self.filenames = filenames
assert len(self.filenames) == len(self.real_labels)
self.topk = topk
self.is_correct = {k: [] for k in topk}
self.sample_idx = 0
def add_result(self, output):
maxk = max(self.topk)
_, pred_batch = output.topk(maxk, 1, True, True)
pred_batch = pred_batch.cpu().numpy()
for pred in pred_batch:
filename = self.filenames[self.sample_idx]
filename = os.path.basename(filename)
if self.real_labels[filename]:
for k in self.topk:
self.is_correct[k].append(
any([p in self.real_labels[filename] for p in pred[:k]]))
self.sample_idx += 1
def get_accuracy(self, k=None):
if k is None:
return {k: float(np.mean(self.is_correct[k])) * 100 for k in self.topk}
else:
return float(np.mean(self.is_correct[k])) * 100
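# Usage sketch (illustrative):
#   real = RealLabelsImagenet(filenames)     # filenames in the same order as the validation loader
#   for output in model_outputs:             # hypothetical iterable of per-batch logits
#       real.add_result(output)
#   real.get_accuracy()                      # e.g. {1: 85.1, 5: 97.3}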
| pytorch-image-models/timm/data/real_labels.py/0 | {
"file_path": "pytorch-image-models/timm/data/real_labels.py",
"repo_id": "pytorch-image-models",
"token_count": 854
} | 172 |
""" Model / Layer Config singleton state
"""
import os
import warnings
from typing import Any, Optional
import torch
__all__ = [
'is_exportable', 'is_scriptable', 'is_no_jit', 'use_fused_attn',
'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config', 'set_fused_attn'
]
# Set to True if prefer to have layers with no jit optimization (includes activations)
_NO_JIT = False
# Set to True if prefer to have activation layers with no jit optimization
# NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying
# the jit flags so far are activations. This will change as more layers are updated and/or added.
_NO_ACTIVATION_JIT = False
# Set to True if exporting a model with Same padding via ONNX
_EXPORTABLE = False
# Set to True if wanting to use torch.jit.script on a model
_SCRIPTABLE = False
# use torch.scaled_dot_product_attention where possible
_HAS_FUSED_ATTN = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
if 'TIMM_FUSED_ATTN' in os.environ:
_USE_FUSED_ATTN = int(os.environ['TIMM_FUSED_ATTN'])
else:
_USE_FUSED_ATTN = 1 # 0 == off, 1 == on (for tested use), 2 == on (for experimental use)
def is_no_jit():
return _NO_JIT
class set_no_jit:
def __init__(self, mode: bool) -> None:
global _NO_JIT
self.prev = _NO_JIT
_NO_JIT = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _NO_JIT
_NO_JIT = self.prev
return False
def is_exportable():
return _EXPORTABLE
class set_exportable:
def __init__(self, mode: bool) -> None:
global _EXPORTABLE
self.prev = _EXPORTABLE
_EXPORTABLE = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _EXPORTABLE
_EXPORTABLE = self.prev
return False
def is_scriptable():
return _SCRIPTABLE
class set_scriptable:
def __init__(self, mode: bool) -> None:
global _SCRIPTABLE
self.prev = _SCRIPTABLE
_SCRIPTABLE = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _SCRIPTABLE
_SCRIPTABLE = self.prev
return False
class set_layer_config:
""" Layer config context manager that allows setting all layer config flags at once.
If a flag arg is None, it will not change the current value.
"""
def __init__(
self,
scriptable: Optional[bool] = None,
exportable: Optional[bool] = None,
no_jit: Optional[bool] = None,
no_activation_jit: Optional[bool] = None):
global _SCRIPTABLE
global _EXPORTABLE
global _NO_JIT
global _NO_ACTIVATION_JIT
self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT
if scriptable is not None:
_SCRIPTABLE = scriptable
if exportable is not None:
_EXPORTABLE = exportable
if no_jit is not None:
_NO_JIT = no_jit
if no_activation_jit is not None:
_NO_ACTIVATION_JIT = no_activation_jit
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _SCRIPTABLE
global _EXPORTABLE
global _NO_JIT
global _NO_ACTIVATION_JIT
_SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev
return False
def use_fused_attn(experimental: bool = False) -> bool:
# NOTE: ONNX export cannot handle F.scaled_dot_product_attention as of pytorch 2.0
if not _HAS_FUSED_ATTN or _EXPORTABLE:
return False
if experimental:
return _USE_FUSED_ATTN > 1
return _USE_FUSED_ATTN > 0
def set_fused_attn(enable: bool = True, experimental: bool = False):
global _USE_FUSED_ATTN
if not _HAS_FUSED_ATTN:
warnings.warn('This version of pytorch does not have F.scaled_dot_product_attention, fused_attn flag ignored.')
return
if experimental and enable:
_USE_FUSED_ATTN = 2
elif enable:
_USE_FUSED_ATTN = 1
else:
_USE_FUSED_ATTN = 0
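# Usage sketch (illustrative):
#   set_fused_attn(True, experimental=True)   # opt in to experimental fused attention paths
#   use_fused_attn(experimental=True)         # -> True on PyTorch builds with F.scaled_dot_product_attention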
| pytorch-image-models/timm/layers/config.py/0 | {
"file_path": "pytorch-image-models/timm/layers/config.py",
"repo_id": "pytorch-image-models",
"token_count": 1787
} | 173 |
from typing import Tuple
import torch
def ndgrid(*tensors) -> Tuple[torch.Tensor, ...]:
"""generate N-D grid in dimension order.
The ndgrid function is like meshgrid except that the order of the first two input arguments are switched.
That is, the statement
[X1,X2,X3] = ndgrid(x1,x2,x3)
produces the same result as
[X2,X1,X3] = meshgrid(x2,x1,x3)
This naming is based on MATLAB; the purpose is to avoid confusion as torch.meshgrid behaviour
moves from matching ndgrid ('ij') indexing to the numpy meshgrid default of 'xy'.
"""
try:
return torch.meshgrid(*tensors, indexing='ij')
except TypeError:
# old PyTorch < 1.10 will follow this path as it does not have indexing arg,
# the old behaviour of meshgrid was 'ij'
return torch.meshgrid(*tensors)
def meshgrid(*tensors) -> Tuple[torch.Tensor, ...]:
"""generate N-D grid in spatial dim order.
The meshgrid function is similar to ndgrid except that the order of the
first two input and output arguments is switched.
That is, the statement
[X,Y,Z] = meshgrid(x,y,z)
produces the same result as
[Y,X,Z] = ndgrid(y,x,z)
Because of this, meshgrid is better suited to problems in two- or three-dimensional Cartesian space,
while ndgrid is better suited to multidimensional problems that aren't spatially based.
"""
# NOTE: this will throw in PyTorch < 1.10 as meshgrid did not support indexing arg or have
# capability of generating grid in xy order before then.
return torch.meshgrid(*tensors, indexing='xy')
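# Quick check (illustrative):
#   a, b = ndgrid(torch.arange(2), torch.arange(3))    # both shaped (2, 3), 'ij' (dimension) order
#   c, d = meshgrid(torch.arange(3), torch.arange(2))  # both shaped (2, 3), 'xy' (spatial) order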
| pytorch-image-models/timm/layers/grid.py/0 | {
"file_path": "pytorch-image-models/timm/layers/grid.py",
"repo_id": "pytorch-image-models",
"token_count": 565
} | 174 |
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
class PatchDropout(nn.Module):
"""
Patch Dropout (https://arxiv.org/abs/2212.00794): randomly drops a subset of patch tokens during training.
"""
return_indices: torch.jit.Final[bool]
def __init__(
self,
prob: float = 0.5,
num_prefix_tokens: int = 1,
ordered: bool = False,
return_indices: bool = False,
):
super().__init__()
assert 0 <= prob < 1.
self.prob = prob
self.num_prefix_tokens = num_prefix_tokens # exclude CLS token (or other prefix tokens)
self.ordered = ordered
self.return_indices = return_indices
def forward(self, x) -> Union[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]:
if not self.training or self.prob == 0.:
if self.return_indices:
return x, None
return x
if self.num_prefix_tokens:
prefix_tokens, x = x[:, :self.num_prefix_tokens], x[:, self.num_prefix_tokens:]
else:
prefix_tokens = None
B = x.shape[0]
L = x.shape[1]
num_keep = max(1, int(L * (1. - self.prob)))
keep_indices = torch.argsort(torch.randn(B, L, device=x.device), dim=-1)[:, :num_keep]
if self.ordered:
# NOTE does not need to maintain patch order in typical transformer use,
# but possibly useful for debug / visualization
keep_indices = keep_indices.sort(dim=-1)[0]
x = x.gather(1, keep_indices.unsqueeze(-1).expand((-1, -1) + x.shape[2:]))
if prefix_tokens is not None:
x = torch.cat((prefix_tokens, x), dim=1)
if self.return_indices:
return x, keep_indices
return x
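# Usage sketch (illustrative): drop ~50% of patch tokens while training, keeping the CLS token
#   pd = PatchDropout(prob=0.5, num_prefix_tokens=1)
#   tokens = torch.randn(2, 1 + 196, 768)
#   pd(tokens).shape   # (2, 1 + 98, 768) while pd.training is True; unchanged in eval mode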
| pytorch-image-models/timm/layers/patch_dropout.py/0 | {
"file_path": "pytorch-image-models/timm/layers/patch_dropout.py",
"repo_id": "pytorch-image-models",
"token_count": 842
} | 175 |
import torch
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are
applied while sampling the normal with mean/std applied, therefore a, b args
should be adjusted to match the range of mean, std args.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
with torch.no_grad():
return _trunc_normal_(tensor, mean, std, a, b)
def trunc_normal_tf_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
and the result is subsequently scaled and shifted by the mean and std args.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
with torch.no_grad():
_trunc_normal_(tensor, 0, 1.0, a, b)
tensor.mul_(std).add_(mean)
return tensor
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
if mode == 'fan_in':
denom = fan_in
elif mode == 'fan_out':
denom = fan_out
elif mode == 'fan_avg':
denom = (fan_in + fan_out) / 2
variance = scale / denom
if distribution == "truncated_normal":
# constant is stddev of standard normal truncated to (-2, 2)
trunc_normal_tf_(tensor, std=math.sqrt(variance) / .87962566103423978)
elif distribution == "normal":
with torch.no_grad():
tensor.normal_(std=math.sqrt(variance))
elif distribution == "uniform":
bound = math.sqrt(3 * variance)
with torch.no_grad():
tensor.uniform_(-bound, bound)
else:
raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
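# Usage sketch (illustrative):
#   w = torch.empty(64, 128)
#   lecun_normal_(w)                                                          # fan_in scaled, truncated normal
#   variance_scaling_(w, scale=2.0, mode='fan_out', distribution='uniform')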
| pytorch-image-models/timm/layers/weight_init.py/0 | {
"file_path": "pytorch-image-models/timm/layers/weight_init.py",
"repo_id": "pytorch-image-models",
"token_count": 1838
} | 176 |
import copy
from collections import deque, defaultdict
from dataclasses import dataclass, field, replace, asdict
from typing import Any, Deque, Dict, Tuple, Optional, Union
__all__ = ['PretrainedCfg', 'filter_pretrained_cfg', 'DefaultCfg']
@dataclass
class PretrainedCfg:
"""
"""
# weight source locations
url: Optional[Union[str, Tuple[str, str]]] = None # remote URL
file: Optional[str] = None # local / shared filesystem path
state_dict: Optional[Dict[str, Any]] = None # in-memory state dict
hf_hub_id: Optional[str] = None # Hugging Face Hub model id ('organization/model')
hf_hub_filename: Optional[str] = None # Hugging Face Hub filename (overrides default)
source: Optional[str] = None # source of cfg / weight location used (url, file, hf-hub)
architecture: Optional[str] = None # architecture variant can be set when not implicit
tag: Optional[str] = None # pretrained tag of source
custom_load: bool = False # use custom model specific model.load_pretrained() (ie for npz files)
# input / data config
input_size: Tuple[int, int, int] = (3, 224, 224)
test_input_size: Optional[Tuple[int, int, int]] = None
min_input_size: Optional[Tuple[int, int, int]] = None
fixed_input_size: bool = False
interpolation: str = 'bicubic'
crop_pct: float = 0.875
test_crop_pct: Optional[float] = None
crop_mode: str = 'center'
mean: Tuple[float, ...] = (0.485, 0.456, 0.406)
std: Tuple[float, ...] = (0.229, 0.224, 0.225)
# head / classifier config and meta-data
num_classes: int = 1000
label_offset: Optional[int] = None
label_names: Optional[Tuple[str]] = None
label_descriptions: Optional[Dict[str, str]] = None
# model attributes that vary with above or required for pretrained adaptation
pool_size: Optional[Tuple[int, ...]] = None
test_pool_size: Optional[Tuple[int, ...]] = None
first_conv: Optional[str] = None
classifier: Optional[str] = None
license: Optional[str] = None
description: Optional[str] = None
origin_url: Optional[str] = None
paper_name: Optional[str] = None
paper_ids: Optional[Union[str, Tuple[str]]] = None
notes: Optional[Tuple[str]] = None
@property
def has_weights(self):
return self.url or self.file or self.hf_hub_id
def to_dict(self, remove_source=False, remove_null=True):
return filter_pretrained_cfg(
asdict(self),
remove_source=remove_source,
remove_null=remove_null
)
def filter_pretrained_cfg(cfg, remove_source=False, remove_null=True):
filtered_cfg = {}
keep_null = {'pool_size', 'first_conv', 'classifier'} # always keep these keys, even if none
for k, v in cfg.items():
if remove_source and k in {'url', 'file', 'hf_hub_id', 'hf_hub_filename', 'state_dict', 'source'}:
continue
if remove_null and v is None and k not in keep_null:
continue
filtered_cfg[k] = v
return filtered_cfg
@dataclass
class DefaultCfg:
tags: Deque[str] = field(default_factory=deque) # priority queue of tags (first is default)
cfgs: Dict[str, PretrainedCfg] = field(default_factory=dict) # pretrained cfgs by tag
is_pretrained: bool = False # at least one of the configs has a pretrained source set
@property
def default(self):
return self.cfgs[self.tags[0]]
@property
def default_with_tag(self):
tag = self.tags[0]
return tag, self.cfgs[tag]
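# Example (illustrative, hypothetical hub id):
#   cfg = PretrainedCfg(hf_hub_id='org/model', num_classes=1000)
#   cfg.has_weights                   # truthy, since hf_hub_id is set
#   cfg.to_dict(remove_source=True)   # drops url / file / hf_hub_* / source keys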
| pytorch-image-models/timm/models/_pretrained.py/0 | {
"file_path": "pytorch-image-models/timm/models/_pretrained.py",
"repo_id": "pytorch-image-models",
"token_count": 1341
} | 177 |
""" CrossViT Model
@inproceedings{
chen2021crossvit,
title={{CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}},
author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda},
booktitle={International Conference on Computer Vision (ICCV)},
year={2021}
}
Paper link: https://arxiv.org/abs/2103.14899
Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py
NOTE: model names have been renamed from the originals to represent actual input resolution: all *_224 -> *_240 and *_384 -> *_408
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright IBM All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Modified from Timm. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
from functools import partial
from typing import List
from typing import Tuple
import torch
import torch.hub
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, to_2tuple, trunc_normal_, _assert
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._registry import register_model, generate_default_cfgs
from .vision_transformer import Block
__all__ = ['CrossVit'] # model_registry will add each entrypoint fn to this
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
if multi_conv:
if patch_size[0] == 12:
self.proj = nn.Sequential(
nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1),
)
elif patch_size[0] == 16:
self.proj = nn.Sequential(
nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1),
)
else:
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
_assert(H == self.img_size[0],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
_assert(W == self.img_size[1],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class CrossAttention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = head_dim ** -0.5
self.wq = nn.Linear(dim, dim, bias=qkv_bias)
self.wk = nn.Linear(dim, dim, bias=qkv_bias)
self.wv = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
# B1C -> B1H(C/H) -> BH1(C/H)
q = self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
# BNC -> BNH(C/H) -> BHN(C/H)
k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
# BNC -> BNH(C/H) -> BHN(C/H)
v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = (q @ k.transpose(-2, -1)) * self.scale # BH1(C/H) @ BH(C/H)N -> BH1N
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, 1, C) # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C
x = self.proj(x)
x = self.proj_drop(x)
return x
class CrossAttentionBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = CrossAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x[:, 0:1, ...] + self.drop_path(self.attn(self.norm1(x)))
return x
class MultiScaleBlock(nn.Module):
def __init__(
self,
dim,
patches,
depth,
num_heads,
mlp_ratio,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
num_branches = len(dim)
self.num_branches = num_branches
# different branch could have different embedding size, the first one is the base
self.blocks = nn.ModuleList()
for d in range(num_branches):
tmp = []
for i in range(depth[d]):
tmp.append(Block(
dim=dim[d],
num_heads=num_heads[d],
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i],
norm_layer=norm_layer,
))
if len(tmp) != 0:
self.blocks.append(nn.Sequential(*tmp))
if len(self.blocks) == 0:
self.blocks = None
self.projs = nn.ModuleList()
for d in range(num_branches):
if dim[d] == dim[(d + 1) % num_branches] and False:
tmp = [nn.Identity()]
else:
tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])]
self.projs.append(nn.Sequential(*tmp))
self.fusion = nn.ModuleList()
for d in range(num_branches):
d_ = (d + 1) % num_branches
nh = num_heads[d_]
if depth[-1] == 0:  # backward compatibility:
self.fusion.append(
CrossAttentionBlock(
dim=dim[d_],
num_heads=nh,
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[-1],
norm_layer=norm_layer,
))
else:
tmp = []
for _ in range(depth[-1]):
tmp.append(CrossAttentionBlock(
dim=dim[d_],
num_heads=nh,
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[-1],
norm_layer=norm_layer,
))
self.fusion.append(nn.Sequential(*tmp))
self.revert_projs = nn.ModuleList()
for d in range(num_branches):
if dim[(d + 1) % num_branches] == dim[d] and False:
tmp = [nn.Identity()]
else:
tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(),
nn.Linear(dim[(d + 1) % num_branches], dim[d])]
self.revert_projs.append(nn.Sequential(*tmp))
def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
outs_b = []
for i, block in enumerate(self.blocks):
outs_b.append(block(x[i]))
# only take the cls token out
proj_cls_token = torch.jit.annotate(List[torch.Tensor], [])
for i, proj in enumerate(self.projs):
proj_cls_token.append(proj(outs_b[i][:, 0:1, ...]))
# cross attention
outs = []
for i, (fusion, revert_proj) in enumerate(zip(self.fusion, self.revert_projs)):
tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1)
tmp = fusion(tmp)
reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...])
tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1)
outs.append(tmp)
return outs
def _compute_num_patches(img_size, patches):
return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)]
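# Example (illustrative): two branches at 240x240 / 224x224 with 12 / 16 pixel patches
#   _compute_num_patches([(240, 240), (224, 224)], [12, 16])  # -> [400, 196]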
@register_notrace_function
def scale_image(x, ss: Tuple[int, int], crop_scale: bool = False): # annotations for torchscript
"""
Pulled out of CrossViT.forward_features to bury conditional logic in a leaf node for FX tracing.
Args:
x (Tensor): input image
ss (tuple[int, int]): height and width to scale to
crop_scale (bool): whether to crop instead of interpolate to achieve the desired scale. Defaults to False
Returns:
Tensor: the "scaled" image batch tensor
"""
H, W = x.shape[-2:]
if H != ss[0] or W != ss[1]:
if crop_scale and ss[0] <= H and ss[1] <= W:
cu, cl = int(round((H - ss[0]) / 2.)), int(round((W - ss[1]) / 2.))
x = x[:, :, cu:cu + ss[0], cl:cl + ss[1]]
else:
x = torch.nn.functional.interpolate(x, size=ss, mode='bicubic', align_corners=False)
return x
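# Example (illustrative): bring a 240x240 batch down to a 224x224 branch resolution
#   x = torch.randn(2, 3, 240, 240)
#   scale_image(x, (224, 224)).shape                   # (2, 3, 224, 224) via bicubic interpolation
#   scale_image(x, (224, 224), crop_scale=True).shape  # (2, 3, 224, 224) via center crop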
class CrossVit(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(
self,
img_size=224,
img_scale=(1.0, 1.0),
patch_size=(8, 16),
in_chans=3,
num_classes=1000,
embed_dim=(192, 384),
depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)),
num_heads=(6, 12),
mlp_ratio=(2., 2., 4.),
multi_conv=False,
crop_scale=False,
qkv_bias=True,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
global_pool='token',
):
super().__init__()
assert global_pool in ('token', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.img_size = to_2tuple(img_size)
img_scale = to_2tuple(img_scale)
self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale]
self.crop_scale = crop_scale # crop instead of interpolate for scale
num_patches = _compute_num_patches(self.img_size_scaled, patch_size)
self.num_branches = len(patch_size)
self.embed_dim = embed_dim
self.num_features = sum(embed_dim)
self.patch_embed = nn.ModuleList()
# hard-coded for torch jit script
for i in range(self.num_branches):
setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i])))
setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i])))
for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim):
self.patch_embed.append(
PatchEmbed(
img_size=im_s,
patch_size=p,
in_chans=in_chans,
embed_dim=d,
multi_conv=multi_conv,
))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
total_depth = sum([sum(x[-2:]) for x in depth])
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] # stochastic depth decay rule
dpr_ptr = 0
self.blocks = nn.ModuleList()
for idx, block_cfg in enumerate(depth):
curr_depth = max(block_cfg[:-1]) + block_cfg[-1]
dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth]
blk = MultiScaleBlock(
embed_dim,
num_patches,
block_cfg,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr_,
norm_layer=norm_layer,
)
dpr_ptr += curr_depth
self.blocks.append(blk)
self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)])
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.ModuleList([
nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity()
for i in range(self.num_branches)])
for i in range(self.num_branches):
trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02)
trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
out = set()
for i in range(self.num_branches):
out.add(f'cls_token_{i}')
pe = getattr(self, f'pos_embed_{i}', None)
if pe is not None and pe.requires_grad:
out.add(f'pos_embed_{i}')
return out
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('token', 'avg')
self.global_pool = global_pool
self.head = nn.ModuleList(
[nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() for i in
range(self.num_branches)])
def forward_features(self, x) -> List[torch.Tensor]:
B = x.shape[0]
xs = []
for i, patch_embed in enumerate(self.patch_embed):
x_ = x
ss = self.img_size_scaled[i]
x_ = scale_image(x_, ss, self.crop_scale)
x_ = patch_embed(x_)
cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1 # hard-coded for torch jit script
cls_tokens = cls_tokens.expand(B, -1, -1)
x_ = torch.cat((cls_tokens, x_), dim=1)
pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1 # hard-coded for torch jit script
x_ = x_ + pos_embed
x_ = self.pos_drop(x_)
xs.append(x_)
for i, blk in enumerate(self.blocks):
xs = blk(xs)
# NOTE: was before branch token section, moved here to ensure all branch tokens go through the layer norm
xs = [norm(xs[i]) for i, norm in enumerate(self.norm)]
return xs
def forward_head(self, xs: List[torch.Tensor], pre_logits: bool = False) -> torch.Tensor:
xs = [x[:, 1:].mean(dim=1) for x in xs] if self.global_pool == 'avg' else [x[:, 0] for x in xs]
xs = [self.head_drop(x) for x in xs]
if pre_logits or isinstance(self.head[0], nn.Identity):
return torch.cat([x for x in xs], dim=1)
return torch.mean(torch.stack([head(xs[i]) for i, head in enumerate(self.head)], dim=0), dim=0)
def forward(self, x):
xs = self.forward_features(x)
x = self.forward_head(xs)
return x
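# Usage sketch (illustrative):
#   model = crossvit_tiny_240(pretrained=False)
#   logits = model(torch.randn(1, 3, 240, 240))   # (1, 1000), averaged over the two branch heads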
def _create_crossvit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
def pretrained_filter_fn(state_dict):
new_state_dict = {}
for key in state_dict.keys():
if 'pos_embed' in key or 'cls_token' in key:
new_key = key.replace(".", "_")
else:
new_key = key
new_state_dict[new_key] = state_dict[key]
return new_state_dict
return build_model_with_cfg(
CrossVit,
variant,
pretrained,
pretrained_filter_fn=pretrained_filter_fn,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True,
'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'),
'classifier': ('head.0', 'head.1'),
**kwargs
}
default_cfgs = generate_default_cfgs({
'crossvit_15_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_15_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_15_dagger_408.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
),
'crossvit_18_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_18_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_18_dagger_408.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
),
'crossvit_9_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_9_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_base_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_small_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_tiny_240.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def crossvit_tiny_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[3, 3], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_small_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[6, 6], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_small_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_base_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[12, 12], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_9_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]],
num_heads=[4, 4], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_240(pretrained=False, **kwargs) -> CrossVit:
    model_args = dict(
        img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
        num_heads=[7, 7], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_9_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]],
num_heads=[4, 4], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_dagger_408(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_dagger_408(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs))
return model
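# Hypothetical usage sketch (assumes the `timm.create_model` factory and the registered names above;
# not exercised in this module):
#
#   import torch
#   import timm
#   model = timm.create_model('crossvit_15_dagger_240', pretrained=False, num_classes=10)
#   logits = model(torch.randn(1, 3, 240, 240))  # *_240 variants are fixed_input_size at 240x240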
| pytorch-image-models/timm/models/crossvit.py/0 | {
"file_path": "pytorch-image-models/timm/models/crossvit.py",
"repo_id": "pytorch-image-models",
"token_count": 12463
} | 178 |
"""
Poolformer from MetaFormer is Actually What You Need for Vision https://arxiv.org/abs/2111.11418
IdentityFormer, RandFormer, PoolFormerV2, ConvFormer, and CAFormer
from MetaFormer Baselines for Vision https://arxiv.org/abs/2210.13452
All implemented models support feature extraction and variable input resolution.
Original implementation by Weihao Yu et al.,
adapted for timm by Fredo Guan and Ross Wightman.
Adapted from https://github.com/sail-sg/metaformer, original copyright below
"""
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.jit import Final
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, DropPath, SelectAdaptivePool2d, GroupNorm1, LayerNorm, LayerNorm2d, Mlp, \
use_fused_attn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['MetaFormer']
class Stem(nn.Module):
"""
Stem implemented by a layer of convolution.
Conv2d params constant across all models.
"""
def __init__(
self,
in_channels,
out_channels,
norm_layer=None,
):
super().__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=7,
stride=4,
padding=2
)
self.norm = norm_layer(out_channels) if norm_layer else nn.Identity()
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
class Downsampling(nn.Module):
"""
Downsampling implemented by a layer of convolution.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
norm_layer=None,
):
super().__init__()
self.norm = norm_layer(in_channels) if norm_layer else nn.Identity()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding
)
def forward(self, x):
x = self.norm(x)
x = self.conv(x)
return x
class Scale(nn.Module):
"""
Scale vector by element multiplications.
"""
def __init__(self, dim, init_value=1.0, trainable=True, use_nchw=True):
super().__init__()
self.shape = (dim, 1, 1) if use_nchw else (dim,)
self.scale = nn.Parameter(init_value * torch.ones(dim), requires_grad=trainable)
def forward(self, x):
return x * self.scale.view(self.shape)
class SquaredReLU(nn.Module):
"""
Squared ReLU: https://arxiv.org/abs/2109.08668
"""
def __init__(self, inplace=False):
super().__init__()
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
return torch.square(self.relu(x))
class StarReLU(nn.Module):
"""
StarReLU: s * relu(x) ** 2 + b
"""
def __init__(
self,
scale_value=1.0,
bias_value=0.0,
scale_learnable=True,
bias_learnable=True,
mode=None,
inplace=False
):
super().__init__()
self.inplace = inplace
self.relu = nn.ReLU(inplace=inplace)
self.scale = nn.Parameter(scale_value * torch.ones(1), requires_grad=scale_learnable)
self.bias = nn.Parameter(bias_value * torch.ones(1), requires_grad=bias_learnable)
def forward(self, x):
return self.scale * self.relu(x) ** 2 + self.bias
class Attention(nn.Module):
"""
Vanilla self-attention from Transformer: https://arxiv.org/abs/1706.03762.
Modified from timm.
"""
fused_attn: Final[bool]
def __init__(
self,
dim,
head_dim=32,
num_heads=None,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
proj_bias=False,
**kwargs
):
super().__init__()
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.num_heads = num_heads if num_heads else dim // head_dim
if self.num_heads == 0:
self.num_heads = 1
self.attention_dim = self.num_heads * self.head_dim
self.qkv = nn.Linear(dim, self.attention_dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(self.attention_dim, dim, bias=proj_bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
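        # after the permute, qkv is (3, B, num_heads, N, head_dim); unbind(0) yields q, k, v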
q, k, v = qkv.unbind(0)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
# custom norm modules that disable the bias term, since the original model defs
# used a custom norm with a weight term but no bias term.
class GroupNorm1NoBias(GroupNorm1):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class LayerNorm2dNoBias(LayerNorm2d):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class LayerNormNoBias(nn.LayerNorm):
def __init__(self, num_channels, **kwargs):
super().__init__(num_channels, **kwargs)
self.eps = kwargs.get('eps', 1e-6)
self.bias = None
class SepConv(nn.Module):
r"""
Inverted separable convolution from MobileNetV2: https://arxiv.org/abs/1801.04381.
"""
def __init__(
self,
dim,
expansion_ratio=2,
act1_layer=StarReLU,
act2_layer=nn.Identity,
bias=False,
kernel_size=7,
padding=3,
**kwargs
):
super().__init__()
mid_channels = int(expansion_ratio * dim)
self.pwconv1 = nn.Conv2d(dim, mid_channels, kernel_size=1, bias=bias)
self.act1 = act1_layer()
self.dwconv = nn.Conv2d(
mid_channels, mid_channels, kernel_size=kernel_size,
padding=padding, groups=mid_channels, bias=bias) # depthwise conv
self.act2 = act2_layer()
self.pwconv2 = nn.Conv2d(mid_channels, dim, kernel_size=1, bias=bias)
def forward(self, x):
x = self.pwconv1(x)
x = self.act1(x)
x = self.dwconv(x)
x = self.act2(x)
x = self.pwconv2(x)
return x
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer: https://arxiv.org/abs/2111.11418
"""
def __init__(self, pool_size=3, **kwargs):
super().__init__()
self.pool = nn.AvgPool2d(
pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
def forward(self, x):
y = self.pool(x)
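        # the enclosing MetaFormerBlock adds the residual x back, so only the pooled difference is returned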
return y - x
class MlpHead(nn.Module):
""" MLP classification head
"""
def __init__(
self,
dim,
num_classes=1000,
mlp_ratio=4,
act_layer=SquaredReLU,
norm_layer=LayerNorm,
drop_rate=0.,
bias=True
):
super().__init__()
hidden_features = int(mlp_ratio * dim)
self.fc1 = nn.Linear(dim, hidden_features, bias=bias)
self.act = act_layer()
self.norm = norm_layer(hidden_features)
self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias)
self.head_drop = nn.Dropout(drop_rate)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.norm(x)
x = self.head_drop(x)
x = self.fc2(x)
return x
class MetaFormerBlock(nn.Module):
"""
Implementation of one MetaFormer block.
"""
def __init__(
self,
dim,
token_mixer=Pooling,
mlp_act=StarReLU,
mlp_bias=False,
norm_layer=LayerNorm2d,
proj_drop=0.,
drop_path=0.,
use_nchw=True,
layer_scale_init_value=None,
res_scale_init_value=None,
**kwargs
):
super().__init__()
ls_layer = partial(Scale, dim=dim, init_value=layer_scale_init_value, use_nchw=use_nchw)
rs_layer = partial(Scale, dim=dim, init_value=res_scale_init_value, use_nchw=use_nchw)
self.norm1 = norm_layer(dim)
self.token_mixer = token_mixer(dim=dim, proj_drop=proj_drop, **kwargs)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.layer_scale1 = ls_layer() if layer_scale_init_value is not None else nn.Identity()
self.res_scale1 = rs_layer() if res_scale_init_value is not None else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
dim,
int(4 * dim),
act_layer=mlp_act,
bias=mlp_bias,
drop=proj_drop,
use_conv=use_nchw,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.layer_scale2 = ls_layer() if layer_scale_init_value is not None else nn.Identity()
self.res_scale2 = rs_layer() if res_scale_init_value is not None else nn.Identity()
def forward(self, x):
x = self.res_scale1(x) + \
self.layer_scale1(
self.drop_path1(
self.token_mixer(self.norm1(x))
)
)
x = self.res_scale2(x) + \
self.layer_scale2(
self.drop_path2(
self.mlp(self.norm2(x))
)
)
return x
class MetaFormerStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
depth=2,
token_mixer=nn.Identity,
mlp_act=StarReLU,
mlp_bias=False,
downsample_norm=LayerNorm2d,
norm_layer=LayerNorm2d,
proj_drop=0.,
dp_rates=[0.] * 2,
layer_scale_init_value=None,
res_scale_init_value=None,
**kwargs,
):
super().__init__()
self.grad_checkpointing = False
self.use_nchw = not issubclass(token_mixer, Attention)
# don't downsample if in_chs and out_chs are the same
self.downsample = nn.Identity() if in_chs == out_chs else Downsampling(
in_chs,
out_chs,
kernel_size=3,
stride=2,
padding=1,
norm_layer=downsample_norm,
)
self.blocks = nn.Sequential(*[MetaFormerBlock(
dim=out_chs,
token_mixer=token_mixer,
mlp_act=mlp_act,
mlp_bias=mlp_bias,
norm_layer=norm_layer,
proj_drop=proj_drop,
drop_path=dp_rates[i],
layer_scale_init_value=layer_scale_init_value,
res_scale_init_value=res_scale_init_value,
use_nchw=self.use_nchw,
**kwargs,
) for i in range(depth)])
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
def forward(self, x: Tensor):
x = self.downsample(x)
B, C, H, W = x.shape
if not self.use_nchw:
x = x.reshape(B, C, -1).transpose(1, 2)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
if not self.use_nchw:
x = x.transpose(1, 2).reshape(B, C, H, W)
return x
class MetaFormer(nn.Module):
r""" MetaFormer
A PyTorch impl of : `MetaFormer Baselines for Vision` -
https://arxiv.org/abs/2210.13452
Args:
in_chans (int): Number of input image channels.
num_classes (int): Number of classes for classification head.
global_pool: Pooling for classifier head.
depths (list or tuple): Number of blocks at each stage.
dims (list or tuple): Feature dimension at each stage.
token_mixers (list, tuple or token_fcn): Token mixer for each stage.
mlp_act: Activation layer for MLP.
mlp_bias (boolean): Enable or disable mlp bias term.
drop_path_rate (float): Stochastic depth rate.
drop_rate (float): Dropout rate.
        layer_scale_init_values (list, tuple, float or None): Init value for Layer Scale.
            None means the layer scale is not used. From: https://arxiv.org/abs/2103.17239.
        res_scale_init_values (list, tuple, float or None): Init value for res Scale on residual connections.
            None means the res scale is not used. From: https://arxiv.org/abs/2110.09456.
downsample_norm (nn.Module): Norm layer used in stem and downsampling layers.
norm_layers (list, tuple or norm_fcn): Norm layers for each stage.
output_norm: Norm layer before classifier head.
use_mlp_head: Use MLP classification head.
"""
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
depths=(2, 2, 6, 2),
dims=(64, 128, 320, 512),
token_mixers=Pooling,
mlp_act=StarReLU,
mlp_bias=False,
drop_path_rate=0.,
proj_drop_rate=0.,
drop_rate=0.0,
layer_scale_init_values=None,
res_scale_init_values=(None, None, 1.0, 1.0),
downsample_norm=LayerNorm2dNoBias,
norm_layers=LayerNorm2dNoBias,
output_norm=LayerNorm2d,
use_mlp_head=True,
**kwargs,
):
super().__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        self.use_mlp_head = use_mlp_head
        # convert everything to lists if they aren't indexable (a bare value means the model has only one stage)
        if not isinstance(depths, (list, tuple)):
            depths = [depths]
        if not isinstance(dims, (list, tuple)):
            dims = [dims]
        self.num_features = dims[-1]
        self.num_stages = len(depths)
if not isinstance(token_mixers, (list, tuple)):
token_mixers = [token_mixers] * self.num_stages
if not isinstance(norm_layers, (list, tuple)):
norm_layers = [norm_layers] * self.num_stages
if not isinstance(layer_scale_init_values, (list, tuple)):
layer_scale_init_values = [layer_scale_init_values] * self.num_stages
if not isinstance(res_scale_init_values, (list, tuple)):
res_scale_init_values = [res_scale_init_values] * self.num_stages
self.grad_checkpointing = False
self.feature_info = []
self.stem = Stem(
in_chans,
dims[0],
norm_layer=downsample_norm
)
stages = []
prev_dim = dims[0]
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
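        # stochastic depth rates increase linearly over all blocks, then are split back into per-stage lists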
for i in range(self.num_stages):
stages += [MetaFormerStage(
prev_dim,
dims[i],
depth=depths[i],
token_mixer=token_mixers[i],
mlp_act=mlp_act,
mlp_bias=mlp_bias,
proj_drop=proj_drop_rate,
dp_rates=dp_rates[i],
layer_scale_init_value=layer_scale_init_values[i],
res_scale_init_value=res_scale_init_values[i],
downsample_norm=downsample_norm,
norm_layer=norm_layers[i],
**kwargs,
)]
prev_dim = dims[i]
self.feature_info += [dict(num_chs=dims[i], reduction=2, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
# if using MlpHead, dropout is handled by MlpHead
if num_classes > 0:
if self.use_mlp_head:
final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate)
else:
final = nn.Linear(self.num_features, num_classes)
else:
final = nn.Identity()
self.head = nn.Sequential(OrderedDict([
('global_pool', SelectAdaptivePool2d(pool_type=global_pool)),
('norm', output_norm(self.num_features)),
('flatten', nn.Flatten(1) if global_pool else nn.Identity()),
('drop', nn.Dropout(drop_rate) if self.use_mlp_head else nn.Identity()),
('fc', final)
]))
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
for stage in self.stages:
stage.set_grad_checkpointing(enable=enable)
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes=0, global_pool=None):
if global_pool is not None:
self.head.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.head.flatten = nn.Flatten(1) if global_pool else nn.Identity()
if num_classes > 0:
if self.use_mlp_head:
final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate)
else:
final = nn.Linear(self.num_features, num_classes)
else:
final = nn.Identity()
self.head.fc = final
def forward_head(self, x: Tensor, pre_logits: bool = False):
# NOTE nn.Sequential in head broken down since can't call head[:-1](x) in torchscript :(
x = self.head.global_pool(x)
x = self.head.norm(x)
x = self.head.flatten(x)
x = self.head.drop(x)
return x if pre_logits else self.head.fc(x)
def forward_features(self, x: Tensor):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward(self, x: Tensor):
x = self.forward_features(x)
x = self.forward_head(x)
return x
# this works, but it's long and breaks backwards compatibility with weights from the poolformer-only impl
def checkpoint_filter_fn(state_dict, model):
if 'stem.conv.weight' in state_dict:
return state_dict
import re
out_dict = {}
is_poolformerv1 = 'network.0.0.mlp.fc1.weight' in state_dict
model_state_dict = model.state_dict()
for k, v in state_dict.items():
if is_poolformerv1:
k = re.sub(r'layer_scale_([0-9]+)', r'layer_scale\1.scale', k)
k = k.replace('network.1', 'downsample_layers.1')
k = k.replace('network.3', 'downsample_layers.2')
k = k.replace('network.5', 'downsample_layers.3')
k = k.replace('network.2', 'network.1')
k = k.replace('network.4', 'network.2')
k = k.replace('network.6', 'network.3')
k = k.replace('network', 'stages')
k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k)
k = k.replace('downsample.proj', 'downsample.conv')
k = k.replace('patch_embed.proj', 'patch_embed.conv')
k = re.sub(r'([0-9]+).([0-9]+)', r'\1.blocks.\2', k)
k = k.replace('stages.0.downsample', 'patch_embed')
k = k.replace('patch_embed', 'stem')
k = k.replace('post_norm', 'norm')
k = k.replace('pre_norm', 'norm')
k = re.sub(r'^head', 'head.fc', k)
k = re.sub(r'^norm', 'head.norm', k)
        if v.shape != model_state_dict[k].shape and v.numel() == model_state_dict[k].numel():
v = v.reshape(model_state_dict[k].shape)
out_dict[k] = v
return out_dict
def _create_metaformer(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (2, 2, 6, 2))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
MetaFormer,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 1.0, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'classifier': 'head.fc', 'first_conv': 'stem.conv',
**kwargs
}
default_cfgs = generate_default_cfgs({
'poolformer_s12.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_s24.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.9),
'poolformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95),
'poolformer_m48.sail_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95),
'poolformerv2_s12.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_s24.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_s36.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_m36.sail_in1k': _cfg(hf_hub_id='timm/'),
'poolformerv2_m48.sail_in1k': _cfg(hf_hub_id='timm/'),
'convformer_s18.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s18.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s18.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s18.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s18.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_s36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_s36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_m36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_m36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_m36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_m36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'convformer_b36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_b36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_b36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'convformer_b36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'convformer_b36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_s18.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s18.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s18.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s18.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s18.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_s36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_s36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_s36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_m36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_m36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_m36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_m36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_m36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
'caformer_b36.sail_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_b36.sail_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_b36.sail_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2'),
'caformer_b36.sail_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)),
'caformer_b36.sail_in22k': _cfg(
hf_hub_id='timm/',
classifier='head.fc.fc2', num_classes=21841),
})
@register_model
def poolformer_s12(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[2, 2, 6, 2],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-5,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s12', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_s24(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[4, 4, 12, 4],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-5,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s24', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[64, 128, 320, 512],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[96, 192, 384, 768],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformer_m48(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[8, 8, 24, 8],
dims=[96, 192, 384, 768],
downsample_norm=None,
mlp_act=nn.GELU,
mlp_bias=True,
norm_layers=GroupNorm1,
layer_scale_init_values=1e-6,
res_scale_init_values=None,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformer_m48', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s12(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[2, 2, 6, 2],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s12', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s24(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[4, 4, 12, 4],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s24', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[64, 128, 320, 512],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_s36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[6, 6, 18, 6],
dims=[96, 192, 384, 768],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_m36', pretrained=pretrained, **model_kwargs)
@register_model
def poolformerv2_m48(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[8, 8, 24, 8],
dims=[96, 192, 384, 768],
norm_layers=GroupNorm1NoBias,
use_mlp_head=False,
**kwargs)
return _create_metaformer('poolformerv2_m48', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_s18(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 3, 9, 3],
dims=[64, 128, 320, 512],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_s18', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[64, 128, 320, 512],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[96, 192, 384, 576],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def convformer_b36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[128, 256, 512, 768],
token_mixers=SepConv,
norm_layers=LayerNorm2dNoBias,
**kwargs)
return _create_metaformer('convformer_b36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_s18(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 3, 9, 3],
dims=[64, 128, 320, 512],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_s18', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_s36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[64, 128, 320, 512],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_s36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_m36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[96, 192, 384, 576],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_m36', pretrained=pretrained, **model_kwargs)
@register_model
def caformer_b36(pretrained=False, **kwargs) -> MetaFormer:
model_kwargs = dict(
depths=[3, 12, 18, 3],
dims=[128, 256, 512, 768],
token_mixers=[SepConv, SepConv, Attention, Attention],
norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2,
**kwargs)
return _create_metaformer('caformer_b36', pretrained=pretrained, **model_kwargs)
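# Hypothetical usage sketch (assumes the `timm.create_model` factory; names as registered above):
#
#   import torch
#   import timm
#   model = timm.create_model('convformer_s18', pretrained=False)
#   backbone = timm.create_model('convformer_s18', pretrained=False, features_only=True)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224))  # list of per-stage feature maps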
| pytorch-image-models/timm/models/metaformer.py/0 | {
"file_path": "pytorch-image-models/timm/models/metaformer.py",
"repo_id": "pytorch-image-models",
"token_count": 17521
} | 179 |
""" ResNeSt Models
Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955
Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang
Modified for torchscript compat, and consistency with timm by Ross Wightman
"""
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SplitAttn
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from .resnet import ResNet
class ResNestBottleneck(nn.Module):
"""ResNet Bottleneck
"""
# pylint: disable=unused-argument
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
radix=1,
cardinality=1,
base_width=64,
avd=False,
avd_first=False,
is_first=False,
reduce_first=1,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
aa_layer=None,
drop_block=None,
drop_path=None,
):
super(ResNestBottleneck, self).__init__()
assert reduce_first == 1 # not supported
assert attn_layer is None # not supported
assert aa_layer is None # TODO not yet supported
assert drop_path is None # TODO not yet supported
group_width = int(planes * (base_width / 64.)) * cardinality
first_dilation = first_dilation or dilation
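        # 'avd' (average-pool downsample): move the spatial stride out of the 3x3 conv into an AvgPool2d,
        # applied either before (avd_first=True) or after (avd_first=False) that conv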
if avd and (stride > 1 or is_first):
avd_stride = stride
stride = 1
else:
avd_stride = 0
self.radix = radix
self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
self.bn1 = norm_layer(group_width)
self.act1 = act_layer(inplace=True)
self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None
if self.radix >= 1:
self.conv2 = SplitAttn(
group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation,
dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_layer=drop_block)
self.bn2 = nn.Identity()
self.drop_block = nn.Identity()
self.act2 = nn.Identity()
else:
self.conv2 = nn.Conv2d(
group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation,
dilation=first_dilation, groups=cardinality, bias=False)
self.bn2 = norm_layer(group_width)
self.drop_block = drop_block() if drop_block is not None else nn.Identity()
self.act2 = act_layer(inplace=True)
self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None
self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False)
self.bn3 = norm_layer(planes*4)
self.act3 = act_layer(inplace=True)
self.downsample = downsample
def zero_init_last(self):
if getattr(self.bn3, 'weight', None) is not None:
nn.init.zeros_(self.bn3.weight)
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.act1(out)
if self.avd_first is not None:
out = self.avd_first(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.drop_block(out)
out = self.act2(out)
if self.avd_last is not None:
out = self.avd_last(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out += shortcut
out = self.act3(out)
return out
def _create_resnest(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ResNet,
variant,
pretrained,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1.0', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'resnest14d.gluon_in1k': _cfg(hf_hub_id='timm/'),
'resnest26d.gluon_in1k': _cfg(hf_hub_id='timm/'),
'resnest50d.in1k': _cfg(hf_hub_id='timm/'),
'resnest101e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8)),
'resnest200e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'),
'resnest269e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'),
'resnest50d_4s2x40d.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic'),
'resnest50d_1s4x24d.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic')
})
@register_model
def resnest14d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-14d model. Weights ported from GluonCV.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[1, 1, 1, 1],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest14d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest26d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-26d model. Weights ported from GluonCV.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[2, 2, 2, 2],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest26d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-50d model. Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest50d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest101e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 23, 3],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest101e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest200e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 24, 36, 3],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest200e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest269e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 30, 48, 8],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest269e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d_4s2x40d(pretrained=False, **kwargs) -> ResNet:
"""ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2,
block_args=dict(radix=4, avd=True, avd_first=True))
return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d_1s4x24d(pretrained=False, **kwargs) -> ResNet:
"""ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4,
block_args=dict(radix=1, avd=True, avd_first=True))
return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
| pytorch-image-models/timm/models/resnest.py/0 | {
"file_path": "pytorch-image-models/timm/models/resnest.py",
"repo_id": "pytorch-image-models",
"token_count": 4439
} | 180 |
""" Visformer
Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533
From original at https://github.com/danczs/Visformer
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier, use_fused_attn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['Visformer']
class SpatialMlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.,
group=8,
spatial_conv=False,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = to_2tuple(drop)
self.in_features = in_features
self.out_features = out_features
self.spatial_conv = spatial_conv
if self.spatial_conv:
if group < 2: # net setting
hidden_features = in_features * 5 // 6
else:
hidden_features = in_features * 2
self.hidden_features = hidden_features
self.group = group
self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False)
self.act1 = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
if self.spatial_conv:
self.conv2 = nn.Conv2d(
hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False)
self.act2 = act_layer()
else:
self.conv2 = None
self.act2 = None
self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False)
self.drop3 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.conv1(x)
x = self.act1(x)
x = self.drop1(x)
if self.conv2 is not None:
x = self.conv2(x)
x = self.act2(x)
x = self.conv3(x)
x = self.drop3(x)
return x
class Attention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.num_heads = num_heads
head_dim = round(dim // num_heads * head_dim_ratio)
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn(experimental=True)
self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, C, H, W = x.shape
x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3)
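        # after the permute, x is (3, B, num_heads, H*W, head_dim); unbind(0) yields q, k, v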
q, k, v = x.unbind(0)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q.contiguous(), k.contiguous(), v.contiguous(),
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
head_dim_ratio=1.,
mlp_ratio=4.,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=LayerNorm2d,
group=8,
attn_disabled=False,
spatial_conv=False,
):
super().__init__()
self.spatial_conv = spatial_conv
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if attn_disabled:
self.norm1 = None
self.attn = None
else:
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
head_dim_ratio=head_dim_ratio,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.mlp = SpatialMlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
group=group,
spatial_conv=spatial_conv,
)
def forward(self, x):
if self.attn is not None:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class Visformer(nn.Module):
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
init_channels=32,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4.,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=LayerNorm2d,
attn_stage='111',
use_pos_embed=True,
spatial_conv='111',
vit_stem=False,
group=8,
global_pool='avg',
conv_init=False,
embed_norm=None,
):
super().__init__()
img_size = to_2tuple(img_size)
self.num_classes = num_classes
self.embed_dim = embed_dim
self.init_channels = init_channels
self.img_size = img_size
self.vit_stem = vit_stem
self.conv_init = conv_init
if isinstance(depth, (list, tuple)):
self.stage_num1, self.stage_num2, self.stage_num3 = depth
depth = sum(depth)
else:
self.stage_num1 = self.stage_num3 = depth // 3
self.stage_num2 = depth - self.stage_num1 - self.stage_num3
self.use_pos_embed = use_pos_embed
self.grad_checkpointing = False
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
# stage 1
if self.vit_stem:
self.stem = None
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // patch_size for x in img_size]
else:
if self.init_channels is None:
self.stem = None
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 2,
in_chans=in_chans,
embed_dim=embed_dim // 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 2) for x in img_size]
else:
self.stem = nn.Sequential(
nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(self.init_channels),
nn.ReLU(inplace=True)
)
img_size = [x // 2 for x in img_size]
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 4,
in_chans=self.init_channels,
embed_dim=embed_dim // 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 4) for x in img_size]
if self.use_pos_embed:
if self.vit_stem:
self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size))
else:
self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
else:
self.pos_embed1 = None
self.stage1 = nn.Sequential(*[
Block(
dim=embed_dim//2,
num_heads=num_heads,
head_dim_ratio=0.5,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[0] == '0'),
spatial_conv=(spatial_conv[0] == '1'),
)
for i in range(self.stage_num1)
])
# stage2
if not self.vit_stem:
self.patch_embed2 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 8,
in_chans=embed_dim // 2,
embed_dim=embed_dim,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 8) for x in img_size]
if self.use_pos_embed:
self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size))
else:
self.pos_embed2 = None
else:
self.patch_embed2 = None
self.stage2 = nn.Sequential(*[
Block(
dim=embed_dim,
num_heads=num_heads,
head_dim_ratio=1.0,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[1] == '0'),
spatial_conv=(spatial_conv[1] == '1'),
)
for i in range(self.stage_num1, self.stage_num1+self.stage_num2)
])
# stage 3
if not self.vit_stem:
self.patch_embed3 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 8,
in_chans=embed_dim,
embed_dim=embed_dim * 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 8) for x in img_size]
if self.use_pos_embed:
self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size))
else:
self.pos_embed3 = None
else:
self.patch_embed3 = None
self.stage3 = nn.Sequential(*[
Block(
dim=embed_dim * 2,
num_heads=num_heads,
head_dim_ratio=1.0,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[2] == '0'),
spatial_conv=(spatial_conv[2] == '1'),
)
for i in range(self.stage_num1+self.stage_num2, depth)
])
self.num_features = embed_dim if self.vit_stem else embed_dim * 2
self.norm = norm_layer(self.num_features)
# head
global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
self.global_pool = global_pool
self.head_drop = nn.Dropout(drop_rate)
self.head = head
# weights init
if self.use_pos_embed:
trunc_normal_(self.pos_embed1, std=0.02)
if not self.vit_stem:
trunc_normal_(self.pos_embed2, std=0.02)
trunc_normal_(self.pos_embed3, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
if self.conv_init:
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
else:
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^patch_embed1|pos_embed1|stem', # stem and embed
blocks=[
                (r'^stage(\d+)\.(\d+)', None),
(r'^(?:patch_embed|pos_embed)(\d+)', (0,)),
(r'^norm', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
if self.stem is not None:
x = self.stem(x)
# stage 1
x = self.patch_embed1(x)
if self.pos_embed1 is not None:
x = self.pos_drop(x + self.pos_embed1)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage1, x)
else:
x = self.stage1(x)
# stage 2
if self.patch_embed2 is not None:
x = self.patch_embed2(x)
if self.pos_embed2 is not None:
x = self.pos_drop(x + self.pos_embed2)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage2, x)
else:
x = self.stage2(x)
# stage3
if self.patch_embed3 is not None:
x = self.patch_embed3(x)
if self.pos_embed3 is not None:
x = self.pos_drop(x + self.pos_embed3)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage3, x)
else:
x = self.stage3(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(Visformer, variant, pretrained, **kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'visformer_tiny.in1k': _cfg(hf_hub_id='timm/'),
'visformer_small.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def visformer_tiny(pretrained=False, **kwargs) -> Visformer:
model_cfg = dict(
init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8,
attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
embed_norm=nn.BatchNorm2d)
model = _create_visformer('visformer_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def visformer_small(pretrained=False, **kwargs) -> Visformer:
model_cfg = dict(
init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8,
attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
embed_norm=nn.BatchNorm2d)
model = _create_visformer('visformer_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
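# Hypothetical usage sketch (assumes the `timm.create_model` factory; names as registered above):
#
#   import torch
#   import timm
#   model = timm.create_model('visformer_small', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))  # fixed_input_size, 224x224 by default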
# @register_model
# def visformer_net1(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=True, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net2(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net3(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net4(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net5(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
# spatial_conv='111', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net6(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net7(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000',
# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
| pytorch-image-models/timm/models/visformer.py/0 | {
"file_path": "pytorch-image-models/timm/models/visformer.py",
"repo_id": "pytorch-image-models",
"token_count": 10132
} | 181 |
""" Adan Optimizer
Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022.
https://arxiv.org/abs/2208.06677
Implementation adapted from https://github.com/sail-sg/Adan
"""
import math
import torch
from torch.optim import Optimizer
class Adan(Optimizer):
"""
Implements a pytorch variant of Adan
Adan was proposed in
Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022.
https://arxiv.org/abs/2208.06677
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float, float], optional): coefficients used for computing
running averages of gradient and its norm. (default: (0.98, 0.92, 0.99))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): decoupled weight decay (L2 penalty) (default: 0)
        no_prox (bool): if True, apply the decoupled weight decay before the parameter update;
            if False, apply it as a proximal-style division after the update. (default: False)
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.98, 0.92, 0.99),
eps=1e-8,
weight_decay=0.0,
no_prox=False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= betas[2] < 1.0:
raise ValueError("Invalid beta parameter at index 2: {}".format(betas[2]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, no_prox=no_prox)
super(Adan, self).__init__(params, defaults)
@torch.no_grad()
def restart_opt(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
if p.requires_grad:
state = self.state[p]
# State initialization
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
# Exponential moving average of gradient difference
state['exp_avg_diff'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure=None):
""" Performs a single optimization step.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
beta1, beta2, beta3 = group['betas']
# assume same step across group now to simplify things
            # per-parameter step could easily be supported by making it a tensor, or by passing a list into the kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
bias_correction1 = 1.0 - beta1 ** group['step']
bias_correction2 = 1.0 - beta2 ** group['step']
bias_correction3 = 1.0 - beta3 ** group['step']
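            # Adan update sketch: exp_avg tracks an EMA of gradients (m_t), exp_avg_diff an EMA of gradient
            # differences (v_t), and exp_avg_sq an EMA of (g_t + beta2 * (g_t - g_{t-1}))^2 (n_t); parameters
            # move along (m_t / bc1 + beta2 * v_t / bc2) / (sqrt(n_t / bc3) + eps) with decoupled weight decay.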
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_diff'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['pre_grad'] = grad.clone()
                exp_avg, exp_avg_diff, exp_avg_sq = state['exp_avg'], state['exp_avg_diff'], state['exp_avg_sq']
grad_diff = grad - state['pre_grad']
exp_avg.lerp_(grad, 1. - beta1) # m_t
exp_avg_diff.lerp_(grad_diff, 1. - beta2) # diff_t (v)
update = grad + beta2 * grad_diff
exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1. - beta3) # n_t
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction3)).add_(group['eps'])
update = (exp_avg / bias_correction1 + beta2 * exp_avg_diff / bias_correction2).div_(denom)
if group['no_prox']:
p.data.mul_(1 - group['lr'] * group['weight_decay'])
p.add_(update, alpha=-group['lr'])
else:
p.add_(update, alpha=-group['lr'])
p.data.div_(1 + group['lr'] * group['weight_decay'])
state['pre_grad'].copy_(grad)
return loss
| pytorch-image-models/timm/optim/adan.py/0 | {
"file_path": "pytorch-image-models/timm/optim/adan.py",
"repo_id": "pytorch-image-models",
"token_count": 2501
} | 182 |
""" MultiStep LR Scheduler
Basic multi step LR schedule with warmup, noise.
"""
import torch
import bisect
from timm.scheduler.scheduler import Scheduler
from typing import List
class MultiStepLRScheduler(Scheduler):
"""
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
decay_t: List[int],
decay_rate: float = 1.,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=True,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True,
) -> None:
super().__init__(
optimizer,
param_group_field="lr",
t_in_epochs=t_in_epochs,
noise_range_t=noise_range_t,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize,
)
self.decay_t = decay_t
self.decay_rate = decay_rate
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def get_curr_decay_steps(self, t):
        # find where t falls in the milestone array,
# assumes self.decay_t is sorted
return bisect.bisect_right(self.decay_t, t + 1)
def _get_lr(self, t):
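        # linear warmup toward the base values, then step decay by decay_rate at each milestone in decay_t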
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
lrs = [v * (self.decay_rate ** self.get_curr_decay_steps(t)) for v in self.base_values]
return lrs
| pytorch-image-models/timm/scheduler/multistep_lr.py/0 | {
"file_path": "pytorch-image-models/timm/scheduler/multistep_lr.py",
"repo_id": "pytorch-image-models",
"token_count": 1029
} | 183 |
""" Eval metrics and related
Hacked together by / Copyright 2020 Ross Wightman
"""
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = min(max(topk), output.size()[1])
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
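    # for each requested k, count hits within the top-k predictions and express them as a percentage of the batch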
return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
| pytorch-image-models/timm/utils/metrics.py/0 | {
"file_path": "pytorch-image-models/timm/utils/metrics.py",
"repo_id": "pytorch-image-models",
"token_count": 374
} | 184 |
use std::time::{Duration, Instant};
use text_generation_client::{
Batch, CachedBatch, ClientError, NextTokenChooserParameters, Request, ShardedClient,
StoppingCriteriaParameters,
};
use tokenizers::{Tokenizer, TruncationDirection};
use tokio::sync::{broadcast, mpsc};
const LOREM_IPSUM: &str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.";
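/// Latency and throughput measured for a single prefill pass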
#[derive(Debug, Clone)]
pub(crate) struct Prefill {
pub(crate) latency: Duration,
pub(crate) throughput: f64,
}
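/// Latency, per-token latency and throughput measured for a full decode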
#[derive(Debug, Clone)]
pub(crate) struct Decode {
pub(crate) latency: Duration,
pub(crate) token_latency: Duration,
pub(crate) throughput: f64,
}
#[derive(Debug)]
pub(crate) enum Message {
Warmup,
Prefill(Prefill),
Decode(Decode),
EndRun,
EndBatch,
}
/// Benchmarking task
#[allow(clippy::too_many_arguments)]
pub(crate) async fn generation_task(
tokenizer: Tokenizer,
batch_size: Vec<u32>,
sequence_length: u32,
decode_length: u32,
top_n_tokens: Option<u32>,
n_runs: usize,
warmups: usize,
parameters: NextTokenChooserParameters,
client: ShardedClient,
run_sender: mpsc::Sender<Result<Message, ClientError>>,
mut shutdown_receiver: broadcast::Receiver<()>,
_shutdown_guard_sender: mpsc::Sender<()>,
) {
// End task if a message is received on shutdown_receiver
// _shutdown_guard_sender will be dropped once the task is finished
tokio::select! {
res = generate_runs(tokenizer, batch_size, sequence_length, decode_length, top_n_tokens, n_runs, warmups, parameters, client, run_sender.clone()) => {
if let Err(err) = res {
run_sender.send(Err(err)).await.unwrap_or(());
}
},
_ = shutdown_receiver.recv() => {}
}
}
/// Benchmark prefill/decode
#[allow(clippy::too_many_arguments)]
async fn generate_runs(
tokenizer: Tokenizer,
batch_size: Vec<u32>,
sequence_length: u32,
decode_length: u32,
top_n_tokens: Option<u32>,
n_runs: usize,
warmups: usize,
parameters: NextTokenChooserParameters,
mut client: ShardedClient,
run_sender: mpsc::Sender<Result<Message, ClientError>>,
) -> Result<(), ClientError> {
// Create a dummy sequence
let sequence = create_sequence(sequence_length, tokenizer);
for b in batch_size {
// Warmups on batch size
for _ in 0..warmups {
let (_, decode_batch) = prefill(
sequence.clone(),
sequence_length,
b,
decode_length,
parameters.clone(),
top_n_tokens,
&mut client,
)
.await?;
let _ = decode(decode_batch, &mut client).await?;
// Send warmup message
run_sender.send(Ok(Message::Warmup)).await.unwrap_or(());
}
for _ in 0..n_runs {
let (prefill, decode_batch) = prefill(
sequence.clone(),
sequence_length,
b,
decode_length,
parameters.clone(),
top_n_tokens,
&mut client,
)
.await?;
// Send prefill message
run_sender
.send(Ok(Message::Prefill(prefill)))
.await
.unwrap_or(());
let decode = decode(decode_batch, &mut client).await?;
// Send decode message
run_sender
.send(Ok(Message::Decode(decode)))
.await
.unwrap_or(());
// Send run ended message
run_sender.send(Ok(Message::EndRun)).await.unwrap_or(());
}
// Batch ended
run_sender.send(Ok(Message::EndBatch)).await.unwrap_or(());
}
Ok(())
}
// Run a prefill step
async fn prefill(
sequence: String,
sequence_length: u32,
batch_size: u32,
decode_length: u32,
parameters: NextTokenChooserParameters,
top_n_tokens: Option<u32>,
client: &mut ShardedClient,
) -> Result<(Prefill, CachedBatch), ClientError> {
// Create requests
let requests = (0..batch_size)
.map(|id| Request {
id: id.into(),
prefill_logprobs: false,
inputs: sequence.clone(),
truncate: sequence_length,
parameters: Some(parameters.clone()),
stopping_parameters: Some(StoppingCriteriaParameters {
max_new_tokens: decode_length,
stop_sequences: vec![],
                ignore_eos_token: true, // Will not stop even if an eos token is generated
}),
top_n_tokens: top_n_tokens.unwrap_or(0),
})
.collect();
let batch = Batch {
id: 0,
requests,
size: batch_size,
max_tokens: batch_size * (sequence_length + decode_length),
};
// Run prefill
let start_time = Instant::now();
let (_, decode_batch, _) = client.prefill(batch.clone()).await?;
// Get latency
let latency = start_time.elapsed();
// Compute throughput from latency and batch size
let throughput = batch_size as f64 / latency.as_secs_f64();
// Decode batch cannot be empty
let decode_batch = decode_batch.expect("decode_batch is None. This is a bug.");
let step = Prefill {
latency,
throughput,
};
Ok((step, decode_batch))
}
/// Run a full decode
async fn decode(batch: CachedBatch, client: &mut ShardedClient) -> Result<Decode, ClientError> {
let mut decode_length = 0;
let batch_size = batch.size;
let start_time = Instant::now();
// Full decode over decode length
let mut next_batch = Some(batch);
while let Some(batch) = next_batch {
let result = client.decode(vec![batch]).await?;
next_batch = result.1;
decode_length += 1;
}
// Get latency
let latency = start_time.elapsed();
let token_latency = latency / decode_length;
// Compute throughput from latency, batch size and decode length
let throughput = (batch_size * decode_length) as f64 / latency.as_secs_f64();
let step = Decode {
latency,
token_latency,
throughput,
};
Ok(step)
}
/// Create a dummy sequence of the correct length
fn create_sequence(sequence_length: u32, tokenizer: Tokenizer) -> String {
let lorem_ipsum_length = tokenizer.encode(LOREM_IPSUM, true).unwrap().len();
// Repeat lorem ipsum to cover sequence length
let string_sequence =
LOREM_IPSUM.repeat((0..sequence_length).step_by(lorem_ipsum_length).len());
// Encode sequence
let mut encoding = tokenizer.encode(string_sequence, true).unwrap();
// Truncate to sequence_length
encoding.truncate(sequence_length as usize, 0, TruncationDirection::Left);
// Decode
tokenizer.decode(encoding.get_ids(), false).unwrap()
}
| text-generation-inference/benchmark/src/generation.rs/0 | {
"file_path": "text-generation-inference/benchmark/src/generation.rs",
"repo_id": "text-generation-inference",
"token_count": 3201
} | 185 |
import json
import requests
from aiohttp import ClientSession, ClientTimeout
from pydantic import ValidationError
from typing import Dict, Optional, List, AsyncIterator, Iterator
from text_generation.types import (
StreamResponse,
Response,
Request,
Parameters,
)
from text_generation.errors import parse_error
class Client:
"""Client to make calls to a text-generation-inference instance
Example:
```python
>>> from text_generation import Client
>>> client = Client("https://api-inference.huggingface.co/models/bigscience/bloomz")
>>> client.generate("Why is the sky blue?").generated_text
' Rayleigh scattering'
>>> result = ""
>>> for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
"""
def __init__(
self,
base_url: str,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: int = 10,
):
"""
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
"""
self.base_url = base_url
self.headers = headers
self.cookies = cookies
self.timeout = timeout
def generate(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
best_of: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
decoder_input_details: bool = False,
top_n_tokens: Optional[int] = None,
) -> Response:
"""
Given a prompt, generate the following text
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
best_of (`int`):
                Generate best_of sequences and return the one with the highest token logprobs
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
                The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
decoder_input_details (`bool`):
Return the decoder input token logprobs and ids
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
Returns:
Response: generated response
"""
# Validate parameters
parameters = Parameters(
best_of=best_of,
details=True,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
decoder_input_details=decoder_input_details,
top_n_tokens=top_n_tokens,
)
request = Request(inputs=prompt, stream=False, parameters=parameters)
resp = requests.post(
self.base_url,
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
)
payload = resp.json()
if resp.status_code != 200:
raise parse_error(resp.status_code, payload)
return Response(**payload[0])
def generate_stream(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
repetition_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
top_n_tokens: Optional[int] = None,
) -> Iterator[StreamResponse]:
"""
Given a prompt, generate the following stream of tokens
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
                The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
Returns:
Iterator[StreamResponse]: stream of generated tokens
"""
# Validate parameters
parameters = Parameters(
best_of=None,
details=True,
decoder_input_details=False,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
top_n_tokens=top_n_tokens,
)
request = Request(inputs=prompt, stream=True, parameters=parameters)
resp = requests.post(
self.base_url,
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
stream=True,
)
if resp.status_code != 200:
raise parse_error(resp.status_code, resp.json())
# Parse ServerSentEvents
for byte_payload in resp.iter_lines():
# Skip line
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
# Event data
if payload.startswith("data:"):
# Decode payload
                json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
# Parse payload
try:
response = StreamResponse(**json_payload)
except ValidationError:
# If we failed to parse the payload, then it is an error payload
raise parse_error(resp.status_code, json_payload)
yield response
class AsyncClient:
"""Asynchronous Client to make calls to a text-generation-inference instance
Example:
```python
>>> from text_generation import AsyncClient
>>> client = AsyncClient("https://api-inference.huggingface.co/models/bigscience/bloomz")
>>> response = await client.generate("Why is the sky blue?")
>>> response.generated_text
' Rayleigh scattering'
>>> result = ""
>>> async for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
"""
def __init__(
self,
base_url: str,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: int = 10,
):
"""
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
"""
self.base_url = base_url
self.headers = headers
self.cookies = cookies
self.timeout = ClientTimeout(timeout * 60)
async def generate(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
best_of: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
decoder_input_details: bool = False,
top_n_tokens: Optional[int] = None,
) -> Response:
"""
Given a prompt, generate the following text asynchronously
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
best_of (`int`):
                Generate best_of sequences and return the one with the highest token logprobs
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
                The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
decoder_input_details (`bool`):
Return the decoder input token logprobs and ids
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
Returns:
Response: generated response
"""
# Validate parameters
parameters = Parameters(
best_of=best_of,
details=True,
decoder_input_details=decoder_input_details,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
top_n_tokens=top_n_tokens,
)
request = Request(inputs=prompt, stream=False, parameters=parameters)
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(self.base_url, json=request.dict()) as resp:
payload = await resp.json()
if resp.status != 200:
raise parse_error(resp.status, payload)
return Response(**payload[0])
async def generate_stream(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
repetition_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
top_n_tokens: Optional[int] = None,
) -> AsyncIterator[StreamResponse]:
"""
Given a prompt, generate the following stream of tokens asynchronously
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
                The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
Returns:
AsyncIterator[StreamResponse]: stream of generated tokens
"""
# Validate parameters
parameters = Parameters(
best_of=None,
details=True,
decoder_input_details=False,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
top_n_tokens=top_n_tokens,
)
request = Request(inputs=prompt, stream=True, parameters=parameters)
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(self.base_url, json=request.dict()) as resp:
if resp.status != 200:
raise parse_error(resp.status, await resp.json())
# Parse ServerSentEvents
async for byte_payload in resp.content:
# Skip line
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
# Event data
if payload.startswith("data:"):
# Decode payload
                        json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
# Parse payload
try:
response = StreamResponse(**json_payload)
except ValidationError:
# If we failed to parse the payload, then it is an error payload
raise parse_error(resp.status, json_payload)
yield response
| text-generation-inference/clients/python/text_generation/client.py/0 | {
"file_path": "text-generation-inference/clients/python/text_generation/client.py",
"repo_id": "text-generation-inference",
"token_count": 9331
} | 186 |
# Safetensors
Safetensors is a serialization format for deep learning models. It is [faster](https://huggingface.co/docs/safetensors/speed) and safer than other serialization formats such as pickle (which is used under the hood in many deep learning libraries).
TGI depends on safetensors format mainly to enable [tensor parallelism sharding](./tensor_parallelism). For a given model repository during serving, TGI looks for safetensors weights. If there are no safetensors weights, TGI converts the PyTorch weights to safetensors format.
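The snippet below is a minimal sketch (not TGI's internal conversion code): it shows how a PyTorch state dict can be saved to and loaded back from safetensors with the `safetensors` Python library. The tensor names and file path are placeholders.
```python
import torch
from safetensors.torch import save_file, load_file
# Save a toy state dict to a .safetensors file, then load it back
state_dict = {"weight": torch.randn(4, 4), "bias": torch.zeros(4)}
save_file(state_dict, "model.safetensors")
reloaded = load_file("model.safetensors")
```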
You can learn more about safetensors by reading the [safetensors documentation](https://huggingface.co/docs/safetensors/index).
 | text-generation-inference/docs/source/conceptual/safetensors.md/0 | {
"file_path": "text-generation-inference/docs/source/conceptual/safetensors.md",
"repo_id": "text-generation-inference",
"token_count": 185
} | 187 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 3735,
"logprob": -12.9140625,
"text": "Test"
},
{
"id": 2159,
"logprob": -10.7578125,
"text": "request"
}
],
"seed": 0,
"tokens": [
{
"id": 28747,
"logprob": 0.0,
"special": false,
"text": ":"
},
{
"id": 3169,
"logprob": -0.1307373,
"special": false,
"text": " Let"
},
{
"id": 332,
"logprob": -2.3359375,
"special": false,
"text": " u"
},
{
"id": 347,
"logprob": 0.0,
"special": false,
"text": " be"
},
{
"id": 325,
"logprob": -1.0234375,
"special": false,
"text": " ("
},
{
"id": 28734,
"logprob": -2.0292969,
"special": false,
"text": "0"
},
{
"id": 648,
"logprob": -1.0439453,
"special": false,
"text": " +"
},
{
"id": 28705,
"logprob": -0.24499512,
"special": false,
"text": " "
},
{
"id": 28770,
"logprob": -0.5073242,
"special": false,
"text": "3"
},
{
"id": 387,
"logprob": -1.5507812,
"special": false,
"text": " -"
}
],
"top_tokens": null
},
"generated_text": "Test request: Let u be (0 + 3 -"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 1041
} | 188 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 3226,
"logprob": -9.0234375,
"text": " ge"
},
{
"id": 21017,
"logprob": -9.0859375,
"text": "ometric"
},
{
"id": 81,
"logprob": -0.25927734,
"text": "_"
},
{
"id": 6009,
"logprob": -2.25,
"text": "mean"
},
{
"id": 26,
"logprob": -0.30126953,
"text": "("
},
{
"id": 62,
"logprob": -5.7539062,
"text": "L"
},
{
"id": 44,
"logprob": -3.0878906,
"text": ":"
},
{
"id": 1682,
"logprob": -0.6845703,
"text": " List"
},
{
"id": 77,
"logprob": -0.3918457,
"text": "["
},
{
"id": 1808,
"logprob": -0.8798828,
"text": "float"
},
{
"id": 10794,
"logprob": -2.4980469,
"text": "]):"
}
],
"seed": null,
"tokens": [
{
"id": 284,
"logprob": -1.1533203,
"special": false,
"text": "\n "
},
{
"id": 442,
"logprob": -0.91796875,
"special": false,
"text": " return"
},
{
"id": 3632,
"logprob": -1.3291016,
"special": false,
"text": " sum"
},
{
"id": 26,
"logprob": -0.08062744,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.097717285,
"special": false,
"text": "L"
},
{
"id": 27,
"logprob": -0.29003906,
"special": false,
"text": ")"
},
{
"id": 517,
"logprob": -0.34958984,
"special": false,
"text": " /"
},
{
"id": 2069,
"logprob": -0.03829956,
"special": false,
"text": " len"
},
{
"id": 26,
"logprob": -0.0011987686,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.00050878525,
"special": false,
"text": "L"
}
]
},
"generated_text": "\n return sum(L) / len(L"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 3226,
"logprob": -9.0234375,
"text": " ge"
},
{
"id": 21017,
"logprob": -9.0859375,
"text": "ometric"
},
{
"id": 81,
"logprob": -0.25878906,
"text": "_"
},
{
"id": 6009,
"logprob": -2.2109375,
"text": "mean"
},
{
"id": 26,
"logprob": -0.30371094,
"text": "("
},
{
"id": 62,
"logprob": -5.6054688,
"text": "L"
},
{
"id": 44,
"logprob": -3.0722656,
"text": ":"
},
{
"id": 1682,
"logprob": -0.6879883,
"text": " List"
},
{
"id": 77,
"logprob": -0.38500977,
"text": "["
},
{
"id": 1808,
"logprob": -0.984375,
"text": "float"
},
{
"id": 10794,
"logprob": -2.5351562,
"text": "]):"
}
],
"seed": null,
"tokens": [
{
"id": 284,
"logprob": -1.1738281,
"special": false,
"text": "\n "
},
{
"id": 442,
"logprob": -0.9584961,
"special": false,
"text": " return"
},
{
"id": 3632,
"logprob": -1.4169922,
"special": false,
"text": " sum"
},
{
"id": 26,
"logprob": -0.085876465,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.0982666,
"special": false,
"text": "L"
},
{
"id": 27,
"logprob": -0.3022461,
"special": false,
"text": ")"
},
{
"id": 517,
"logprob": -0.40504883,
"special": false,
"text": " /"
},
{
"id": 2069,
"logprob": -0.041656494,
"special": false,
"text": " len"
},
{
"id": 26,
"logprob": -0.0011844635,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.0005264282,
"special": false,
"text": "L"
}
]
},
"generated_text": "\n return sum(L) / len(L"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 3226,
"logprob": -9.0234375,
"text": " ge"
},
{
"id": 21017,
"logprob": -9.0859375,
"text": "ometric"
},
{
"id": 81,
"logprob": -0.25927734,
"text": "_"
},
{
"id": 6009,
"logprob": -2.25,
"text": "mean"
},
{
"id": 26,
"logprob": -0.30126953,
"text": "("
},
{
"id": 62,
"logprob": -5.7539062,
"text": "L"
},
{
"id": 44,
"logprob": -3.0878906,
"text": ":"
},
{
"id": 1682,
"logprob": -0.6845703,
"text": " List"
},
{
"id": 77,
"logprob": -0.3918457,
"text": "["
},
{
"id": 1808,
"logprob": -0.8798828,
"text": "float"
},
{
"id": 10794,
"logprob": -2.4980469,
"text": "]):"
}
],
"seed": null,
"tokens": [
{
"id": 284,
"logprob": -1.1533203,
"special": false,
"text": "\n "
},
{
"id": 442,
"logprob": -0.9165039,
"special": false,
"text": " return"
},
{
"id": 3632,
"logprob": -1.328125,
"special": false,
"text": " sum"
},
{
"id": 26,
"logprob": -0.07946777,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.09820557,
"special": false,
"text": "L"
},
{
"id": 27,
"logprob": -0.28930664,
"special": false,
"text": ")"
},
{
"id": 517,
"logprob": -0.34592773,
"special": false,
"text": " /"
},
{
"id": 2069,
"logprob": -0.038330078,
"special": false,
"text": " len"
},
{
"id": 26,
"logprob": -0.0011940002,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.00050878525,
"special": false,
"text": "L"
}
]
},
"generated_text": "\n return sum(L) / len(L"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 3226,
"logprob": -9.0234375,
"text": " ge"
},
{
"id": 21017,
"logprob": -9.0859375,
"text": "ometric"
},
{
"id": 81,
"logprob": -0.25927734,
"text": "_"
},
{
"id": 6009,
"logprob": -2.25,
"text": "mean"
},
{
"id": 26,
"logprob": -0.30126953,
"text": "("
},
{
"id": 62,
"logprob": -5.7539062,
"text": "L"
},
{
"id": 44,
"logprob": -3.0878906,
"text": ":"
},
{
"id": 1682,
"logprob": -0.6845703,
"text": " List"
},
{
"id": 77,
"logprob": -0.3918457,
"text": "["
},
{
"id": 1808,
"logprob": -0.8798828,
"text": "float"
},
{
"id": 10794,
"logprob": -2.4980469,
"text": "]):"
}
],
"seed": null,
"tokens": [
{
"id": 284,
"logprob": -1.1533203,
"special": false,
"text": "\n "
},
{
"id": 442,
"logprob": -0.91259766,
"special": false,
"text": " return"
},
{
"id": 3632,
"logprob": -1.3251953,
"special": false,
"text": " sum"
},
{
"id": 26,
"logprob": -0.08062744,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.09906006,
"special": false,
"text": "L"
},
{
"id": 27,
"logprob": -0.28979492,
"special": false,
"text": ")"
},
{
"id": 517,
"logprob": -0.35958984,
"special": false,
"text": " /"
},
{
"id": 2069,
"logprob": -0.038604736,
"special": false,
"text": " len"
},
{
"id": 26,
"logprob": -0.0011901855,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.0005078316,
"special": false,
"text": "L"
}
]
},
"generated_text": "\n return sum(L) / len(L"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json",
"repo_id": "text-generation-inference",
"token_count": 7433
} | 189 |
import pytest
@pytest.fixture(scope="module")
def flash_llama_awq_handle(launcher):
with launcher(
"abhinavkulkarni/codellama-CodeLlama-7b-Python-hf-w4-g128-awq",
num_shard=1,
quantize="awq",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_awq(flash_llama_awq_handle):
await flash_llama_awq_handle.health(300)
return flash_llama_awq_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_awq(flash_llama_awq, response_snapshot):
response = await flash_llama_awq.generate(
"What is Deep Learning?", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
)
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_awq_all_params(flash_llama_awq, response_snapshot):
response = await flash_llama_awq.generate(
"What is Deep Learning?",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_awq_load(flash_llama_awq, generate_load, response_snapshot):
responses = await generate_load(
flash_llama_awq, "What is Deep Learning?", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all(
[
r.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
for r in responses
]
)
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_awq.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_awq.py",
"repo_id": "text-generation-inference",
"token_count": 866
} | 190 |
import pytest
@pytest.fixture(scope="module")
def neox_handle(launcher):
with launcher(
"stabilityai/stablelm-tuned-alpha-3b", num_shard=1, use_flash_attention=False
) as handle:
yield handle
@pytest.fixture(scope="module")
async def neox(neox_handle):
await neox_handle.health(300)
return neox_handle.client
@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox(neox, response_snapshot):
response = await neox.generate(
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox_load(neox, generate_load, response_snapshot):
responses = await generate_load(
neox,
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert len(generated_texts) == 4
    assert all(
        [text == generated_texts[0] for text in generated_texts]
    )
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_neox.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_neox.py",
"repo_id": "text-generation-inference",
"token_count": 499
} | 191 |
[package]
name = "text-generation-router"
description = "Text Generation Webserver"
build = "build.rs"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[lib]
path = "src/lib.rs"
[[bin]]
name = "text-generation-router"
path = "src/main.rs"
[dependencies]
async-stream = "0.3.5"
axum = { version = "0.6.20", features = ["json"] }
axum-tracing-opentelemetry = "0.14.1"
text-generation-client = { path = "client" }
clap = { version = "4.4.5", features = ["derive", "env"] }
futures = "0.3.28"
hf-hub = { version = "0.3.0", features = ["tokio"] }
metrics = "0.21.1"
metrics-exporter-prometheus = { version = "0.12.1", features = [] }
nohash-hasher = "0.2.0"
opentelemetry = { version = "0.20.0", features = ["rt-tokio"] }
opentelemetry-otlp = "0.13.0"
rand = "0.8.5"
reqwest = { version = "0.11.20", features = [] }
serde = "1.0.188"
serde_json = "1.0.107"
thiserror = "1.0.48"
tokenizers = { version = "0.15.1", features = ["http"] }
tokio = { version = "1.32.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync"] }
tokio-stream = "0.1.14"
tower-http = { version = "0.4.4", features = ["cors"] }
tracing = "0.1.37"
tracing-opentelemetry = "0.21.0"
tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] }
utoipa = { version = "3.5.0", features = ["axum_extras"] }
utoipa-swagger-ui = { version = "3.1.5", features = ["axum"] }
ngrok = { version = "0.13.1", features = ["axum"], optional = true }
init-tracing-opentelemetry = { version = "0.14.1", features = ["opentelemetry-otlp"] }
minijinja = "1.0.10"
futures-util = "0.3.30"
[build-dependencies]
vergen = { version = "8.2.5", features = ["build", "git", "gitcl"] }
[features]
default = ["ngrok"]
ngrok = ["dep:ngrok"]
| text-generation-inference/router/Cargo.toml/0 | {
"file_path": "text-generation-inference/router/Cargo.toml",
"repo_id": "text-generation-inference",
"token_count": 739
} | 192 |
/// HTTP Server logic
use crate::health::Health;
use crate::infer::{InferError, InferResponse, InferStreamResponse};
use crate::validation::ValidationError;
use crate::{
BestOfSequence, ChatCompletion, ChatCompletionChoice, ChatCompletionChunk, ChatCompletionDelta,
ChatRequest, CompatGenerateRequest, Details, ErrorResponse, FinishReason, GenerateParameters,
GenerateRequest, GenerateResponse, HubModelInfo, HubTokenizerConfig, Infer, Info, Message,
PrefillToken, SimpleToken, StreamDetails, StreamResponse, Token, TokenizeResponse, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, Method, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::{http, Json, Router};
use axum_tracing_opentelemetry::middleware::OtelAxumLayer;
use futures::stream::StreamExt;
use futures::Stream;
use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle};
use std::convert::Infallible;
use std::net::SocketAddr;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use text_generation_client::{ShardInfo, ShardedClient};
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;
/// Generate tokens if `stream == false` or a stream of token if `stream == true`
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/",
request_body = CompatGenerateRequest,
responses(
(status = 200, description = "Generated Text",
content(
("application/json" = GenerateResponse),
("text/event-stream" = StreamResponse),
)),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"})),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"})),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"})),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"})),
)
)]
#[instrument(skip(infer, req))]
async fn compat_generate(
Extension(default_return_full_text): Extension<bool>,
infer: Extension<Infer>,
compute_type: Extension<ComputeType>,
Json(mut req): Json<CompatGenerateRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
// default return_full_text given the pipeline_tag
if req.parameters.return_full_text.is_none() {
req.parameters.return_full_text = Some(default_return_full_text)
}
// switch on stream
if req.stream {
Ok(generate_stream(infer, compute_type, Json(req.into()))
.await
.into_response())
} else {
let (headers, Json(generation)) = generate(infer, compute_type, Json(req.into())).await?;
// wrap generation inside a Vec to match api-inference
Ok((headers, Json(vec![generation])).into_response())
}
}
/// Text Generation Inference endpoint info
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/info",
responses((status = 200, description = "Served model info", body = Info))
)]
#[instrument]
async fn get_model_info(info: Extension<Info>) -> Json<Info> {
Json(info.0)
}
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/health",
responses(
(status = 200, description = "Everything is working fine"),
(status = 503, description = "Text generation inference is down", body = ErrorResponse,
example = json ! ({"error": "unhealthy", "error_type": "healthcheck"})),
)
)]
#[instrument(skip(health))]
/// Health check method
async fn health(mut health: Extension<Health>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
match health.check().await {
true => Ok(()),
false => Err((
StatusCode::SERVICE_UNAVAILABLE,
Json(ErrorResponse {
error: "unhealthy".to_string(),
error_type: "healthcheck".to_string(),
}),
)),
}
}
/// Generate tokens
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/generate",
request_body = GenerateRequest,
responses(
(status = 200, description = "Generated Text", body = GenerateResponse),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"})),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"})),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"})),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"})),
)
)]
#[instrument(
skip_all,
fields(
parameters = ? req.parameters,
total_time,
validation_time,
queue_time,
inference_time,
time_per_token,
seed,
)
)]
async fn generate(
infer: Extension<Infer>,
Extension(ComputeType(compute_type)): Extension<ComputeType>,
Json(req): Json<GenerateRequest>,
) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
let span = tracing::Span::current();
let start_time = Instant::now();
metrics::increment_counter!("tgi_request_count");
tracing::debug!("Input: {}", req.inputs);
let compute_characters = req.inputs.chars().count();
let mut add_prompt = None;
if req.parameters.return_full_text.unwrap_or(false) {
add_prompt = Some(req.inputs.clone());
}
let details: bool = req.parameters.details || req.parameters.decoder_input_details;
// Inference
let (response, best_of_responses) = match req.parameters.best_of {
Some(best_of) if best_of > 1 => {
let (response, best_of_responses) = infer.generate_best_of(req, best_of).await?;
(response, Some(best_of_responses))
}
_ => (infer.generate(req).await?, None),
};
// Token details
let input_length = response._input_length;
let details = match details {
true => {
// convert best_of_responses
let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| {
responses
.into_iter()
.map(|response: InferResponse| {
// Add prompt if return_full_text
let mut output_text = response.generated_text.text;
if let Some(prompt) = &add_prompt {
output_text = prompt.clone() + &output_text;
}
BestOfSequence {
generated_text: output_text,
finish_reason: FinishReason::from(
response.generated_text.finish_reason,
),
generated_tokens: response.generated_text.generated_tokens,
prefill: response.prefill,
tokens: response.tokens,
top_tokens: response.top_tokens,
seed: response.generated_text.seed,
}
})
.collect()
});
Some(Details {
finish_reason: FinishReason::from(response.generated_text.finish_reason),
generated_tokens: response.generated_text.generated_tokens,
prefill: response.prefill,
tokens: response.tokens,
seed: response.generated_text.seed,
best_of_sequences,
top_tokens: response.top_tokens,
})
}
false => None,
};
// Timings
let total_time = start_time.elapsed();
let validation_time = response.queued - start_time;
let queue_time = response.start - response.queued;
let inference_time = Instant::now() - response.start;
let time_per_token = inference_time / response.generated_text.generated_tokens;
// Tracing metadata
span.record("total_time", format!("{total_time:?}"));
span.record("validation_time", format!("{validation_time:?}"));
span.record("queue_time", format!("{queue_time:?}"));
span.record("inference_time", format!("{inference_time:?}"));
span.record("time_per_token", format!("{time_per_token:?}"));
span.record("seed", format!("{:?}", response.generated_text.seed));
// Headers
let mut headers = HeaderMap::new();
headers.insert("x-compute-type", compute_type.parse().unwrap());
headers.insert(
"x-compute-time",
total_time.as_millis().to_string().parse().unwrap(),
);
headers.insert(
"x-compute-characters",
compute_characters.to_string().parse().unwrap(),
);
headers.insert(
"x-total-time",
total_time.as_millis().to_string().parse().unwrap(),
);
headers.insert(
"x-validation-time",
validation_time.as_millis().to_string().parse().unwrap(),
);
headers.insert(
"x-queue-time",
queue_time.as_millis().to_string().parse().unwrap(),
);
headers.insert(
"x-inference-time",
inference_time.as_millis().to_string().parse().unwrap(),
);
headers.insert(
"x-time-per-token",
time_per_token.as_millis().to_string().parse().unwrap(),
);
headers.insert("x-prompt-tokens", input_length.into());
headers.insert(
"x-generated-tokens",
response.generated_text.generated_tokens.into(),
);
// Metrics
metrics::increment_counter!("tgi_request_success");
metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
metrics::histogram!(
"tgi_request_validation_duration",
validation_time.as_secs_f64()
);
metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
metrics::histogram!(
"tgi_request_inference_duration",
inference_time.as_secs_f64()
);
metrics::histogram!(
"tgi_request_mean_time_per_token_duration",
time_per_token.as_secs_f64()
);
metrics::histogram!(
"tgi_request_generated_tokens",
response.generated_text.generated_tokens as f64
);
// Send response
let mut output_text = response.generated_text.text;
if let Some(prompt) = add_prompt {
output_text = prompt + &output_text;
}
tracing::debug!("Output: {}", output_text);
tracing::info!("Success");
let response = GenerateResponse {
generated_text: output_text,
details,
};
Ok((headers, Json(response)))
}
/// Generate a stream of token using Server-Sent Events
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/generate_stream",
request_body = GenerateRequest,
responses(
(status = 200, description = "Generated Text", body = StreamResponse,
content_type = "text/event-stream"),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"}),
content_type = "text/event-stream"),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"}),
content_type = "text/event-stream"),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"}),
content_type = "text/event-stream"),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"}),
content_type = "text/event-stream"),
)
)]
#[instrument(
skip_all,
fields(
parameters = ? req.parameters,
total_time,
validation_time,
queue_time,
inference_time,
time_per_token,
seed,
)
)]
async fn generate_stream(
Extension(infer): Extension<Infer>,
Extension(compute_type): Extension<ComputeType>,
Json(req): Json<GenerateRequest>,
) -> (
HeaderMap,
Sse<impl Stream<Item = Result<Event, Infallible>>>,
) {
let on_message_callback = |stream_token: StreamResponse| {
let event = Event::default();
event.json_data(stream_token).unwrap()
};
let (headers, response_stream) =
generate_stream_internal(infer, compute_type, Json(req), on_message_callback).await;
let sse = Sse::new(response_stream).keep_alive(KeepAlive::default());
(headers, sse)
}
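/// Shared streaming implementation: builds the SSE header map and token stream, mapping each
/// StreamResponse to an Event via the caller-provided callback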
async fn generate_stream_internal(
infer: Infer,
ComputeType(compute_type): ComputeType,
Json(req): Json<GenerateRequest>,
on_message_callback: impl Fn(StreamResponse) -> Event,
) -> (HeaderMap, impl Stream<Item = Result<Event, Infallible>>) {
let span = tracing::Span::current();
let start_time = Instant::now();
metrics::increment_counter!("tgi_request_count");
tracing::debug!("Input: {}", req.inputs);
let compute_characters = req.inputs.chars().count();
let mut headers = HeaderMap::new();
headers.insert("x-compute-type", compute_type.parse().unwrap());
headers.insert(
"x-compute-characters",
compute_characters.to_string().parse().unwrap(),
);
headers.insert("X-Accel-Buffering", "no".parse().unwrap());
let stream = async_stream::stream! {
// Inference
let mut end_reached = false;
let mut error = false;
let mut add_prompt = None;
if req.parameters.return_full_text.unwrap_or(false) {
add_prompt = Some(req.inputs.clone());
}
let details = req.parameters.details;
let best_of = req.parameters.best_of.unwrap_or(1);
if best_of != 1 {
let err = InferError::from(ValidationError::BestOfStream);
metrics::increment_counter!("tgi_request_failure", "err" => "validation");
tracing::error!("{err}");
yield Ok(Event::from(err));
} else if req.parameters.decoder_input_details {
let err = InferError::from(ValidationError::PrefillDetailsStream);
metrics::increment_counter!("tgi_request_failure", "err" => "validation");
tracing::error!("{err}");
yield Ok(Event::from(err));
} else {
match infer.generate_stream(req).instrument(info_span!(parent: &span, "async_stream")).await {
// Keep permit as long as generate_stream lives
Ok((_permit, _input_length, mut response_stream)) => {
let mut index = 0;
// Server-Sent Event stream
while let Some(response) = response_stream.next().await {
index += 1;
match response {
Ok(response) => {
match response {
// Prefill is ignored
InferStreamResponse::Prefill(_) => {}
// Yield event for every new token
InferStreamResponse::Intermediate{
token,
top_tokens,
} => {
tracing::debug!(parent: &span, "Token: {:?}", token);
// StreamResponse
let stream_token = StreamResponse {
index,
token,
top_tokens,
generated_text: None,
details: None,
};
let event = on_message_callback(stream_token);
yield Ok(event);
}
// Yield event for last token and compute timings
InferStreamResponse::End {
token,
generated_text,
start,
queued,
top_tokens,
} => {
// Token details
let details = match details {
true => Some(StreamDetails {
finish_reason: FinishReason::from(generated_text.finish_reason),
generated_tokens: generated_text.generated_tokens,
seed: generated_text.seed,
}),
false => None,
};
// Timings
let total_time = start_time.elapsed();
let validation_time = queued - start_time;
let queue_time = start - queued;
let inference_time = Instant::now() - start;
let time_per_token = inference_time / generated_text.generated_tokens;
// Tracing metadata
span.record("total_time", format!("{total_time:?}"));
span.record("validation_time", format!("{validation_time:?}"));
span.record("queue_time", format!("{queue_time:?}"));
span.record("inference_time", format!("{inference_time:?}"));
span.record("time_per_token", format!("{time_per_token:?}"));
span.record("seed", format!("{:?}", generated_text.seed));
// Metrics
metrics::increment_counter!("tgi_request_success");
metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
metrics::histogram!("tgi_request_validation_duration", validation_time.as_secs_f64());
metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
metrics::histogram!("tgi_request_inference_duration", inference_time.as_secs_f64());
metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64());
metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64);
// StreamResponse
end_reached = true;
let mut output_text = generated_text.text;
if let Some(prompt) = add_prompt {
output_text = prompt + &output_text;
}
tracing::debug!(parent: &span, "Output: {}", output_text);
tracing::info!(parent: &span, "Success");
let stream_token = StreamResponse {
index,
token,
top_tokens,
generated_text: Some(output_text),
details
};
let event = on_message_callback(stream_token);
yield Ok(event);
break;
}
}
}
// yield error
Err(err) => {
error = true;
yield Ok(Event::from(err));
break;
}
}
}
},
// yield error
Err(err) => {
error = true;
yield Ok(Event::from(err));
}
}
// Check if generation reached the end
// Skip if we already sent an error
if !end_reached && !error {
let err = InferError::IncompleteGeneration;
metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
tracing::error!("{err}");
yield Ok(Event::from(err));
}
}
};
(headers, stream)
}
/// Generate tokens
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/v1/chat/completions",
request_body = ChatRequest,
responses(
(status = 200, description = "Generated Text", body = ChatCompletionChunk),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"})),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"})),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"})),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"})),
)
)]
#[instrument(
skip_all,
fields(
// parameters = ? req.parameters,
total_time,
validation_time,
queue_time,
inference_time,
time_per_token,
seed,
)
)]
async fn chat_completions(
Extension(infer): Extension<Infer>,
Extension(compute_type): Extension<ComputeType>,
Extension(info): Extension<Info>,
Json(req): Json<ChatRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
metrics::increment_counter!("tgi_request_count");
let stream = req.stream;
let max_new_tokens = req.max_tokens.or(Some(100));
let repetition_penalty = req
.frequency_penalty
// rescale frequency_penalty from (-2.0, 2.0) to (0.0, 4.0)
.map(|x| x + 2.0);
let logprobs = req.logprobs.unwrap_or(false);
let seed = req.seed;
// apply chat template to flatten the request into a single input
let inputs = match infer.apply_chat_template(req.messages) {
Ok(inputs) => inputs,
Err(err) => {
metrics::increment_counter!("tgi_request_failure", "err" => "validation");
tracing::error!("{err}");
return Err((
StatusCode::UNPROCESSABLE_ENTITY,
Json(ErrorResponse {
error: err.to_string(),
error_type: err.error_type().to_string(),
}),
));
}
};
// build the request passing some parameters
let generate_request = GenerateRequest {
inputs: inputs.to_string(),
parameters: GenerateParameters {
best_of: None,
temperature: req.temperature,
repetition_penalty,
top_k: None,
top_p: req.top_p,
typical_p: None,
do_sample: true,
max_new_tokens,
return_full_text: None,
stop: Vec::new(),
truncate: None,
watermark: false,
details: true,
decoder_input_details: !stream,
seed,
top_n_tokens: None,
},
};
// static values that will be returned in all cases
let model_id = info.model_id.clone();
let system_fingerprint = format!("{}-{}", info.version, info.docker_label.unwrap_or("native"));
// switch on stream
if stream {
// pass this callback to the stream generation and build the required event structure
let on_message_callback = move |stream_token: StreamResponse| {
let event = Event::default();
let current_time = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_else(|_| std::time::Duration::from_secs(0))
.as_secs();
event
.json_data(ChatCompletionChunk::new(
model_id.clone(),
system_fingerprint.clone(),
stream_token.token.text,
current_time,
stream_token.index,
logprobs.then_some(stream_token.token.logprob),
stream_token.details.map(|d| d.finish_reason.to_string()),
))
.map_or_else(
|e| {
println!("Failed to serialize ChatCompletionChunk: {:?}", e);
Event::default()
},
|data| data,
)
};
let (headers, response_stream) = generate_stream_internal(
infer,
compute_type,
Json(generate_request),
on_message_callback,
)
.await;
let sse = Sse::new(response_stream).keep_alive(KeepAlive::default());
Ok((headers, sse).into_response())
} else {
let (headers, Json(generation)) = generate(
Extension(infer),
Extension(compute_type),
Json(generate_request),
)
.await?;
let current_time = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_else(|_| std::time::Duration::from_secs(0))
.as_secs();
// build the complete response object with the full text
let response = ChatCompletion::new(
model_id,
system_fingerprint,
generation.generated_text,
current_time,
generation.details.unwrap(),
logprobs,
);
        // send back the complete chat completion object
Ok((headers, Json(response)).into_response())
}
}
/// Tokenize inputs
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/tokenize",
request_body = GenerateRequest,
responses(
(status = 200, description = "Tokenized ids", body = TokenizeResponse),
(status = 404, description = "No tokenizer found", body = ErrorResponse,
example = json ! ({"error": "No fast tokenizer available"})),
)
)]
#[instrument(skip_all)]
async fn tokenize(
Extension(infer): Extension<Infer>,
Json(req): Json<GenerateRequest>,
) -> Result<Json<TokenizeResponse>, (StatusCode, Json<ErrorResponse>)> {
let input = req.inputs.clone();
let encoding = infer.tokenize(req).await?;
if let Some(encoding) = encoding {
let tokens: Vec<SimpleToken> = encoding
.get_ids()
.iter()
.zip(encoding.get_offsets())
.map(|(&id, &(start, stop))| {
let text: String = input.chars().skip(start).take(stop - start).collect();
SimpleToken {
id,
text,
start,
stop,
}
})
.collect();
Ok(Json(TokenizeResponse(tokens)))
} else {
Err((
StatusCode::NOT_FOUND,
Json(ErrorResponse {
error: "No fast tokenizer or tokenizer.json for this model".to_string(),
error_type: "no fast tokenizer".to_string(),
}),
))
}
}
/// Prometheus metrics scrape endpoint
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/metrics",
responses((status = 200, description = "Prometheus Metrics", body = String))
)]
async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
prom_handle.render()
}
#[derive(Clone, Debug)]
pub(crate) struct ComputeType(String);
/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
model_info: HubModelInfo,
shard_info: ShardInfo,
compat_return_full_text: bool,
max_concurrent_requests: usize,
max_best_of: usize,
max_stop_sequences: usize,
max_top_n_tokens: u32,
max_input_length: usize,
max_total_tokens: usize,
waiting_served_ratio: f32,
max_batch_prefill_tokens: u32,
max_batch_total_tokens: u32,
max_waiting_tokens: usize,
client: ShardedClient,
tokenizer: Option<Tokenizer>,
validation_workers: usize,
addr: SocketAddr,
allow_origin: Option<AllowOrigin>,
ngrok: bool,
ngrok_authtoken: Option<String>,
ngrok_edge: Option<String>,
tokenizer_config: HubTokenizerConfig,
messages_api_enabled: bool,
) -> Result<(), axum::BoxError> {
// OpenAPI documentation
#[derive(OpenApi)]
#[openapi(
paths(
health,
get_model_info,
compat_generate,
generate,
generate_stream,
chat_completions,
tokenize,
metrics,
),
components(
schemas(
Info,
CompatGenerateRequest,
GenerateRequest,
ChatRequest,
Message,
ChatCompletionChoice,
ChatCompletionDelta,
ChatCompletionChunk,
ChatCompletion,
GenerateParameters,
PrefillToken,
Token,
GenerateResponse,
TokenizeResponse,
SimpleToken,
BestOfSequence,
Details,
FinishReason,
StreamResponse,
StreamDetails,
ErrorResponse,
)
),
tags(
(name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
),
info(
title = "Text Generation Inference",
license(
name = "Apache 2.0",
url = "https://www.apache.org/licenses/LICENSE-2.0"
)
)
)]
struct ApiDoc;
// Create state
let validation = Validation::new(
validation_workers,
tokenizer,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_length,
max_total_tokens,
);
let generation_health = Arc::new(AtomicBool::new(false));
let health_ext = Health::new(client.clone(), generation_health.clone());
let infer = Infer::new(
client,
validation,
waiting_served_ratio,
max_batch_prefill_tokens,
max_batch_total_tokens,
max_waiting_tokens,
max_concurrent_requests,
shard_info.requires_padding,
shard_info.window_size,
shard_info.speculate,
generation_health,
tokenizer_config,
);
// Duration buckets
let duration_matcher = Matcher::Suffix(String::from("duration"));
let n_duration_buckets = 35;
let mut duration_buckets = Vec::with_capacity(n_duration_buckets);
// Minimum duration in seconds
let mut value = 0.0001;
for _ in 0..n_duration_buckets {
// geometric sequence
value *= 1.5;
duration_buckets.push(value);
}
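    // With 35 buckets growing by a factor of 1.5 from 0.1 ms, the buckets span roughly 0.15 ms up to ~145 s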
// Input Length buckets
let input_length_matcher = Matcher::Full(String::from("tgi_request_input_length"));
let input_length_buckets: Vec<f64> = (0..100)
.map(|x| (max_input_length as f64 / 100.0) * (x + 1) as f64)
.collect();
// Generated tokens buckets
let generated_tokens_matcher = Matcher::Full(String::from("tgi_request_generated_tokens"));
let generated_tokens_buckets: Vec<f64> = (0..100)
.map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
.collect();
    // Max new tokens buckets
let max_new_tokens_matcher = Matcher::Full(String::from("tgi_request_max_new_tokens"));
let max_new_tokens_buckets: Vec<f64> = (0..100)
.map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
.collect();
// Batch size buckets
let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size"));
let batch_size_buckets: Vec<f64> = (0..1024).map(|x| (x + 1) as f64).collect();
// Speculated tokens buckets
let skipped_matcher = Matcher::Full(String::from("tgi_request_skipped_tokens"));
let skipped_buckets: Vec<f64> = (0..shard_info.speculate + 1).map(|x| x as f64).collect();
// Prometheus handler
let builder = PrometheusBuilder::new()
.set_buckets_for_metric(duration_matcher, &duration_buckets)
.unwrap()
.set_buckets_for_metric(input_length_matcher, &input_length_buckets)
.unwrap()
.set_buckets_for_metric(generated_tokens_matcher, &generated_tokens_buckets)
.unwrap()
.set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets)
.unwrap()
.set_buckets_for_metric(batch_size_matcher, &batch_size_buckets)
.unwrap()
.set_buckets_for_metric(skipped_matcher, &skipped_buckets)
.unwrap();
let prom_handle = builder
.install_recorder()
.expect("failed to install metrics recorder");
// CORS layer
let allow_origin = allow_origin.unwrap_or(AllowOrigin::any());
let cors_layer = CorsLayer::new()
.allow_methods([Method::GET, Method::POST])
.allow_headers([http::header::CONTENT_TYPE])
.allow_origin(allow_origin);
// Endpoint info
let info = Info {
model_id: model_info.model_id,
model_sha: model_info.sha,
model_dtype: shard_info.dtype,
model_device_type: shard_info.device_type,
model_pipeline_tag: model_info.pipeline_tag,
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_input_length,
max_total_tokens,
waiting_served_ratio,
max_batch_total_tokens,
max_waiting_tokens,
validation_workers,
version: env!("CARGO_PKG_VERSION"),
sha: option_env!("VERGEN_GIT_SHA"),
docker_label: option_env!("DOCKER_LABEL"),
};
// Configure Swagger UI
let swagger_ui = SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi());
// Define base and health routes
let base_routes = Router::new()
.route("/", post(compat_generate))
.route("/", get(health))
.route("/info", get(get_model_info))
.route("/generate", post(generate))
.route("/generate_stream", post(generate_stream))
.route("/v1/chat/completions", post(chat_completions))
.route("/tokenize", post(tokenize))
.route("/health", get(health))
.route("/ping", get(health))
.route("/metrics", get(metrics));
// Conditional AWS Sagemaker route
let aws_sagemaker_route = if messages_api_enabled {
Router::new().route("/invocations", post(chat_completions)) // Use 'chat_completions' for OAI_ENABLED
} else {
Router::new().route("/invocations", post(compat_generate)) // Use 'compat_generate' otherwise
};
let compute_type =
ComputeType(std::env::var("COMPUTE_TYPE").unwrap_or("gpu+optimized".to_string()));
// Combine routes and layers
let app = Router::new()
.merge(swagger_ui)
.merge(base_routes)
.merge(aws_sagemaker_route)
.layer(Extension(info))
.layer(Extension(health_ext.clone()))
.layer(Extension(compat_return_full_text))
.layer(Extension(infer))
.layer(Extension(compute_type))
.layer(Extension(prom_handle.clone()))
.layer(OtelAxumLayer::default())
.layer(cors_layer);
if ngrok {
#[cfg(feature = "ngrok")]
{
use ngrok::config::TunnelBuilder;
let _ = addr;
let authtoken =
ngrok_authtoken.expect("`ngrok-authtoken` must be set when using ngrok tunneling");
let edge = ngrok_edge.expect("`ngrok-edge` must be set when using ngrok tunneling");
let tunnel = ngrok::Session::builder()
.authtoken(authtoken)
.connect()
.await
.unwrap()
.labeled_tunnel()
.label("edge", edge);
let listener = tunnel.listen().await.unwrap();
// Run prom metrics and health locally too
tokio::spawn(
axum::Server::bind(&addr)
.serve(
Router::new()
.route("/health", get(health))
.route("/metrics", get(metrics))
.layer(Extension(health_ext))
.layer(Extension(prom_handle))
.into_make_service(),
)
                    // Wait until all requests are finished to shut down
.with_graceful_shutdown(shutdown_signal()),
);
// Run server
axum::Server::builder(listener)
.serve(app.into_make_service())
                // Wait until all requests are finished to shut down
.with_graceful_shutdown(shutdown_signal())
.await?;
}
#[cfg(not(feature = "ngrok"))]
{
let _ngrok_authtoken = ngrok_authtoken;
            let _ngrok_edge = ngrok_edge;
panic!("`text-generation-router` was compiled without the `ngrok` feature");
}
} else {
// Run server
axum::Server::bind(&addr)
.serve(app.into_make_service())
// Wait until all requests are finished to shut down
.with_graceful_shutdown(shutdown_signal())
.await?;
}
Ok(())
}
/// Shutdown signal handler
async fn shutdown_signal() {
let ctrl_c = async {
signal::ctrl_c()
.await
.expect("failed to install Ctrl+C handler");
};
#[cfg(unix)]
let terminate = async {
signal::unix::signal(signal::unix::SignalKind::terminate())
.expect("failed to install signal handler")
.recv()
.await;
};
#[cfg(not(unix))]
let terminate = std::future::pending::<()>();
tokio::select! {
_ = ctrl_c => {},
_ = terminate => {},
}
tracing::info!("signal received, starting graceful shutdown");
opentelemetry::global::shutdown_tracer_provider();
}
impl From<i32> for FinishReason {
fn from(finish_reason: i32) -> Self {
let finish_reason = text_generation_client::FinishReason::try_from(finish_reason).unwrap();
match finish_reason {
text_generation_client::FinishReason::Length => FinishReason::Length,
text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
}
}
}
/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
fn from(err: InferError) -> Self {
let status_code = match err {
InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
InferError::TemplateError(_) => StatusCode::UNPROCESSABLE_ENTITY,
};
(
status_code,
Json(ErrorResponse {
error: err.to_string(),
error_type: err.error_type().to_string(),
}),
)
}
}
impl From<InferError> for Event {
fn from(err: InferError) -> Self {
Event::default()
.json_data(ErrorResponse {
error: err.to_string(),
error_type: err.error_type().to_string(),
})
.unwrap()
}
}
| text-generation-inference/router/src/server.rs/0 | {
"file_path": "text-generation-inference/router/src/server.rs",
"repo_id": "text-generation-inference",
"token_count": 19443
} | 193 |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#define _cuda_buffers_cu
#include "cuda_buffers.cuh"
CudaBuffers* g_buffers[CUDA_MAX_DEVICES] = {NULL};
// __constant__ half2 q4_table[16][256];
// half2 q4_table_host[16][256];
// bool q4_table_init = false;
CudaBuffers::CudaBuffers
(
int _device,
half* _temp_state,
half* _temp_dq
) :
device(_device),
temp_state(_temp_state),
temp_dq(_temp_dq)
{
cudaSetDevice(_device);
cudaStreamCreate(&alt_stream_1);
cudaStreamCreate(&alt_stream_2);
cudaStreamCreate(&alt_stream_3);
cudaEventCreate(&alt_stream_1_done);
cudaEventCreate(&alt_stream_2_done);
cudaEventCreate(&alt_stream_3_done);
}
CudaBuffers::~CudaBuffers()
{
cudaStreamDestroy(alt_stream_1);
cudaStreamDestroy(alt_stream_2);
cudaStreamDestroy(alt_stream_3);
cudaEventDestroy(alt_stream_1_done);
cudaEventDestroy(alt_stream_2_done);
cudaEventDestroy(alt_stream_3_done);
}
CudaBuffers* get_buffers(const int device_index)
{
return g_buffers[device_index];
}
void prepare_buffers_cuda
(
int _device,
half* _temp_state,
half* _temp_dq
)
{
CudaBuffers* buffers = new CudaBuffers
(
_device,
_temp_state,
_temp_dq
);
g_buffers[_device] = buffers;
}
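// Note: the host side is expected to call prepare_buffers_cuda once per device before any
// kernel fetches the buffers via get_buffers (an assumption based on how g_buffers is indexed).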
void cleanup_buffers_cuda()
{
for (int i = 0; i < CUDA_MAX_DEVICES; i++)
{
if (!g_buffers[i]) continue;
delete g_buffers[i];
g_buffers[i] = NULL;
}
}
| text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cu/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cu",
"repo_id": "text-generation-inference",
"token_count": 680
} | 194 |
#include <torch/extension.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#include "config.h"
#include "cuda/q_matrix.cuh"
#include "cuda/q_gemm.cuh"
#include "cpp/util.h"
// Some decluttering macros
#define TORCH_CHECK_DTYPE(__x, __dtype) TORCH_CHECK((__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype)
#define TORCH_CHECK_DTYPE_OPT(__x, __dtype) TORCH_CHECK((__x).device().is_meta() || (__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype)
#define TORCH_CHECK_SHAPES(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes")
#define TORCH_CHECK_SHAPES_OPT(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).device().is_meta() || (__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes")
// Quant matrix
uintptr_t make_q_matrix
(
torch::Tensor q_weight,
torch::Tensor q_perm,
torch::Tensor q_invperm,
torch::Tensor q_scale,
torch::Tensor q_scale_max,
torch::Tensor q_groups,
torch::Tensor q_group_map,
torch::Tensor gptq_qzeros,
torch::Tensor gptq_scales,
torch::Tensor gptq_g_idx,
torch::Tensor temp_dq
)
{
TORCH_CHECK_DTYPE(q_weight, kInt);
TORCH_CHECK_DTYPE_OPT(q_perm, kShort);
TORCH_CHECK_DTYPE_OPT(q_invperm, kShort);
TORCH_CHECK_DTYPE_OPT(q_scale, kInt);
TORCH_CHECK_DTYPE_OPT(q_scale_max, kHalf);
TORCH_CHECK_DTYPE_OPT(q_groups, kShort);
TORCH_CHECK_DTYPE_OPT(q_group_map, kShort);
TORCH_CHECK_DTYPE_OPT(gptq_qzeros, kInt);
TORCH_CHECK_DTYPE_OPT(gptq_scales, kHalf);
TORCH_CHECK_DTYPE_OPT(gptq_g_idx, kInt);
TORCH_CHECK_SHAPES(q_perm, 0, q_invperm, 0, 1);
int device = q_weight.device().index();
int width = q_weight.size(1);
int groups;
int height;
if (!q_scale.device().is_meta())
{
TORCH_CHECK_SHAPES(q_weight, 1, q_scale, 1, 8);
TORCH_CHECK_SHAPES(q_scale_max, 0, q_scale, 0, 1);
groups = q_scale.size(0);
height = q_invperm.size(0);
}
else
{
TORCH_CHECK_SHAPES(q_weight, 1, gptq_qzeros, 1, 8);
TORCH_CHECK_SHAPES(q_weight, 1, gptq_scales, 1, 1);
groups = gptq_qzeros.size(0);
height = q_weight.size(0) * 8;
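        // GPTQ packs eight 4-bit weights into each 32-bit row of q_weight, hence the factor of 8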
}
TORCH_CHECK(temp_dq.size(0) >= width * height, "Insufficient size of temp_dq buffer")
QMatrix* m = new QMatrix
(
device,
height,
width,
groups,
(uint32_t*) q_weight.data_ptr(),
q_perm.device().is_meta() ? NULL : (uint16_t*) q_perm.data_ptr(),
q_invperm.device().is_meta() ? NULL : (uint16_t*) q_invperm.data_ptr(),
q_scale.device().is_meta() ? NULL : (uint32_t*) q_scale.data_ptr(),
q_scale_max.device().is_meta() ? NULL : (half*) q_scale_max.data_ptr(),
q_groups.device().is_meta() ? NULL : (uint16_t*) q_groups.data_ptr(),
q_group_map.device().is_meta() ? NULL : (uint16_t*) q_group_map.data_ptr(),
gptq_qzeros.device().is_meta() ? NULL : (uint32_t*) gptq_qzeros.data_ptr(),
gptq_scales.device().is_meta() ? NULL : (half*) gptq_scales.data_ptr(),
gptq_g_idx.device().is_meta() ? NULL : (uint32_t*) gptq_g_idx.data_ptr(),
(half*) temp_dq.data_ptr()
);
if (m->failed) throw std::runtime_error("CUDA out of memory");
return reinterpret_cast<uintptr_t> (m);
}
void gemm_half_q_half
(
torch::Tensor a,
uintptr_t b,
torch::Tensor c,
bool force_cuda
)
{
QMatrix* qm = reinterpret_cast<QMatrix*> (b);
TORCH_CHECK_DTYPE(a, kHalf);
TORCH_CHECK_DTYPE(c, kHalf);
TORCH_CHECK_SHAPES(a, 0, c, 0, 1);
TORCH_CHECK(qm->height == a.size(1), "a and b have incompatible shapes")
TORCH_CHECK(qm->width == c.size(1), "b and c have incompatible shapes")
const at::cuda::OptionalCUDAGuard device_guard(device_of(a));
gemm_half_q_half_cuda
(
at::cuda::getCurrentCUDABlasHandle(),
(const half*) a.data_ptr(),
qm,
(half*) c.data_ptr(),
c.size(0), // m
c.size(1), // n
a.size(1), // k
true,
NULL,
force_cuda
);
}
// Bindings
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
m.def("make_q_matrix", &make_q_matrix, "make_q_matrix");
m.def("gemm_half_q_half", &gemm_half_q_half, "gemm_half_q_half");
}
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/ext.cpp/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/ext.cpp",
"repo_id": "text-generation-inference",
"token_count": 2184
} | 195 |
import torch
from text_generation_server.utils.tokens import (
StopSequenceCriteria,
StoppingCriteria,
FinishReason,
batch_top_tokens,
)
def test_stop_sequence_criteria():
criteria = StopSequenceCriteria("/test;")
assert not criteria("/")
assert not criteria("/test")
assert criteria("/test;")
assert not criteria("/test; ")
def test_stop_sequence_criteria_escape():
criteria = StopSequenceCriteria("<|stop|>")
assert not criteria("<")
assert not criteria("<|stop")
assert criteria("<|stop|>")
assert not criteria("<|stop|> ")
def test_stopping_criteria():
criteria = StoppingCriteria(0, [StopSequenceCriteria("/test;")], max_new_tokens=5)
assert criteria(65827, "/test") == (False, None)
assert criteria(30, ";") == (True, FinishReason.FINISH_REASON_STOP_SEQUENCE)
def test_stopping_criteria_eos():
criteria = StoppingCriteria(0, [StopSequenceCriteria("/test;")], max_new_tokens=5)
assert criteria(1, "") == (False, None)
assert criteria(0, "") == (True, FinishReason.FINISH_REASON_EOS_TOKEN)
def test_stopping_criteria_max():
criteria = StoppingCriteria(0, [StopSequenceCriteria("/test;")], max_new_tokens=5)
assert criteria(1, "") == (False, None)
assert criteria(1, "") == (False, None)
assert criteria(1, "") == (False, None)
assert criteria(1, "") == (False, None)
assert criteria(1, "") == (True, FinishReason.FINISH_REASON_LENGTH)
def test_batch_top_tokens():
top_n_tokens = [0, 2, 3, 4, 5]
top_n_tokens_tensor = torch.tensor(top_n_tokens)
inp_logprobs = torch.tensor([[-1.0, -3.0, -4.0, -2.0, -3.0]] * 5)
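    # Per position, token 0 has the highest logprob (-1.0), then token 3 (-2.0),
    # then tokens 1 and 4 (-3.0), and token 2 last (-4.0); this ordering drives the asserts below.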
accepted_ids = torch.ones_like(top_n_tokens_tensor)
topn_tok_ids, topn_tok_logprobs = batch_top_tokens(
top_n_tokens, top_n_tokens_tensor, inp_logprobs, accepted_ids
)
assert topn_tok_ids[0] == [[]]
assert topn_tok_ids[1] == [[0, 3]]
assert topn_tok_ids[2] == [[0, 3, 1, 4]]
assert topn_tok_ids[3] == [[0, 3, 1, 4]]
assert topn_tok_ids[4] == [[0, 3, 1, 4, 2]]
assert topn_tok_logprobs[0] == [[]]
assert topn_tok_logprobs[1] == [[-1, -2]]
assert topn_tok_logprobs[2] == [[-1, -2, -3, -3]]
assert topn_tok_logprobs[3] == [[-1, -2, -3, -3]]
assert topn_tok_logprobs[4] == [[-1, -2, -3, -3, -4]]
# Now let's make second member of the batch be speculated
inp_logprobs = torch.tensor([[-1.0, -3.0, -4.0, -2.0, -3.0]] * 5 * 2)
accepted_ids[1] = 2
topn_tok_ids, topn_tok_logprobs = batch_top_tokens(
top_n_tokens, top_n_tokens_tensor, inp_logprobs, accepted_ids
)
assert topn_tok_ids[0] == [[]]
assert topn_tok_ids[1] == [[0, 3], [0, 3]]
assert topn_tok_ids[2] == [[0, 3, 1, 4]]
assert topn_tok_ids[3] == [[0, 3, 1, 4]]
assert topn_tok_ids[4] == [[0, 3, 1, 4, 2]]
assert topn_tok_logprobs[0] == [[]]
assert topn_tok_logprobs[1] == [[-1, -2], [-1, -2]]
assert topn_tok_logprobs[2] == [[-1, -2, -3, -3]]
assert topn_tok_logprobs[3] == [[-1, -2, -3, -3]]
assert topn_tok_logprobs[4] == [[-1, -2, -3, -3, -4]]
| text-generation-inference/server/tests/utils/test_tokens.py/0 | {
"file_path": "text-generation-inference/server/tests/utils/test_tokens.py",
"repo_id": "text-generation-inference",
"token_count": 1428
} | 196 |
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple
from text_generation_server.utils import paged_attention, flash_attn
from text_generation_server.utils.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
PositionRotaryEmbedding,
TensorParallelHead,
get_linear,
FastLayerNorm,
)
class PhiConfig(PretrainedConfig):
def __init__(
self,
vocab_size=51200,
hidden_size=2560,
num_hidden_layers=32,
num_attention_heads=32,
num_key_value_heads=32,
hidden_act="gelu_fast", # llama uses silu
layer_norm_eps=1e-05, # rms in llama,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=False,
rope_theta=10000.0,
resid_pdrop=0.1, # llama doesn't have this
partial_rotary_factor=0.5, # important difference between llama and phi
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.rope_theta = rope_theta
self.resid_pdrop = resid_pdrop
self.partial_rotary_factor = partial_rotary_factor
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
# this is the same as llama except for Phi uses bias=True
def load_attention(config, prefix, weights):
if config.num_attention_heads != config.num_key_value_heads:
return _load_gqa(config, prefix, weights)
else:
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=True,
)
def _load_gqa(config, prefix: str, weights):
assert config.hidden_size % config.num_attention_heads == 0
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
quantize=config.quantize,
dim=0,
)
if config.quantize not in ["gptq", "awq"]:
weight = weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.hidden_size // config.num_attention_heads
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
# this is the same as llama except for Phi uses bias=True
return TensorParallelColumnLinear(
get_linear(weight, bias=True, quantize=config.quantize)
)
class FlashPhiAttention(torch.nn.Module):
def __init__(
self,
prefix: str,
config,
weights,
):
super().__init__()
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
self.softmax_scale = self.head_size**-0.5
self.rotary_dim = int(config.partial_rotary_factor * self.head_size)
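        # e.g. with the defaults above (hidden_size 2560, 32 heads -> head_size 80) and
        # partial_rotary_factor 0.5, only the first 40 dimensions of each head are rotated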
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.rotary_dim,
base=config.rope_theta,
device=weights.device,
)
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights)
# in llama the dense layer is called "o_proj" and has bias=False
self.dense = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.dense",
weights=weights,
bias=True,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
input_lengths,
max_s,
):
# Compute query, key, value and split
qkv = self.query_key_value(hidden_states)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
# Reshape query and key for rotary embeddings
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
# NOTE: this is the main difference between Llama and Phi
# in llama the rotary embeddings are applied to the whole query and key.
        # Phi uses PARTIAL rotary embeddings, which are applied only to the first `rotary_dim` dimensions
#
# Apply partial positional embeddings in place
self.rotary_emb(
query[:, :, : self.rotary_dim], kv[:, 0, :, : self.rotary_dim], cos, sin
)
# Reshape key and value and cache
paged_attention.reshape_and_cache(
kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots
)
# output tensor
attn_output = torch.empty_like(query)
# Prefill
if cu_seqlen_prefill is not None:
flash_attn.attention(
query,
torch.select(kv, dim=1, index=0),
torch.select(kv, dim=1, index=1),
attn_output,
cu_seqlen_prefill,
max_s,
self.softmax_scale,
)
# Decode
else:
paged_attention.attention(
attn_output,
query,
kv_cache[0],
kv_cache[1],
self.kv_head_mapping,
self.softmax_scale,
block_tables,
input_lengths,
max_s,
)
return self.dense(attn_output.view(-1, self.num_heads * self.head_size))
class PhiMLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
act = config.hidden_act
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate="tanh"
if act in ["gelu_fast", "gelu_pytorch_tanh"]
else "none",
)
)
# llama weights are up_proj and down_proj and bias=False
self.up_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.fc1",
weights=weights,
bias=True,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.fc2",
weights=weights,
bias=True,
)
def forward(self, hidden_states):
# NOTE: Llama requires the gate up states to an intermediate size
# Phi does not and we can avoid the `view` operation
return self.down_proj(self.act(self.up_proj(hidden_states)))
class FlashPhiLayer(nn.Module):
def __init__(self, layer_id, config, weights):
super().__init__()
prefix = f"model.layers.{layer_id}"
self.self_attn = FlashPhiAttention(
prefix=f"{prefix}.self_attn", config=config, weights=weights
)
self.mlp = PhiMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
self.input_layernorm = FastLayerNorm.load(
prefix=f"{prefix}.input_layernorm",
weights=weights,
eps=config.layer_norm_eps,
)
self.resid_dropout = torch.nn.Dropout(config.resid_pdrop)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
input_lengths,
max_s,
):
hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
input_lengths,
max_s,
)
hidden_states = self.resid_dropout(attn_output).add(
self.resid_dropout(self.mlp(hidden_states))
)
return hidden_states, res
class FlashPhiModel(torch.nn.Module):
def __init__(self, config, weights):
super().__init__()
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
self.embed_tokens = TensorParallelEmbedding(
prefix="model.embed_tokens", weights=weights
)
self.layers = nn.ModuleList(
[
FlashPhiLayer(
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.gradient_checkpointing = False
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
self.norm = FastLayerNorm.load(
prefix="model.final_layernorm",
weights=weights,
eps=config.layer_norm_eps,
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
input_lengths: torch.Tensor,
max_s: int,
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
# Get rotary cos and sin for this forward
# Avoid to index in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
input_lengths,
max_s,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashPhiForCausalLM(torch.nn.Module):
def __init__(self, config, weights):
super().__init__()
self.model = FlashPhiModel(config, weights)
self.lm_head = TensorParallelHead.load(
config,
prefix="lm_head",
weights=weights,
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
input_lengths: torch.Tensor,
max_s: int,
lm_head_indices: Optional[torch.Tensor] = None,
) -> torch.Tensor:
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
input_lengths,
max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
return self.lm_head(hidden_states)
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 6560
} | 197 |
import math
import torch
import torch.distributed
import numpy as np
from dataclasses import dataclass
from opentelemetry import trace
from transformers import PreTrainedTokenizerBase
from transformers.models.llama import LlamaTokenizerFast
from typing import Optional, Tuple, Type, List
from text_generation_server.pb import generate_pb2
from text_generation_server.models import FlashCausalLM
from text_generation_server.models.flash_causal_lm import FlashCausalLMBatch, BLOCK_SIZE
from text_generation_server.models.cache_manager import (
get_cache_manager,
)
from text_generation_server.models.custom_modeling.flash_mistral_modeling import (
FlashMistralForCausalLM,
MistralConfig,
)
from text_generation_server.utils.speculate import get_speculate
from text_generation_server.utils import (
initialize_torch_distributed,
weight_files,
Weights,
HeterogeneousNextTokenChooser,
StoppingCriteria,
)
tracer = trace.get_tracer(__name__)
# Will be set in init
SLIDING_WINDOW: Optional[int] = None
SLIDING_WINDOW_BLOCKS: Optional[int] = None
# Adds windowing logic to FlashCausalLMBatch
@dataclass
class FlashMistralBatch(FlashCausalLMBatch):
# Prefill cache indices is used to slice into the kv tensor before caching it into the paged attention buffers
# as we only keep SLIDING_WINDOW values instead of the whole tensor
prefill_cache_indices: Optional[torch.Tensor] = None
@classmethod
def from_pb(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
dtype: torch.dtype,
device: torch.device,
) -> "FlashCausalLMBatch":
global SLIDING_WINDOW
global SLIDING_WINDOW_BLOCKS
batch_inputs = []
max_truncation = 0
for r in pb.requests:
batch_inputs.append(r.inputs)
max_truncation = max(max_truncation, r.truncate)
batch_tokenized_inputs = tokenizer(
batch_inputs, truncation=True, max_length=max_truncation
)["input_ids"]
position_ids = []
cu_seqlen_prefill = [0]
needed_blocks_slots = []
start_slots = []
slot_indices = []
prefill_cache_indices = []
input_lengths = []
prefix_offsets = []
read_offsets = []
all_input_ids = []
requests_idx_mapping = {}
all_prefill_logprobs = True
no_prefill_logprobs = True
prefill_head_indices = []
prefill_next_token_indices = []
prefill_cu_outlens = [0]
next_token_chooser_parameters = []
stopping_criterias = []
top_n_tokens = []
# Cumulative length
cumulative_length = 0
cumulative_max_length = 0
prefill_out_cumulative_length = 0
blocks = 0
max_seqlen = 0
max_length = 0
max_blocks = 0
# Parse batch
for i, (r, tokenized_input) in enumerate(
zip(pb.requests, batch_tokenized_inputs)
):
# request id -> idx in list mapping
requests_idx_mapping[r.id] = i
tokenized_input = tokenized_input[-r.truncate :]
input_length = len(tokenized_input)
input_lengths.append(input_length)
prefix_offsets.append(input_length - 5)
read_offsets.append(input_length)
all_input_ids.append(tokenized_input)
# Position ids
request_position_ids = torch.arange(0, input_length, dtype=torch.int32)
position_ids.append(request_position_ids)
# Add cumulative lengths of all previous inputs
cu_seqlen_prefill.append(cumulative_length + input_length)
next_token_chooser_parameters.append(r.parameters)
stopping_criteria = StoppingCriteria.from_pb(
r.stopping_parameters, tokenizer
)
max_new_tokens = stopping_criteria.max_new_tokens
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(r.top_n_tokens)
# Paged attention
            # Remove one as the first token does not have a past
speculative_length = get_speculate()
total_tokens = input_length + max_new_tokens - 1 + speculative_length
            # Needed blocks cannot exceed SLIDING_WINDOW_BLOCKS
needed_blocks = math.ceil(total_tokens / BLOCK_SIZE)
if SLIDING_WINDOW_BLOCKS is not None:
needed_blocks = min(needed_blocks, SLIDING_WINDOW_BLOCKS)
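            # e.g. a request needing 100 total tokens reserves ceil(100 / BLOCK_SIZE) blocks,
            # i.e. 7 blocks assuming the default BLOCK_SIZE of 16, capped by the sliding window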
blocks += needed_blocks
needed_blocks_slots.append((needed_blocks, total_tokens))
start_slots.append(cumulative_max_length)
request_slot_indices = torch.arange(
cumulative_max_length,
cumulative_max_length + input_length,
dtype=torch.int64,
)
slot_indices.append(request_slot_indices)
# Create tensor to slice into the kv tensor in prefill
if SLIDING_WINDOW is not None:
request_prefill_cache_indices = torch.arange(
cumulative_length + max(0, input_length - SLIDING_WINDOW),
cumulative_length + input_length,
dtype=torch.int64,
)
prefill_cache_indices.append(request_prefill_cache_indices)
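                # e.g. with SLIDING_WINDOW = 4096 and a 5000-token prompt, only the last
                # 4096 positions of that request are written into the paged KV cache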
all_prefill_logprobs = all_prefill_logprobs and r.prefill_logprobs
no_prefill_logprobs = no_prefill_logprobs and not r.prefill_logprobs
if r.prefill_logprobs:
prefill_head_indices.append(request_position_ids + cumulative_length)
prefill_next_token_indices.append(
prefill_out_cumulative_length + input_length - 1
)
prefill_cu_outlens.append(prefill_out_cumulative_length + input_length)
prefill_out_cumulative_length += input_length
else:
prefill_head_indices.append(
torch.tensor(
[cumulative_length + input_length - 1], dtype=torch.int32
)
)
prefill_next_token_indices.append(prefill_out_cumulative_length)
prefill_cu_outlens.append(prefill_out_cumulative_length + 1)
prefill_out_cumulative_length += 1
# Update
cumulative_length += input_length
cumulative_max_length += total_tokens
max_seqlen = max(max_seqlen, input_length)
max_blocks = max(max_blocks, needed_blocks)
max_length = max(
max_length, input_length + max_new_tokens + speculative_length
)
next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
next_token_chooser_parameters, dtype, device
)
start_slots = torch.tensor(start_slots, dtype=torch.int64)
# Padded all_input_ids_tensor
all_input_ids_tensor = np.zeros(
(len(all_input_ids), max_length), dtype=np.int64
)
for i, input_ids in enumerate(all_input_ids):
all_input_ids_tensor[i, : len(input_ids)] = input_ids
# Create tensors on device
all_input_ids_tensor = torch.tensor(
all_input_ids_tensor, dtype=torch.int64, device=device
)
if len(pb.requests) > 1:
input_ids = np.concatenate(all_input_ids, dtype=np.int64)
position_ids = torch.cat(position_ids)
slot_indices = torch.cat(slot_indices)
if SLIDING_WINDOW is not None:
prefill_cache_indices = torch.cat(prefill_cache_indices)
else:
input_ids = all_input_ids[0]
position_ids = position_ids[0]
slot_indices = slot_indices[0]
if SLIDING_WINDOW is not None:
prefill_cache_indices = prefill_cache_indices[0]
cu_seqlen_prefill = torch.tensor(
cu_seqlen_prefill, device=device, dtype=torch.int32
)
position_ids = position_ids.to(device)
slot_indices = slot_indices.to(device)
prefill_cache_indices = (
prefill_cache_indices.to(device) if SLIDING_WINDOW is not None else None
)
input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device)
input_lengths_tensor = torch.tensor(
input_lengths, dtype=torch.int32, device=device
)
if all_prefill_logprobs:
prefill_head_indices = None
prefill_next_token_indices = cu_seqlen_prefill[1:] - 1
elif no_prefill_logprobs:
prefill_head_indices = cu_seqlen_prefill[1:] - 1
prefill_next_token_indices = None
else:
prefill_head_indices = torch.tensor(
torch.cat(prefill_head_indices), dtype=torch.int64, device=device
)
prefill_next_token_indices = torch.tensor(
prefill_next_token_indices, dtype=torch.int64, device=device
)
top_n_tokens_tensor = torch.tensor(
top_n_tokens, device=device, dtype=torch.int64
)
return cls(
batch_id=pb.id,
requests=pb.requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
start_slots=start_slots,
slot_indices=slot_indices,
needed_blocks_slots=needed_blocks_slots,
block_tables=None,
block_tables_tensor=None,
slots=None,
max_seqlen=max_seqlen,
prefill_head_indices=prefill_head_indices,
prefill_next_token_indices=prefill_next_token_indices,
prefill_cu_outlens=prefill_cu_outlens,
input_lengths=input_lengths,
input_lengths_tensor=input_lengths_tensor,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
all_input_ids=all_input_ids,
all_input_ids_tensor=all_input_ids_tensor,
next_token_chooser=next_token_chooser,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
blocks=blocks,
max_blocks=max_blocks,
prefill_cache_indices=prefill_cache_indices,
speculative_ids=None,
)
class BaseFlashMistral(FlashCausalLM):
def __init__(
self,
config_cls,
model_cls,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
global SLIDING_WINDOW
global SLIDING_WINDOW_BLOCKS
self.process_group, rank, world_size = initialize_torch_distributed()
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
dtype = torch.float16 if dtype is None else dtype
else:
raise NotImplementedError("FlashLlama is only available on GPU")
tokenizer = LlamaTokenizerFast.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
config = config_cls.from_pretrained(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
config.quantize = quantize
# Set context windows
if config.sliding_window is not None:
SLIDING_WINDOW = config.sliding_window
SLIDING_WINDOW_BLOCKS = math.ceil(config.sliding_window / BLOCK_SIZE)
torch.distributed.barrier(group=self.process_group)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(filenames, device, dtype, process_group=self.process_group)
if config.quantize in ["gptq", "awq"]:
weights._set_gptq_params(model_id, revision)
model = model_cls(config, weights)
torch.distributed.barrier(group=self.process_group)
super(BaseFlashMistral, self).__init__(
model=model,
tokenizer=tokenizer,
num_layers=len(model.model.layers),
num_kv_heads=model.model.num_key_value_heads,
head_size=model.model.head_size,
dtype=dtype,
device=device,
rank=rank,
world_size=world_size,
sliding_window=config.sliding_window,
)
@property
def batch_type(self) -> Type[FlashMistralBatch]:
return FlashMistralBatch
def forward(self, batch: FlashMistralBatch) -> Tuple[torch.Tensor, torch.Tensor]:
# Model Forward
if batch.speculative_ids is not None:
input_ids = batch.input_ids
position_ids = batch.position_ids
cu_seqlen_prefill = batch.cu_seqlen_prefill
kv_cache = get_cache_manager().kv_cache
block_tables = batch.block_tables_tensor
slots = batch.slots[batch.slot_indices]
input_lengths = batch.input_lengths_tensor
max_s = batch.max_seqlen
lm_head_indices = batch.prefill_head_indices
speculative_ids = batch.speculative_ids
B, speculative_length = speculative_ids.shape
new_length = speculative_length + 1
new_input_ids = torch.cat(
[input_ids.unsqueeze(-1), speculative_ids], dim=1
).reshape(-1)
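            # e.g. with speculative_length == 2, every sequence expands to 3 rows:
            # the last verified token followed by the 2 speculated tokens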
arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0)
arange_int = arange.to(dtype=torch.int32)
new_position_ids = (
position_ids.unsqueeze(-1).expand(B, new_length) + arange
).view(-1)
slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1)
input_lengths = (
input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int
).view(-1)
            # Copy the block tables so that every expanded row shares its sequence's block table
block_tables = (
block_tables.unsqueeze(1)
.expand(B, new_length, -1)
.reshape(B * new_length, -1)
.contiguous()
)
max_s = max_s + speculative_length
input_ids = new_input_ids
position_ids = new_position_ids
else:
input_ids = batch.input_ids
position_ids = batch.position_ids
cu_seqlen_prefill = batch.cu_seqlen_prefill
kv_cache = get_cache_manager().kv_cache
block_tables = batch.block_tables_tensor
slots = batch.slots[batch.slot_indices]
input_lengths = batch.input_lengths_tensor
max_s = batch.max_seqlen
lm_head_indices = batch.prefill_head_indices
logits = self.model.forward(
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=kv_cache,
block_tables=block_tables,
slots=slots,
input_lengths=input_lengths,
max_s=max_s,
prefill_cache_indices=batch.prefill_cache_indices,
lm_head_indices=lm_head_indices,
)
if batch.prefill_cache_indices is not None:
batch.prefill_cache_indices = None
return logits
class FlashMistral(BaseFlashMistral):
def __init__(
self,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
super(FlashMistral, self).__init__(
config_cls=MistralConfig,
model_cls=FlashMistralForCausalLM,
model_id=model_id,
revision=revision,
quantize=quantize,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
| text-generation-inference/server/text_generation_server/models/flash_mistral.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/flash_mistral.py",
"repo_id": "text-generation-inference",
"token_count": 7970
} | 198 |
import torch
import time
from dataclasses import dataclass
from opentelemetry import trace
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, PreTrainedTokenizerBase
from typing import Optional, Tuple, List, Type, Dict
from text_generation_server.utils.tokens import batch_top_tokens
from text_generation_server.models import Model
from text_generation_server.models.types import (
GeneratedText,
Batch,
Generation,
Tokens,
)
from text_generation_server.pb import generate_pb2
from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling
tracer = trace.get_tracer(__name__)
@dataclass
class Seq2SeqLMBatch(Batch):
batch_id: int
requests: List[generate_pb2.Request]
requests_idx_mapping: Dict[int, int]
# Encoder values
input_ids: Optional[torch.Tensor]
attention_mask: torch.Tensor
# Decoder values
decoder_input_ids: torch.Tensor
decoder_attention_mask: Optional[torch.Tensor]
encoder_last_hidden_state: Optional[torch.Tensor]
# All tokens
all_decoder_input_ids: List[torch.Tensor]
# Seq2SeqLM keeps track of both encoder and decoder attention keys and values
past_key_values: Optional[List[Tuple]]
# Lengths of all generations present in the batch
input_lengths: List[int]
decoder_input_lengths: List[int]
prefix_offsets: List[int]
read_offsets: List[int]
# Generation helpers
next_token_choosers: List[NextTokenChooser]
stopping_criterias: List[StoppingCriteria]
top_n_tokens: List[int]
top_n_tokens_tensor: torch.Tensor
# Metadata used for padding
max_input_length: int
max_decoder_input_length: int
padding_right_offset: int
# Maximum number of tokens this batch will grow to
max_tokens: int
def to_pb(self) -> generate_pb2.CachedBatch:
"""Convert a Seq2SeqLMBatch to a text_generation_server.v1.CachedBatch protobuf"""
return generate_pb2.CachedBatch(
id=self.batch_id,
request_ids=[r.id for r in self.requests],
size=len(self),
max_tokens=self.max_tokens,
)
@classmethod
def from_pb(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
dtype: torch.dtype,
device: torch.device,
) -> "Seq2SeqLMBatch":
"""Convert a text_generation_server.v1.Batch protobuf to a Seq2SeqLMBatch"""
inputs = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
decoder_input_lengths = []
prefix_offsets = []
read_offsets = []
requests_idx_mapping = {}
# Parse batch
max_truncation = 0
padding_right_offset = 0
max_decode_tokens = 0
for i, r in enumerate(pb.requests):
inputs.append(r.inputs)
requests_idx_mapping[r.id] = i
decoder_input_lengths.append(1)
next_token_choosers.append(NextTokenChooser.from_pb(r.parameters, device))
stopping_criteria = StoppingCriteria.from_pb(
r.stopping_parameters, tokenizer
)
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(r.top_n_tokens)
max_truncation = max(max_truncation, r.truncate)
max_decode_tokens += stopping_criteria.max_new_tokens
padding_right_offset = max(
padding_right_offset, stopping_criteria.max_new_tokens
)
# Tokenize batch
tokenized_inputs = tokenizer(
inputs,
return_tensors="pt",
padding=True,
return_token_type_ids=False,
truncation=True,
max_length=max_truncation,
).to(device)
input_lengths = tokenized_inputs["attention_mask"].sum(1)
max_input_length = input_lengths.max()
# Decoder sequence only contains the bos_token
decoder_input_ids = (
torch.tensor(tokenizer.bos_token_id, device=device)
.repeat(len(pb.requests))
.view(-1, 1)
)
for _ in pb.requests:
prefix_offsets.append(0)
read_offsets.append(1)
all_decoder_input_ids = decoder_input_ids.view(-1).split(1)
top_n_tokens_tensor = torch.tensor(
top_n_tokens, device=device, dtype=torch.int64
)
max_tokens = len(inputs) * (max_input_length + max_decode_tokens)
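        # Worst-case token budget: every request is assumed to reach both the longest
        # input in the batch and the largest allowed number of new tokens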
return cls(
batch_id=pb.id,
requests=pb.requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=tokenized_inputs["input_ids"],
attention_mask=tokenized_inputs["attention_mask"],
decoder_input_ids=decoder_input_ids,
all_decoder_input_ids=list(all_decoder_input_ids),
decoder_attention_mask=None,
encoder_last_hidden_state=None,
past_key_values=None,
input_lengths=input_lengths.tolist(),
decoder_input_lengths=decoder_input_lengths,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length.item(),
max_decoder_input_length=1,
padding_right_offset=padding_right_offset,
max_tokens=max_tokens,
)
@tracer.start_as_current_span("filter")
def filter(self, request_ids: List[int]) -> Optional["Seq2SeqLMBatch"]:
if len(request_ids) == 0:
raise ValueError("Batch must have at least one request")
if len(request_ids) == len(self):
return self
keep_indices = []
# New values after filtering
requests_idx_mapping = {}
requests = []
input_lengths = []
decoder_input_lengths = []
prefix_offsets = []
read_offsets = []
all_decoder_input_ids = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
max_input_length = 0
max_decoder_input_length = 0
padding_right_offset = 0
total_remaining_decode_tokens = 0
for i, request_id in enumerate(request_ids):
idx = self.requests_idx_mapping[request_id]
requests_idx_mapping[request_id] = i
keep_indices.append(idx)
requests.append(self.requests[idx])
prefix_offsets.append(self.prefix_offsets[idx])
read_offsets.append(self.read_offsets[idx])
all_decoder_input_ids.append(self.all_decoder_input_ids[idx])
request_input_length = self.input_lengths[idx]
input_lengths.append(request_input_length)
max_input_length = max(max_input_length, request_input_length)
request_decoder_input_length = self.decoder_input_lengths[idx]
decoder_input_lengths.append(request_decoder_input_length)
max_decoder_input_length = max(
max_decoder_input_length, request_decoder_input_length
)
next_token_choosers.append(self.next_token_choosers[idx])
stopping_criteria = self.stopping_criterias[idx]
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(self.top_n_tokens[idx])
remaining_decode_tokens = (
stopping_criteria.max_new_tokens - stopping_criteria.current_tokens
)
total_remaining_decode_tokens += remaining_decode_tokens
padding_right_offset = max(padding_right_offset, remaining_decode_tokens)
# Apply indices to input_ids, attention mask, past key values and other items that need to be cached
self.decoder_input_ids = self.decoder_input_ids[keep_indices]
self.attention_mask = self.attention_mask[keep_indices, -max_input_length:]
if self.decoder_attention_mask is not None:
self.decoder_attention_mask = self.decoder_attention_mask[
keep_indices,
-(self.padding_right_offset + max_decoder_input_length) : (
self.decoder_attention_mask.shape[1] - self.padding_right_offset
)
+ padding_right_offset,
]
self.encoder_last_hidden_state = self.encoder_last_hidden_state[
keep_indices, -max_input_length:
]
# Ensure that past_key_values tensors can be updated in-place
if type(self.past_key_values[0]) == tuple:
self.past_key_values = [
[t for t in layer] for layer in self.past_key_values
]
decoder_past_seq_len = max_decoder_input_length - 1
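        # Each layer caches (decoder key, decoder value, encoder key, encoder value),
        # so indices 0/1 are sliced with the decoder length and 2/3 with the encoder input length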
for layer in self.past_key_values:
layer[0] = layer[0][keep_indices, :, -decoder_past_seq_len:]
layer[1] = layer[1][keep_indices, :, -decoder_past_seq_len:]
layer[2] = layer[2][keep_indices, :, -max_input_length:]
layer[3] = layer[3][keep_indices, :, -max_input_length:]
top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices]
max_tokens = (
len(request_ids) * (max_input_length + max_decoder_input_length)
            + total_remaining_decode_tokens
)
self.requests = requests
self.requests_idx_mapping = requests_idx_mapping
self.input_ids = None
self.all_decoder_input_ids = all_decoder_input_ids
self.input_lengths = input_lengths
self.decoder_input_lengths = decoder_input_lengths
self.prefix_offsets = prefix_offsets
self.read_offsets = read_offsets
self.next_token_choosers = next_token_choosers
self.stopping_criterias = stopping_criterias
self.top_n_tokens = top_n_tokens
self.top_n_tokens_tensor = top_n_tokens_tensor
self.max_input_length = max_input_length
self.max_decoder_input_length = max_decoder_input_length
self.padding_right_offset = padding_right_offset
self.max_tokens = max_tokens
return self
@classmethod
@tracer.start_as_current_span("concatenate")
def concatenate(cls, batches: List["Seq2SeqLMBatch"]) -> "Seq2SeqLMBatch":
"""Concatenate multiple batches together by padding internal torch tensors"""
# Used for padding
total_batch_size = 0
max_input_length = 0
max_decoder_input_length = 0
padding_right_offset = 0
for batch in batches:
total_batch_size += len(batch)
max_input_length = max(max_input_length, batch.max_input_length)
max_decoder_input_length = max(
max_decoder_input_length, batch.max_decoder_input_length
)
padding_right_offset = max(padding_right_offset, batch.padding_right_offset)
# Batch attributes
requests = []
requests_idx_mapping = {}
all_decoder_input_ids = []
input_lengths = []
decoder_input_lengths = []
prefix_offsets = []
read_offsets = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
max_tokens = 0
# Batch tensors
attention_mask = None
decoder_input_ids = None
decoder_attention_mask = None
encoder_last_hidden_state = None
top_n_tokens_tensor = None
past_key_values = []
# Used for slicing correctly inside the tensors
# Equivalent to a cumsum on batch sizes
start_index = 0
for i, batch in enumerate(batches):
# Extend all list attributes
requests.extend(batch.requests)
all_decoder_input_ids.extend(batch.all_decoder_input_ids)
input_lengths.extend(batch.input_lengths)
decoder_input_lengths.extend(batch.decoder_input_lengths)
prefix_offsets.extend(batch.prefix_offsets)
read_offsets.extend(batch.read_offsets)
next_token_choosers.extend(batch.next_token_choosers)
stopping_criterias.extend(batch.stopping_criterias)
top_n_tokens.extend(batch.top_n_tokens)
if i == 0:
requests_idx_mapping = batch.requests_idx_mapping
else:
# We need to offset the mapping for each batch by the cumulative batch size
for k, v in batch.requests_idx_mapping.items():
requests_idx_mapping[k] = v + start_index
# Slicing end index for this batch
end_index = start_index + len(batch)
# We only concatenate batches that did at least one step
if batch.encoder_last_hidden_state is None:
raise ValueError("Batch encoder_last_hidden_state cannot be None")
# Create padded tensor
if attention_mask is None:
attention_mask = batch.attention_mask.new_zeros(
(total_batch_size, max_input_length),
)
# Copy to correct indices
attention_mask[
start_index:end_index, -batch.max_input_length :
] = batch.attention_mask[:, -batch.max_input_length :]
# Create padded tensor
if decoder_input_ids is None:
decoder_input_ids = batch.decoder_input_ids.new_zeros(
(total_batch_size, 1),
)
# Copy to correct indices
decoder_input_ids[start_index:end_index] = batch.decoder_input_ids
# Create padded tensor
if decoder_attention_mask is None:
# As decoder_attention_mask might not exist, we use `batch.attention_mask` for device here
decoder_attention_mask = batch.attention_mask.new_zeros(
(total_batch_size, max_decoder_input_length + padding_right_offset),
)
# If the decoder mask does not exist yet, all generations started at the same time and we never concatenated
# this batch. All generations are of length `batch.max_decoder_input_length`.
left_offset = max_decoder_input_length - batch.max_decoder_input_length
if batch.decoder_attention_mask is None:
decoder_attention_mask[
start_index:end_index,
left_offset:-padding_right_offset,
] = 1
# If it exists, we need to index
else:
batch_left_offset = (
batch.decoder_attention_mask.shape[1]
- batch.max_decoder_input_length
- batch.padding_right_offset
)
decoder_attention_mask[
start_index:end_index,
left_offset:-padding_right_offset,
] = batch.decoder_attention_mask[
:,
batch_left_offset : -batch.padding_right_offset,
]
# Create padded tensor
if encoder_last_hidden_state is None:
encoder_last_hidden_state = batch.encoder_last_hidden_state.new_zeros(
(
total_batch_size,
max_input_length,
batch.encoder_last_hidden_state.shape[-1],
),
)
if top_n_tokens_tensor is None:
top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(
total_batch_size,
)
top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor
# Copy to correct indices
encoder_last_hidden_state[
start_index:end_index, -batch.max_input_length :, :
] = batch.encoder_last_hidden_state[:, -batch.max_input_length :, :]
batch.encoder_last_hidden_state = None
# Ensure that we can update tensors in-place
if type(batch.past_key_values[0]) == tuple:
batch.past_key_values = [
[t for t in layer] for layer in batch.past_key_values
]
        # Account for any padding tokens that were added while concatenating
max_tokens += batch.max_tokens + (
max_input_length
- batch.max_input_length
+ max_decoder_input_length
- batch.max_decoder_input_length
) * len(batch)
start_index = end_index
# Determine shapes for new past kv tensors
first_past_kvs = batches[0].past_key_values
_, num_heads, _, head_dim = first_past_kvs[0][0].shape
padded_dec_t_shape = (
total_batch_size,
num_heads,
(max_decoder_input_length - 1),
head_dim,
)
padded_enc_t_shape = (
total_batch_size,
num_heads,
max_input_length,
head_dim,
)
# Iterate over attention layers
for j in range(len(first_past_kvs)):
past_key_values.append([])
# Decoder past
for k in range(0, 2):
# Initialize tensors
padded_past_values = first_past_kvs[j][k].new_zeros(padded_dec_t_shape)
past_key_values[j].append(padded_past_values)
start_index = 0
for batch in batches:
t = batch.past_key_values[j][k]
# Clear reference to the original tensor
batch.past_key_values[j][k] = None
# Slicing end index for this batch
end_index = start_index + len(batch)
# We slice the past keys and values to remove the padding from previous batches
past_seq_len = batch.max_decoder_input_length - 1
padded_past_values[start_index:end_index, :, -past_seq_len:, :] = t[
:, :, -past_seq_len:, :
]
del t
start_index = end_index
# Encoder past
for k in range(2, 4):
# Initialize tensors
padded_past_values = first_past_kvs[j][k].new_zeros(padded_enc_t_shape)
past_key_values[j].append(padded_past_values)
start_index = 0
for batch in batches:
t = batch.past_key_values[j][k]
# Clear reference to the original tensor
batch.past_key_values[j][k] = None
# Slicing end index for this batch
end_index = start_index + len(batch)
# We slice the past keys and values to remove the padding from previous batches
padded_past_values[
start_index:end_index, :, -batch.max_input_length :, :
] = t[:, :, -batch.max_input_length :, :]
del t
start_index = end_index
return cls(
batch_id=batches[0].batch_id,
requests=requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=None,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
all_decoder_input_ids=all_decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_last_hidden_state=encoder_last_hidden_state,
past_key_values=past_key_values,
input_lengths=input_lengths,
decoder_input_lengths=decoder_input_lengths,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length,
max_decoder_input_length=max_decoder_input_length,
padding_right_offset=padding_right_offset,
max_tokens=max_tokens,
)
def __len__(self):
return len(self.requests)
class Seq2SeqLM(Model):
def __init__(
self,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
if torch.cuda.is_available():
device = torch.device("cuda")
dtype = torch.float16 if dtype is None else dtype
else:
if quantize:
raise ValueError("quantization is not available on CPU")
device = torch.device("cpu")
dtype = torch.float32 if dtype is None else dtype
model = AutoModelForSeq2SeqLM.from_pretrained(
model_id,
revision=revision,
torch_dtype=dtype,
device_map="auto"
if torch.cuda.is_available() and torch.cuda.device_count() > 1
else None,
load_in_8bit=quantize == "bitsandbytes",
trust_remote_code=trust_remote_code,
)
if torch.cuda.is_available() and torch.cuda.device_count() == 1:
model = model.cuda()
tokenizer = AutoTokenizer.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
tokenizer.bos_token_id = model.config.decoder_start_token_id
super(Seq2SeqLM, self).__init__(
model=model,
tokenizer=tokenizer,
requires_padding=True,
dtype=dtype,
device=device,
)
@property
def batch_type(self) -> Type[Seq2SeqLMBatch]:
return Seq2SeqLMBatch
def decode(self, decoder_ids: List[int]) -> str:
return self.tokenizer.decode(
decoder_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
def forward(
self,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask: Optional,
encoder_last_hidden_state: Optional,
past_key_values: Optional = None,
) -> Tuple[
torch.Tensor,
torch.Tensor,
List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]],
]:
# Model Forward
outputs = self.model.forward(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_last_hidden_state,
past_key_values=past_key_values,
use_cache=True,
)
return (
outputs.logits,
outputs.encoder_last_hidden_state,
outputs.past_key_values,
)
@tracer.start_as_current_span("generate_token")
def generate_token(
self, batch: Seq2SeqLMBatch
) -> Tuple[List[Generation], Optional[Seq2SeqLMBatch], Tuple[int, int]]:
start = time.time_ns()
if batch.decoder_attention_mask is not None:
# slice to the correct shape
decoder_attention_mask = batch.decoder_attention_mask[
:, : -batch.padding_right_offset
]
else:
decoder_attention_mask = None
        # Wrap `encoder_last_hidden_state` because for some reason, Transformers does an `encoder_last_hidden_state[0]`
        # internally...
if batch.encoder_last_hidden_state is not None:
encoder_last_hidden_state = [batch.encoder_last_hidden_state]
else:
encoder_last_hidden_state = None
logits, encoder_last_hidden_state, past = self.forward(
batch.input_ids,
batch.attention_mask,
batch.decoder_input_ids,
decoder_attention_mask,
encoder_last_hidden_state,
batch.past_key_values,
)
# Speculation is not active for seq2seq
accepted_ids = torch.ones_like(batch.decoder_input_ids)[:, 0]
batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
batch.top_n_tokens,
batch.top_n_tokens_tensor,
torch.log_softmax(logits[:, -1], -1),
accepted_ids,
)
start_decode = time.time_ns()
# Finished requests
generations: List[Generation] = []
stopped = True
# Zipped iterator
iterator = zip(
batch.requests,
batch.input_lengths,
batch.prefix_offsets,
batch.read_offsets,
batch.decoder_input_lengths,
logits,
batch.next_token_choosers,
batch.stopping_criterias,
batch.all_decoder_input_ids,
batch.top_n_tokens,
batch_top_token_ids,
batch_top_token_logprobs,
)
# For each member of the batch
for i, (
request,
input_length,
prefix_offset,
read_offset,
decoder_input_length,
logits,
next_token_chooser,
stopping_criteria,
all_decoder_input_ids,
top_n_tokens,
top_token_ids,
top_token_logprobs,
) in enumerate(iterator):
# Select next token
next_token_id, logprobs = next_token_chooser(
all_decoder_input_ids.view(1, -1), logits[-1:, :]
)
# Append next token to decoder tokens
all_decoder_input_ids = torch.cat(
[all_decoder_input_ids, next_token_id.squeeze(1)]
)
new_decoder_input_length = decoder_input_length + 1
# Generated token
next_token_logprob = logprobs[-1, next_token_id]
next_token_id_squeezed = next_token_id.squeeze()
next_token_text, prefix_offset, read_offset = self.decode_token(
all_decoder_input_ids, prefix_offset, read_offset
)
# Evaluate stopping criteria
stop, reason = stopping_criteria(next_token_id, next_token_text)
if not stop:
stopped = False
# Shard generations
# All generations will be appended in the rust sharded client
if i % self.world_size == self.rank:
if stop:
# Slice with decoder_input_length to remove padding
# Decode all tokens
output_text, _, _ = self.decode_token(
all_decoder_input_ids,
prefix_offset=len(all_decoder_input_ids)
- decoder_input_length
- 1,
read_offset=len(all_decoder_input_ids) - decoder_input_length,
skip_special_tokens=True,
)
# Get seed
if isinstance(next_token_chooser.choice, Sampling):
seed = next_token_chooser.choice.seed
else:
seed = None
generated_text = GeneratedText(
output_text, stopping_criteria.current_tokens, reason, seed
)
else:
generated_text = None
# Prefill
if stopping_criteria.current_tokens == 1 and request.prefill_logprobs:
prefill_tokens = Tokens(
[self.tokenizer.bos_token_id],
[float("nan")],
[self.tokenizer.bos_token],
[False],
)
else:
prefill_tokens = None
if top_n_tokens > 0:
all_top_tokens = []
for (top_token_ids, top_token_logprobs) in zip(top_token_ids, top_token_logprobs):
toptoken_texts = self.tokenizer.batch_decode(
top_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
special_toptokens = [
token_id in self.all_special_ids for token_id in top_token_ids
]
top_tokens = Tokens(
top_token_ids,
top_token_logprobs,
toptoken_texts,
special_toptokens,
)
all_top_tokens.append(top_tokens)
top_tokens = all_top_tokens
else:
top_tokens = None
generation = Generation(
request.id,
prefill_tokens,
Tokens(
[next_token_id_squeezed],
[next_token_logprob],
[next_token_text],
[next_token_id_squeezed.item() in self.all_special_ids],
),
generated_text,
top_tokens,
)
generations.append(generation)
# Update values
batch.decoder_input_ids[i] = next_token_id
batch.all_decoder_input_ids[i] = all_decoder_input_ids
batch.input_lengths[i] = input_length
batch.decoder_input_lengths[i] = new_decoder_input_length
batch.prefix_offsets[i] = prefix_offset
batch.read_offsets[i] = read_offset
batch.max_input_length = max(batch.max_input_length, input_length)
batch.max_decoder_input_length = max(
batch.max_decoder_input_length, new_decoder_input_length
)
# We finished all generations in the batch; there is no next batch
if stopped:
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, None, (forward_ns, decode_ns)
# We don't need input_ids after the prefill forward
batch.input_ids = None
batch.encoder_last_hidden_state = encoder_last_hidden_state
batch.past_key_values = past
        # Update decoder_attention_mask as we added a new token to decoder_input_ids
if batch.decoder_attention_mask is not None:
batch.decoder_attention_mask[:, -batch.padding_right_offset] = 1
batch.padding_right_offset -= 1
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, batch, (forward_ns, decode_ns)
| text-generation-inference/server/text_generation_server/models/seq2seq_lm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/seq2seq_lm.py",
"repo_id": "text-generation-inference",
"token_count": 16012
} | 199 |
import time
import os
from datetime import timedelta
from loguru import logger
from pathlib import Path
from typing import Optional, List
from huggingface_hub import file_download, hf_api, HfApi, hf_hub_download
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from huggingface_hub.utils import (
LocalEntryNotFoundError,
EntryNotFoundError,
RevisionNotFoundError, # noqa # Import here to ease try/except in other part of the lib
)
WEIGHTS_CACHE_OVERRIDE = os.getenv("WEIGHTS_CACHE_OVERRIDE", None)
HF_HUB_OFFLINE = os.environ.get("HF_HUB_OFFLINE", "0").lower() in ["true", "1", "yes"]
def _cached_weight_files(
model_id: str, revision: Optional[str], extension: str
) -> List[str]:
"""Guess weight files from the cached revision snapshot directory"""
d = _get_cached_revision_directory(model_id, revision)
if not d:
return []
filenames = _weight_files_from_dir(d, extension)
return filenames
def _weight_hub_files_from_model_info(
info: hf_api.ModelInfo, extension: str
) -> List[str]:
return [
s.rfilename
for s in info.siblings
if s.rfilename.endswith(extension)
and len(s.rfilename.split("/")) == 1
and "arguments" not in s.rfilename
and "args" not in s.rfilename
and "training" not in s.rfilename
]
def _weight_files_from_dir(d: Path, extension: str) -> List[str]:
    # Only take the first result of os.walk (depth 1), i.e. do not recurse;
    # see _weight_hub_files_from_model_info, which enforces the same thing
    # with the len(s.rfilename.split("/")) == 1 condition
root, _, files = next(os.walk(str(d)))
filenames = [
os.path.join(root, f)
for f in files
if f.endswith(extension)
and "arguments" not in f
and "args" not in f
and "adapter" not in f
and "training" not in f
]
return filenames
def _get_cached_revision_directory(
model_id: str, revision: Optional[str]
) -> Optional[Path]:
if revision is None:
revision = "main"
repo_cache = Path(HUGGINGFACE_HUB_CACHE) / Path(
file_download.repo_folder_name(repo_id=model_id, repo_type="model")
)
if not repo_cache.is_dir():
# No cache for this model
return None
refs_dir = repo_cache / "refs"
snapshots_dir = repo_cache / "snapshots"
# Resolve refs (for instance to convert main to the associated commit sha)
if refs_dir.is_dir():
revision_file = refs_dir / revision
if revision_file.exists():
with revision_file.open() as f:
revision = f.read()
# Check if revision folder exists
if not snapshots_dir.exists():
return None
cached_shas = os.listdir(snapshots_dir)
if revision not in cached_shas:
# No cache for this revision and we won't try to return a random revision
return None
return snapshots_dir / revision
def weight_hub_files(
model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[str]:
"""Get the weights filenames on the hub"""
api = HfApi()
if HF_HUB_OFFLINE:
filenames = _cached_weight_files(model_id, revision, extension)
else:
# Online case, fetch model info from the Hub
info = api.model_info(model_id, revision=revision)
filenames = _weight_hub_files_from_model_info(info, extension)
if not filenames:
raise EntryNotFoundError(
f"No {extension} weights found for model {model_id} and revision {revision}.",
None,
)
return filenames
def try_to_load_from_cache(
model_id: str, revision: Optional[str], filename: str
) -> Optional[Path]:
"""Try to load a file from the Hugging Face cache"""
d = _get_cached_revision_directory(model_id, revision)
if not d:
return None
# Check if file exists in cache
cached_file = d / filename
return cached_file if cached_file.is_file() else None
def weight_files(
model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[Path]:
"""Get the local files"""
# Local model
d = Path(model_id)
if d.exists() and d.is_dir():
local_files = _weight_files_from_dir(d, extension)
if not local_files:
raise FileNotFoundError(
f"No local weights found in {model_id} with extension {extension}"
)
return [Path(f) for f in local_files]
try:
filenames = weight_hub_files(model_id, revision, extension)
except EntryNotFoundError as e:
if extension != ".safetensors":
raise e
# Try to see if there are pytorch weights
pt_filenames = weight_hub_files(model_id, revision, extension=".bin")
# Change pytorch extension to safetensors extension
# It is possible that we have safetensors weights locally even though they are not on the
# hub if we converted weights locally without pushing them
filenames = [
f"{Path(f).stem.lstrip('pytorch_')}.safetensors" for f in pt_filenames
]
if WEIGHTS_CACHE_OVERRIDE is not None:
files = []
for filename in filenames:
p = Path(WEIGHTS_CACHE_OVERRIDE) / filename
if not p.exists():
raise FileNotFoundError(
f"File {p} not found in {WEIGHTS_CACHE_OVERRIDE}."
)
files.append(p)
return files
files = []
for filename in filenames:
cache_file = try_to_load_from_cache(
model_id, revision=revision, filename=filename
)
if cache_file is None:
raise LocalEntryNotFoundError(
f"File {filename} of model {model_id} not found in "
f"{os.getenv('HUGGINGFACE_HUB_CACHE', 'the local cache')}. "
f"Please run `text-generation-server download-weights {model_id}` first."
)
files.append(cache_file)
return files
def download_weights(
filenames: List[str], model_id: str, revision: Optional[str] = None
) -> List[Path]:
"""Download the safetensors files from the hub"""
def download_file(fname, tries=5, backoff: int = 5):
local_file = try_to_load_from_cache(model_id, revision, fname)
if local_file is not None:
logger.info(f"File {fname} already present in cache.")
return Path(local_file)
for idx in range(tries):
try:
logger.info(f"Download file: {fname}")
stime = time.time()
local_file = hf_hub_download(
filename=fname,
repo_id=model_id,
revision=revision,
local_files_only=HF_HUB_OFFLINE,
)
logger.info(
f"Downloaded {local_file} in {timedelta(seconds=int(time.time() - stime))}."
)
return Path(local_file)
except Exception as e:
if idx + 1 == tries:
raise e
logger.error(e)
logger.info(f"Retrying in {backoff} seconds")
time.sleep(backoff)
logger.info(f"Retry {idx + 1}/{tries - 1}")
# We do this instead of using tqdm because we want to parse the logs with the launcher
start_time = time.time()
files = []
for i, filename in enumerate(filenames):
file = download_file(filename)
elapsed = timedelta(seconds=int(time.time() - start_time))
remaining = len(filenames) - (i + 1)
eta = (elapsed / (i + 1)) * remaining if remaining > 0 else 0
logger.info(f"Download: [{i + 1}/{len(filenames)}] -- ETA: {eta}")
files.append(file)
return files
| text-generation-inference/server/text_generation_server/utils/hub.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/hub.py",
"repo_id": "text-generation-inference",
"token_count": 3435
} | 200 |
{
"name": "tokenizers-darwin-arm64",
"version": "0.13.4-rc1",
"os": [
"darwin"
],
"cpu": [
"arm64"
],
"main": "tokenizers.darwin-arm64.node",
"files": [
"tokenizers.darwin-arm64.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/darwin-arm64/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/darwin-arm64/package.json",
"repo_id": "tokenizers",
"token_count": 268
} | 201 |
{
"name": "tokenizers-win32-arm64-msvc",
"version": "0.13.4-rc1",
"os": [
"win32"
],
"cpu": [
"arm64"
],
"main": "tokenizers.win32-arm64-msvc.node",
"files": [
"tokenizers.win32-arm64-msvc.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/win32-arm64-msvc/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/win32-arm64-msvc/package.json",
"repo_id": "tokenizers",
"token_count": 277
} | 202 |
extern crate tokenizers as tk;
use crate::models::Model;
use napi::bindgen_prelude::*;
use std::sync::{Arc, RwLock};
use tokenizers::models::bpe::{BpeBuilder, BPE};
use tokenizers::models::wordlevel::{WordLevel, WordLevelBuilder};
use tokenizers::models::wordpiece::{WordPiece, WordPieceBuilder};
pub struct BPEFromFilesTask {
pub(crate) builder: Option<BpeBuilder>,
}
impl Task for BPEFromFilesTask {
type Output = BPE;
type JsValue = Model;
fn compute(&mut self) -> Result<Self::Output> {
self
.builder
.take()
.ok_or(Error::from_reason("Empty builder".to_string()))?
.build()
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(Model {
model: Some(Arc::new(RwLock::new(output.into()))),
})
}
}
pub struct WordPieceFromFilesTask {
pub(crate) builder: Option<WordPieceBuilder>,
}
impl Task for WordPieceFromFilesTask {
type Output = WordPiece;
type JsValue = Model;
fn compute(&mut self) -> Result<Self::Output> {
self
.builder
.take()
.ok_or(Error::from_reason("Empty builder".to_string()))?
.build()
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(Model {
model: Some(Arc::new(RwLock::new(output.into()))),
})
}
}
pub struct WordLevelFromFilesTask {
pub(crate) builder: Option<WordLevelBuilder>,
}
impl Task for WordLevelFromFilesTask {
type Output = WordLevel;
type JsValue = Model;
fn compute(&mut self) -> Result<Self::Output> {
self
.builder
.take()
.ok_or(Error::from_reason("Empty builder".to_string()))?
.build()
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(Model {
model: Some(Arc::new(RwLock::new(output.into()))),
})
}
}
| tokenizers/bindings/node/src/tasks/models.rs/0 | {
"file_path": "tokenizers/bindings/node/src/tasks/models.rs",
"repo_id": "tokenizers",
"token_count": 800
} | 203 |
from typing import List
import jieba
from tokenizers import NormalizedString, PreTokenizedString, Regex, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import BPE
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer
class JiebaPreTokenizer:
def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
splits = []
# we need to call `str(normalized_string)` because jieba expects a str,
# not a NormalizedString
for token, start, stop in jieba.tokenize(str(normalized_string)):
splits.append(normalized_string[start:stop])
return splits
# We can also easily do it in one line:
# return [normalized_string[w[1] : w[2]] for w in jieba.tokenize(str(normalized_string))]
def odd_number_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
# Just an odd example...
splits = []
last = 0
for i, char in enumerate(str(normalized_string)):
if char.isnumeric() and int(char) % 2 == 1:
splits.append(normalized_string[last:i])
last = i
# Don't forget the last one
splits.append(normalized_string[last:])
return splits
def pre_tokenize(self, pretok: PreTokenizedString):
# Let's call split on the PreTokenizedString to split using `self.jieba_split`
pretok.split(self.jieba_split)
# Here we can call `pretok.split` multiple times if we want to apply
        # different algorithms, but we generally just need to call it once.
pretok.split(self.odd_number_split)
class CustomDecoder:
def decode(self, tokens: List[str]) -> str:
return "".join(tokens)
class CustomNormalizer:
def normalize(self, normalized: NormalizedString):
        # Most of these can be replaced by a `Sequence` combining some of the provided
        # Normalizers (i.e. Sequence([NFKC(), Replace(Regex("\s+"), " "), Lowercase()])),
        # and that should be the preferred way. That being said, here is an example of
        # the kind of things that can be done here:
normalized.nfkc()
normalized.filter(lambda char: not char.isnumeric())
normalized.replace(Regex("\s+"), " ")
normalized.lowercase()
# This section shows how to attach these custom components to the Tokenizer
tok = Tokenizer(BPE())
tok.normalizer = Normalizer.custom(CustomNormalizer())
tok.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer())
tok.decoder = Decoder.custom(CustomDecoder())
input = "永和服装饰品有限公司"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('永和', (0, 2)), ('服装', (2, 4)), ('饰品', (4, 6)), ('有限公司', (6, 10))]
input = "112233"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('1', (0, 1)), ('122', (1, 4)), ('3', (4, 5)), ('3', (5, 6))]
input = "1234 ℌ𝔢𝔩𝔩𝔬 𝔱𝔥𝔢𝔯𝔢 𝓂𝓎 𝒹ℯ𝒶𝓇 𝕕𝕖𝕒𝕣 𝕗𝕣𝕚𝕖𝕟𝕕!"
print("Normalize:", input)
print(tok.normalizer.normalize_str(input))
# " hello there my dear dear friend!"
| tokenizers/bindings/python/examples/custom_components.py/0 | {
"file_path": "tokenizers/bindings/python/examples/custom_components.py",
"repo_id": "tokenizers",
"token_count": 1293
} | 204 |
import json
import os
from typing import Iterator, List, Optional, Union, Tuple
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.models import Unigram
from .base_tokenizer import BaseTokenizer
class SentencePieceUnigramTokenizer(BaseTokenizer):
"""SentencePiece Unigram Tokenizer
Represents the Unigram algorithm, with the pretokenization used by SentencePiece
"""
def __init__(
self,
vocab: Optional[List[Tuple[str, float]]] = None,
        replacement: str = "▁",
add_prefix_space: bool = True,
):
if vocab is not None:
# Let Unigram(..) fail if only one of them is None
tokenizer = Tokenizer(Unigram(vocab))
else:
tokenizer = Tokenizer(Unigram())
tokenizer.normalizer = normalizers.Sequence(
[normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")]
)
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
parameters = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(tokenizer, parameters)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 8000,
show_progress: bool = True,
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
initial_alphabet: Optional[List[str]] = None,
unk_token: Optional[str] = None,
):
"""
Train the model using the given files
Args:
files (:obj:`List[str]`):
A list of path to the files that we should use for training
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
"""
if special_tokens is None:
special_tokens = []
if initial_alphabet is None:
initial_alphabet = []
trainer = trainers.UnigramTrainer(
vocab_size=vocab_size,
special_tokens=special_tokens,
show_progress=show_progress,
initial_alphabet=initial_alphabet,
unk_token=unk_token,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 8000,
show_progress: bool = True,
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
initial_alphabet: Optional[List[str]] = None,
unk_token: Optional[str] = None,
length: Optional[int] = None,
):
"""
Train the model using the given iterator
Args:
iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`):
Any iterator over strings or list of strings
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
length (:obj:`int`, `optional`):
The total number of sequences in the iterator. This is used to
provide meaningful progress tracking
"""
if special_tokens is None:
special_tokens = []
if initial_alphabet is None:
initial_alphabet = []
trainer = trainers.UnigramTrainer(
vocab_size=vocab_size,
special_tokens=special_tokens,
show_progress=show_progress,
initial_alphabet=initial_alphabet,
unk_token=unk_token,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
@staticmethod
def from_spm(filename: str):
try:
import sys
sys.path.append(".")
import sentencepiece_model_pb2 as model
except Exception:
raise Exception(
"You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
)
m = model.ModelProto()
m.ParseFromString(open(filename, "rb").read())
precompiled_charsmap = m.normalizer_spec.precompiled_charsmap
vocab = [(piece.piece, piece.score) for piece in m.pieces]
unk_id = m.trainer_spec.unk_id
model_type = m.trainer_spec.model_type
byte_fallback = m.trainer_spec.byte_fallback
if model_type != 1:
raise Exception(
"You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
)
        replacement = "▁"
add_prefix_space = True
tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback))
if precompiled_charsmap:
tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Precompiled(precompiled_charsmap),
normalizers.Replace(Regex(" {2,}"), " "),
]
)
else:
tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")])
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
parameters = {
"model": "SentencePieceUnigram",
}
obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters)
BaseTokenizer.__init__(obj, tokenizer, parameters)
return obj
| tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py",
"repo_id": "tokenizers",
"token_count": 3351
} | 205 |
import transformers
from tokenizers.implementations import SentencePieceUnigramTokenizer, BaseTokenizer
from tokenizers.processors import TemplateProcessing
from tokenizers.models import Unigram, BPE
from tokenizers import decoders
from tokenizers import Tokenizer, Regex
from tokenizers.normalizers import (
StripAccents,
NFKD,
Lowercase,
Sequence,
BertNormalizer,
Precompiled,
Replace,
)
from tokenizers.pre_tokenizers import (
Digits,
WhitespaceSplit,
Metaspace,
Sequence as PSequence,
)
import json
import unicodedata
import sys
import os
import datetime
import argparse
sys.path.append(".")
from spm_parity_check import check_details
from sentencepiece_extractor import SentencePieceExtractor
def check_number_comma(piece: str) -> bool:
return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit()
def get_proto(filename: str):
try:
import sys
sys.path.append(".")
import sentencepiece_model_pb2 as model
except Exception:
raise Exception(
"You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
)
m = model.ModelProto()
m.ParseFromString(open(filename, "rb").read())
return m
class Converter:
def __init__(self, original_tokenizer):
self.original_tokenizer = original_tokenizer
def converted(self) -> Tokenizer:
raise NotImplementedError()
class SpmConverter(Converter):
def __init__(self, *args):
super().__init__(*args)
self.proto = get_proto(self.original_tokenizer.vocab_file)
def vocab(self, proto):
return [(piece.piece, piece.score) for piece in proto.pieces]
def unk_id(self, proto):
return proto.trainer_spec.unk_id
def tokenizer(self, proto):
model_type = proto.trainer_spec.model_type
vocab = self.vocab(proto)
unk_id = self.unk_id(proto)
if model_type == 1:
tokenizer = Tokenizer(Unigram(vocab, unk_id))
elif model_type == 2:
vocab, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract()
tokenizer = Tokenizer(
BPE(vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True)
)
else:
raise Exception(
"You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
)
return tokenizer
def normalizer(self, proto):
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
return Sequence([Precompiled(precompiled_charsmap), Replace(Regex(" {2,}"), " ")])
def post_processor(self, tokenizer):
return None
def converted(self):
tokenizer = self.tokenizer(self.proto)
# Tokenizer assemble
tokenizer.normalizer = self.normalizer(self.proto)
        replacement = "▁"
add_prefix_space = True
tokenizer.pre_tokenizer = Metaspace(
replacement=replacement, add_prefix_space=add_prefix_space
)
tokenizer.decoder = decoders.Metaspace(
replacement=replacement, add_prefix_space=add_prefix_space
)
post_processor = self.post_processor(tokenizer)
if post_processor:
tokenizer.post_processor = post_processor
# TODO what parameters should we give ?
parameters = {}
return BaseTokenizer(tokenizer, parameters)
class AlbertConverter(SpmConverter):
def vocab(self, proto):
return [
(piece.piece, piece.score)
if check_number_comma(piece.piece)
else (piece.piece, piece.score - 100)
for piece in proto.pieces
]
def normalizer(self, proto):
normalizers = [Replace("``", '"'), Replace("''", '"')]
if not self.original_tokenizer.keep_accents:
normalizers.append(NFKD())
normalizers.append(StripAccents())
if self.original_tokenizer.do_lower_case:
normalizers.append(Lowercase())
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
normalizers.append(Precompiled(precompiled_charsmap))
normalizers.append(Replace(Regex(" {2,}"), " "))
return Sequence(normalizers)
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["[CLS]", "$0", "[SEP]"],
seq_b=["$1", "[SEP]"],
special_tokens=[
("[CLS]", tokenizer.get_vocab()["[CLS]"]),
("[SEP]", tokenizer.get_vocab()["[SEP]"]),
],
)
class CamembertConverter(SpmConverter):
def vocab(self, proto):
vocab = [
("<s>NOTUSED", 0.0),
("<pad>", 0.0),
("</s>NOTUSED", 0.0),
("<unk>", 0.0),
]
vocab += [(piece.piece, piece.score) for piece in proto.pieces]
return vocab
def unk_id(self, proto):
# See vocab unk position
return 3
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["<s>", "$0", "</s>"],
seq_b=["$1", "</s>"],
special_tokens=[
("<s>", tokenizer.get_vocab()["<s>"]),
("</s>", tokenizer.get_vocab()["</s>"]),
],
)
class MBartConverter(SpmConverter):
def vocab(self, proto):
vocab = [
("<s>", 0.0),
("<pad>", 0.0),
("</s>", 0.0),
("<unk>", 0.0),
]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
vocab += [
("ar_AR", 0.0),
("cs_CZ", 0.0),
("de_DE", 0.0),
("en_XX", 0.0),
("es_XX", 0.0),
("et_EE", 0.0),
("fi_FI", 0.0),
("fr_XX", 0.0),
("gu_IN", 0.0),
("hi_IN", 0.0),
("it_IT", 0.0),
("ja_XX", 0.0),
("kk_KZ", 0.0),
("ko_KR", 0.0),
("lt_LT", 0.0),
("lv_LV", 0.0),
("my_MM", 0.0),
("ne_NP", 0.0),
("nl_XX", 0.0),
("ro_RO", 0.0),
("ru_RU", 0.0),
("si_LK", 0.0),
("tr_TR", 0.0),
("vi_VN", 0.0),
("zh_CN", 0.0),
]
return vocab
def unk_id(self, proto):
return 3
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["$0", "</s>", "en_XX"],
seq_b=["$1", "</s>"],
special_tokens=[
("en_XX", tokenizer.get_vocab()["en_XX"]),
("</s>", tokenizer.get_vocab()["</s>"]),
],
)
class XLMRobertaConverter(SpmConverter):
def vocab(self, proto):
vocab = [
("<s>", 0.0),
("<pad>", 0.0),
("</s>", 0.0),
("<unk>", 0.0),
]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
return vocab
def unk_id(self, proto):
unk_id = 3
return unk_id
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["<s>", "$0", "</s>"],
seq_b=["$1", "</s>"],
special_tokens=[
("<s>", tokenizer.get_vocab()["<s>"]),
("</s>", tokenizer.get_vocab()["</s>"]),
],
)
class XLNetConverter(SpmConverter):
def vocab(self, proto):
return [
(piece.piece, piece.score)
if check_number_comma(piece.piece)
else (piece.piece, piece.score - 100)
for piece in proto.pieces
]
def normalizer(self, proto):
normalizers = [Replace("``", '"'), Replace("''", '"')]
if not self.original_tokenizer.keep_accents:
normalizers.append(NFKD())
normalizers.append(StripAccents())
if self.original_tokenizer.do_lower_case:
normalizers.append(Lowercase())
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
normalizers.append(Precompiled(precompiled_charsmap))
normalizers.append(Replace(Regex(" {2,}"), " "))
return Sequence(normalizers)
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["$0", "<sep>", "<cls>"],
seq_b=["$1", "<sep>"],
special_tokens=[
("<sep>", tokenizer.get_vocab()["<sep>"]),
("<cls>", tokenizer.get_vocab()["<cls>"]),
],
)
class ReformerConverter(SpmConverter):
pass
class PegasusConverter(SpmConverter):
offset = 103
def vocab(self, proto):
vocab = [
(self.original_tokenizer.pad_token, 0),
(self.original_tokenizer.eos_token, 0),
]
vocab += [(f"unk_{i}", -100) for i in range(2, 2 + self.offset)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]]
return vocab
def unk_id(self, proto):
return proto.trainer_spec.unk_id + self.offset
def post_processor(self, tokenizer):
eos = self.original_tokenizer.eos_token
return TemplateProcessing(
seq_a=["$0", eos],
seq_b=["$1", eos],
special_tokens=[(eos, tokenizer.get_vocab()[eos])],
)
class T5Converter(SpmConverter):
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["$0", "</s>"],
seq_b=["$1", "</s>"],
special_tokens=[("</s>", tokenizer.get_vocab()["</s>"])],
)
CONVERTERS = {
"AlbertTokenizer": AlbertConverter,
"CamembertTokenizer": CamembertConverter,
"XLMRobertaTokenizer": XLMRobertaConverter,
"MBartTokenizer": MBartConverter,
"XLNetTokenizer": XLNetConverter,
"ReformerTokenizer": ReformerConverter,
"PegasusTokenizer": PegasusConverter,
"T5Tokenizer": T5Converter,
}
def check(pretrained, filename):
transformer_tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained)
converter_class = CONVERTERS[transformer_tokenizer.__class__.__name__]
tokenizer = converter_class(transformer_tokenizer).converted()
now = datetime.datetime.now
trans_total_time = datetime.timedelta(seconds=0)
tok_total_time = datetime.timedelta(seconds=0)
with open(filename, "r") as f:
for i, line in enumerate(f):
line = line.strip()
start = now()
ids = transformer_tokenizer.encode(line)
trans = now()
tok_ids = tokenizer.encode(line).ids
tok = now()
trans_total_time += trans - start
tok_total_time += tok - trans
if ids != tok_ids:
if check_details(line, ids, tok_ids, transformer_tokenizer, tokenizer):
continue
assert ids == tok_ids, f"Error in line {i}: {line} {ids} != {tok_ids}"
tokenizer.save(f"{pretrained.replace('/', '-')}.json")
return ("OK", trans_total_time / tok_total_time)
def main():
pretraineds = [
"albert-base-v1",
"albert-large-v1",
"albert-xlarge-v1",
"albert-xxlarge-v1",
"albert-base-v2",
"albert-large-v2",
"albert-xlarge-v2",
"albert-xxlarge-v2",
"camembert-base",
"xlm-roberta-base",
"xlm-roberta-large",
"xlm-roberta-large-finetuned-conll02-dutch",
"xlm-roberta-large-finetuned-conll02-spanish",
"xlm-roberta-large-finetuned-conll03-english",
"xlm-roberta-large-finetuned-conll03-german",
"facebook/mbart-large-en-ro",
"facebook/mbart-large-cc25",
"xlnet-base-cased",
"xlnet-large-cased",
"google/reformer-crime-and-punishment",
"t5-small",
"google/pegasus-large",
]
parser = argparse.ArgumentParser()
parser.add_argument(
"--filename",
required=True,
type=str,
help="The filename that we are going to encode in both versions to check that conversion worked",
)
parser.add_argument(
"--models",
type=lambda s: s.split(","),
default=pretraineds,
help=f"The pretrained tokenizers you want to test agains, (default: {pretraineds})",
)
args = parser.parse_args()
print(args.filename)
model_len = 50
status_len = 6
speedup_len = 8
print(f"|{'Model':^{model_len}}|{'Status':^{status_len}}|{'Speedup':^{speedup_len}}|")
print(f"|{'-'*model_len}|{'-'*status_len}|{'-'*speedup_len}|")
for pretrained in args.models:
status, speedup = check(pretrained, args.filename)
print(
f"|{pretrained:<{model_len}}|{status:^{status_len}}|{speedup:^{speedup_len - 1}.2f}x|"
)
if __name__ == "__main__":
main()
| tokenizers/bindings/python/scripts/convert.py/0 | {
"file_path": "tokenizers/bindings/python/scripts/convert.py",
"repo_id": "tokenizers",
"token_count": 6438
} | 206 |
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::types::*;
use std::marker::PhantomData;
use std::sync::{Arc, Mutex};
mod iterators;
mod normalization;
mod pretokenization;
mod regex;
pub use iterators::*;
pub use normalization::*;
pub use pretokenization::*;
pub use regex::*;
// PyChar
// This type is a temporary hack to accept `char` as argument
// To be removed once https://github.com/PyO3/pyo3/pull/1282 has been released
pub struct PyChar(pub char);
impl FromPyObject<'_> for PyChar {
fn extract(obj: &PyAny) -> PyResult<Self> {
let s = <PyString as PyTryFrom<'_>>::try_from(obj)?.to_str()?;
let mut iter = s.chars();
if let (Some(ch), None) = (iter.next(), iter.next()) {
Ok(Self(ch))
} else {
Err(exceptions::PyValueError::new_err(
"expected a string of length 1",
))
}
}
}
// RefMut utils
pub trait DestroyPtr {
fn destroy(&mut self);
}
pub struct RefMutGuard<'r, T: DestroyPtr + Clone> {
content: T,
r: PhantomData<&'r mut T>,
}
impl<T: DestroyPtr + Clone> RefMutGuard<'_, T> {
pub fn new(content: T) -> Self {
Self {
content,
r: PhantomData,
}
}
pub fn get(&self) -> T {
self.content.clone()
}
}
impl<T: DestroyPtr + Clone> Drop for RefMutGuard<'_, T> {
fn drop(&mut self) {
self.content.destroy()
}
}
#[derive(Clone)]
pub struct RefMutContainer<T> {
inner: Arc<Mutex<Option<*mut T>>>,
}
impl<T> RefMutContainer<T> {
pub fn new(content: &mut T) -> Self {
Self {
inner: Arc::new(Mutex::new(Some(content))),
}
}
pub fn map<F: FnOnce(&T) -> U, U>(&self, f: F) -> Option<U> {
let lock = self.inner.lock().unwrap();
let ptr = lock.as_ref()?;
Some(f(unsafe { ptr.as_ref().unwrap() }))
}
pub fn map_mut<F: FnOnce(&mut T) -> U, U>(&mut self, f: F) -> Option<U> {
let lock = self.inner.lock().unwrap();
let ptr = lock.as_ref()?;
Some(f(unsafe { ptr.as_mut().unwrap() }))
}
}
impl<T> DestroyPtr for RefMutContainer<T> {
fn destroy(&mut self) {
self.inner.lock().unwrap().take();
}
}
unsafe impl<T: Send> Send for RefMutContainer<T> {}
unsafe impl<T: Sync> Sync for RefMutContainer<T> {}
| tokenizers/bindings/python/src/utils/mod.rs/0 | {
"file_path": "tokenizers/bindings/python/src/utils/mod.rs",
"repo_id": "tokenizers",
"token_count": 1057
} | 207 |
# Training from memory
In the [Quicktour](quicktour), we saw how to build and train a
tokenizer using text files, but we can actually use any Python Iterator.
In this section we'll see a few different ways of training our
tokenizer.
For all the examples listed below, we'll use the same [`~tokenizers.Tokenizer`] and
[`~tokenizers.trainers.Trainer`], built as
following:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START init_tokenizer_trainer",
"end-before": "END init_tokenizer_trainer",
"dedent": 8}
</literalinclude>
This tokenizer is based on the [`~tokenizers.models.Unigram`] model. It
takes care of normalizing the input using the NFKC Unicode normalization
method, and uses a [`~tokenizers.pre_tokenizers.ByteLevel`] pre-tokenizer with the corresponding decoder.
For more information on the components used here, you can check
[here](components).
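If you would rather not open the test file, the construction above boils down
to roughly the following sketch; the vocabulary size and special tokens are
illustrative choices, not values required by the snippet:
```python
from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.models import Unigram

# A Unigram model, NFKC normalization, and byte-level pre-tokenization/decoding.
tokenizer = Tokenizer(Unigram())
tokenizer.normalizer = normalizers.NFKC()
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()
tokenizer.decoder = decoders.ByteLevel()

# The trainer that will learn the vocabulary; these values are only examples.
trainer = trainers.UnigramTrainer(
    vocab_size=20000,
    initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
    special_tokens=["<PAD>", "<BOS>", "<EOS>"],
)
```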
## The most basic way
As you probably guessed already, the easiest way to train our tokenizer
is by using a `List`{.interpreted-text role="obj"}:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_basic",
"end-before": "END train_basic",
"dedent": 8}
</literalinclude>
Easy, right? You can use anything that works as an iterator here, be it a
`List`{.interpreted-text role="obj"}, `Tuple`{.interpreted-text
role="obj"}, or a `np.Array`{.interpreted-text role="obj"}. Anything
works as long as it provides strings.
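For instance, reusing the `tokenizer` and `trainer` built above, a minimal
sketch could look like this (the sentences are placeholder data):
```python
# Any iterable of strings will do; these lines are placeholder data.
data = [
    "Beautiful is better than ugly.",
    "Explicit is better than implicit.",
    "Simple is better than complex.",
]
tokenizer.train_from_iterator(data, trainer=trainer)
```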
## Using the 🤗 Datasets library
An awesome way to access one of the many datasets that exist out there
is by using the 🤗 Datasets library. For more information about it, you
should check [the official documentation
here](https://huggingface.co/docs/datasets/).
Let's start by loading our dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START load_dataset",
"end-before": "END load_dataset",
"dedent": 8}
</literalinclude>
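As a sketch of what such a loading step can look like (the dataset and
configuration names here are only examples, any text dataset works):
```python
import datasets

# The dataset and configuration names are illustrative, not requirements.
dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train")
```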
The next step is to build an iterator over this dataset. The easiest way
to do this is probably by using a generator:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START def_batch_iterator",
"end-before": "END def_batch_iterator",
"dedent": 8}
</literalinclude>
As you can see here, for improved efficiency we can provide a batch of
examples to train on, instead of iterating over them one by one. By
doing so, we can expect performance very similar to what we got while
training directly from files.
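Concretely, such a batching generator can be sketched as follows, assuming
the dataset exposes a `"text"` column:
```python
def batch_iterator(batch_size=1000):
    # Slice the dataset so the trainer receives lists of strings instead of
    # one example at a time.
    for i in range(0, len(dataset), batch_size):
        yield dataset[i : i + batch_size]["text"]
```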
With our iterator ready, we just need to launch the training. In order
to improve the look of our progress bars, we can specify the total
length of the dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_datasets",
"end-before": "END train_datasets",
"dedent": 8}
</literalinclude>
And that's it!
## Using gzip files
Since gzip files in Python can be used as iterators, it is extremely
simple to train on such files:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START single_gzip",
"end-before": "END single_gzip",
"dedent": 8}
</literalinclude>
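In plain code, the single-file case amounts to something like this sketch
(the file name is a placeholder):
```python
import gzip

# In text mode, iterating the file object yields decoded lines of text.
with gzip.open("data/my-corpus.txt.gz", "rt") as f:
    tokenizer.train_from_iterator(f, trainer=trainer)
```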
Now if we wanted to train from multiple gzip files, it wouldn't be much
harder:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START multi_gzip",
"end-before": "END multi_gzip",
"dedent": 8}
</literalinclude>
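One way to sketch the multi-file case yourself is to chain the line iterators
with `itertools.chain`; the file names below are placeholders:
```python
import gzip
from itertools import chain

files = ["data/part-1.txt.gz", "data/part-2.txt.gz", "data/part-3.txt.gz"]
# Chain the line iterators of every archive into a single stream of strings.
lines = chain.from_iterable(gzip.open(f, "rt") for f in files)
tokenizer.train_from_iterator(lines, trainer=trainer)
```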
And voilà!
| tokenizers/docs/source-doc-builder/training_from_memory.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/training_from_memory.mdx",
"repo_id": "tokenizers",
"token_count": 1199
} | 208 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("./_ext"))
sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "tokenizers"
copyright = "2020, huggingface"
author = "huggingface"
# The full version, including alpha/beta/rc tags
release = ""
# -- Custom information ------------------------------------------------------
# The possible values for languages (used by `_ext/entities`)
languages = ["node", "rust", "python"]
# This defines the version used to generate links to docs.rs
rust_version = "latest"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "entities", "rust_doc", "toctree_tags"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"analytics_id": "UA-83738774-2"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
def setup(app):
for language in languages:
if not tags.has(language):
exclude_patterns.append(f"tutorials/{language}/*")
app.add_css_file("css/huggingface.css")
app.add_css_file("css/code-snippets.css")
app.add_js_file("js/custom.js")
| tokenizers/docs/source/conf.py/0 | {
"file_path": "tokenizers/docs/source/conf.py",
"repo_id": "tokenizers",
"token_count": 781
} | 209 |
#[macro_use]
extern crate criterion;
mod common;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use criterion::Criterion;
use tokenizers::models::wordpiece::{WordPiece, WordPieceTrainerBuilder};
use tokenizers::normalizers::{BertNormalizer, NormalizerWrapper};
use tokenizers::pre_tokenizers::bert::BertPreTokenizer;
use tokenizers::processors::bert::BertProcessing;
use tokenizers::{decoders, EncodeInput, Model, TokenizerImpl};
use common::{iter_bench_encode, iter_bench_encode_batch, iter_bench_train};
use tokenizers::decoders::DecoderWrapper;
use tokenizers::pre_tokenizers::whitespace::Whitespace;
use tokenizers::processors::PostProcessorWrapper;
static BATCH_SIZE: usize = 1_000;
type BertTokenizer = TokenizerImpl<
WordPiece,
BertNormalizer,
BertPreTokenizer,
BertProcessing,
decoders::wordpiece::WordPiece,
>;
/// Resembling the BertTokenizer implementation from the Python bindings.
fn create_bert_tokenizer(wp: WordPiece) -> BertTokenizer {
let sep_id = *wp.get_vocab().get("[SEP]").unwrap();
let cls_id = *wp.get_vocab().get("[CLS]").unwrap();
let mut tokenizer = TokenizerImpl::new(wp);
tokenizer.with_pre_tokenizer(BertPreTokenizer);
tokenizer.with_normalizer(BertNormalizer::default());
tokenizer.with_decoder(decoders::wordpiece::WordPiece::default());
tokenizer.with_post_processor(BertProcessing::new(
("[SEP]".to_string(), sep_id),
("[CLS]".to_string(), cls_id),
));
tokenizer
}
pub fn bench_bert(c: &mut Criterion) {
let wp = WordPiece::from_file("data/bert-base-uncased-vocab.txt")
.build()
.unwrap();
let tokenizer = create_bert_tokenizer(wp);
let mut lines: Vec<EncodeInput> = vec![];
let mut batches: Vec<Vec<EncodeInput>> = vec![vec![]];
for line in BufReader::new(File::open(Path::new("data/big.txt")).unwrap()).lines() {
let line: EncodeInput = line.unwrap().into();
lines.push(line.clone());
if batches.last().unwrap().len() >= BATCH_SIZE {
batches.push(vec![]);
}
batches.last_mut().unwrap().push(line);
}
c.bench_function("WordPiece BERT encode", |b| {
b.iter_custom(|iters| iter_bench_encode(iters, &tokenizer, &lines))
});
c.bench_function("WordPiece BERT encode batch", |b| {
b.iter_custom(|iters| iter_bench_encode_batch(iters, &tokenizer, &batches))
});
}
fn bench_train(c: &mut Criterion) {
let mut trainer = WordPieceTrainerBuilder::default()
.show_progress(false)
.build();
type Tok = TokenizerImpl<
WordPiece,
NormalizerWrapper,
Whitespace,
PostProcessorWrapper,
DecoderWrapper,
>;
let mut tokenizer = Tok::new(WordPiece::default());
tokenizer.with_pre_tokenizer(Whitespace {});
c.bench_function("WordPiece Train vocabulary (small)", |b| {
b.iter_custom(|iters| {
iter_bench_train(
iters,
&mut tokenizer,
&mut trainer,
vec!["data/small.txt".to_string()],
)
})
});
let mut tokenizer = Tok::new(WordPiece::default());
tokenizer.with_pre_tokenizer(Whitespace {});
c.bench_function("WordPiece Train vocabulary (big)", |b| {
b.iter_custom(|iters| {
iter_bench_train(
iters,
&mut tokenizer,
&mut trainer,
vec!["data/big.txt".to_string()],
)
})
});
}
criterion_group! {
name = bert_benches;
config = Criterion::default().sample_size(20);
targets = bench_bert
}
criterion_group! {
name = benches_train;
config = Criterion::default().sample_size(10);
targets = bench_train
}
criterion_main!(bert_benches, benches_train);
| tokenizers/tokenizers/benches/bert_benchmark.rs/0 | {
"file_path": "tokenizers/tokenizers/benches/bert_benchmark.rs",
"repo_id": "tokenizers",
"token_count": 1642
} | 210 |
use crate::tokenizer::{Decoder, Result};
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize, Default)]
/// Strip is a decoder that removes up to `start` leading and up to `stop`
/// trailing occurrences of the `content` character from each token, leaving
/// the rest of the token untouched.
#[serde(tag = "type")]
#[non_exhaustive]
pub struct Strip {
pub content: char,
pub start: usize,
pub stop: usize,
}
impl Strip {
pub fn new(content: char, start: usize, stop: usize) -> Self {
Self {
content,
start,
stop,
}
}
}
impl Decoder for Strip {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
Ok(tokens
.into_iter()
.map(|token| {
let chars: Vec<char> = token.chars().collect();
let mut start_cut = 0;
for (i, &c) in chars.iter().enumerate().take(self.start) {
if c == self.content {
start_cut = i + 1;
continue;
} else {
break;
}
}
let mut stop_cut = chars.len();
for i in 0..self.stop {
let index = chars.len() - i - 1;
if chars[index] == self.content {
stop_cut = index;
continue;
} else {
break;
}
}
let new_token: String = chars[start_cut..stop_cut].iter().collect();
new_token
})
.collect())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn decode() {
let decoder = Strip::new('H', 1, 0);
let res = decoder
.decode_chain(vec!["Hey".into(), " friend!".into(), "HHH".into()])
.unwrap();
assert_eq!(res, vec!["ey", " friend!", "HH"]);
let decoder = Strip::new('y', 0, 1);
let res = decoder
.decode_chain(vec!["Hey".into(), " friend!".into()])
.unwrap();
assert_eq!(res, vec!["He", " friend!"]);
}
}
| tokenizers/tokenizers/src/decoders/strip.rs/0 | {
"file_path": "tokenizers/tokenizers/src/decoders/strip.rs",
"repo_id": "tokenizers",
"token_count": 1217
} | 211 |
use super::{super::OrderedVocabIter, WordLevel, WordLevelBuilder};
use serde::{
de::{MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use std::collections::HashSet;
impl Serialize for WordLevel {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut model = serializer.serialize_struct("WordLevel", 3)?;
let ordered_vocab = OrderedVocabIter::new(&self.vocab_r);
model.serialize_field("type", "WordLevel")?;
model.serialize_field("vocab", &ordered_vocab)?;
model.serialize_field("unk_token", &self.unk_token)?;
model.end()
}
}
impl<'de> Deserialize<'de> for WordLevel {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_struct(
"WordLevel",
&["type", "vocab", "unk_token"],
WordLevelVisitor,
)
}
}
struct WordLevelVisitor;
impl<'de> Visitor<'de> for WordLevelVisitor {
type Value = WordLevel;
fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "struct WordLevel")
}
fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error>
where
V: MapAccess<'de>,
{
let mut builder = WordLevelBuilder::new();
let mut missing_fields = vec![
            // for backward compatibility, the "type" field is not mandatory
"unk_token",
"vocab",
]
.into_iter()
.collect::<HashSet<_>>();
while let Some(key) = map.next_key::<String>()? {
match key.as_ref() {
"vocab" => builder = builder.vocab(map.next_value()?),
"unk_token" => builder = builder.unk_token(map.next_value()?),
"type" => match map.next_value()? {
"WordLevel" => {}
u => {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(u),
&"WordLevel",
))
}
},
_ => {}
}
missing_fields.remove::<str>(&key);
}
if !missing_fields.is_empty() {
Err(serde::de::Error::missing_field(
missing_fields.iter().next().unwrap(),
))
} else {
Ok(builder.build().map_err(serde::de::Error::custom)?)
}
}
}
#[cfg(test)]
mod tests {
use crate::models::wordlevel::{Vocab, WordLevel, WordLevelBuilder};
#[test]
fn serde() {
let wl = WordLevel::default();
let wl_s = r#"{"type":"WordLevel","vocab":{},"unk_token":"<unk>"}"#;
assert_eq!(serde_json::to_string(&wl).unwrap(), wl_s);
assert_eq!(serde_json::from_str::<WordLevel>(wl_s).unwrap(), wl);
}
#[test]
fn incomplete_vocab() {
let vocab: Vocab = [("<unk>".into(), 0), ("b".into(), 2)]
.iter()
.cloned()
.collect();
let wordlevel = WordLevelBuilder::default()
.vocab(vocab)
.unk_token("<unk>".to_string())
.build()
.unwrap();
let wl_s = r#"{"type":"WordLevel","vocab":{"<unk>":0,"b":2},"unk_token":"<unk>"}"#;
assert_eq!(serde_json::to_string(&wordlevel).unwrap(), wl_s);
assert_eq!(serde_json::from_str::<WordLevel>(wl_s).unwrap(), wordlevel);
}
#[test]
fn deserialization_should_fail() {
let missing_unk = r#"{"type":"WordLevel","vocab":{}}"#;
assert!(serde_json::from_str::<WordLevel>(missing_unk)
.unwrap_err()
.to_string()
.starts_with("missing field `unk_token`"));
let wrong_type = r#"{"type":"WordPiece","vocab":{}}"#;
assert!(serde_json::from_str::<WordLevel>(wrong_type)
.unwrap_err()
.to_string()
.starts_with("invalid value: string \"WordPiece\", expected WordLevel"));
}
}
| tokenizers/tokenizers/src/models/wordlevel/serialization.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/wordlevel/serialization.rs",
"repo_id": "tokenizers",
"token_count": 2084
} | 212 |
use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Debug, PartialEq, Eq)]
/// Pre tokenizes the numbers into single tokens. If individual_digits is set
/// to true, then all digits are split into individual tokens.
#[non_exhaustive]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Digits {
pub individual_digits: bool,
}
impl Digits {
pub fn new(individual_digits: bool) -> Self {
Self { individual_digits }
}
}
impl Default for Digits {
fn default() -> Self {
Self::new(false)
}
}
impl PreTokenizer for Digits {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
if self.individual_digits {
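// `Isolated` makes every matching character its own split, so each digit
// becomes a separate token.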
pretokenized.split(|_, normalized| {
normalized.split(char::is_numeric, SplitDelimiterBehavior::Isolated)
})
} else {
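// `Contiguous` groups adjacent matching characters, so a run of digits
// stays together as a single token.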
pretokenized.split(|_, normalized| {
normalized.split(char::is_numeric, SplitDelimiterBehavior::Contiguous)
})
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn numbers() {
let pretok = Digits::new(false);
let mut pretokenized = PreTokenizedString::from("Hey 123 friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Hey ", (0, 4)), ("123", (4, 7)), (" friend!", (7, 15))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Hey ", (0, 4)), ("123", (4, 7)), (" friend!", (7, 15))]
);
}
#[test]
fn individual_digits() {
let pretok = Digits::new(true);
let mut pretokenized = PreTokenizedString::from("Hey 123 friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey ", (0, 4)),
("1", (4, 5)),
("2", (5, 6)),
("3", (6, 7)),
(" friend!", (7, 15))
]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey ", (0, 4)),
("1", (4, 5)),
("2", (5, 6)),
("3", (6, 7)),
(" friend!", (7, 15))
]
);
}
}
| tokenizers/tokenizers/src/pre_tokenizers/digits.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/digits.rs",
"repo_id": "tokenizers",
"token_count": 1667
} | 213 |
use crate::parallelism::*;
use crate::tokenizer::{Offsets, Token};
use crate::utils::padding::PaddingDirection;
use crate::utils::truncation::TruncationDirection;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::ops::Range;
/// Represents the output of a `Tokenizer`.
#[derive(Default, PartialEq, Debug, Clone, Serialize, Deserialize)]
pub struct Encoding {
/// IDs produced by the `Tokenizer`
ids: Vec<u32>,
/// Type of the IDs
type_ids: Vec<u32>,
/// Tokens associated to each ID
tokens: Vec<String>,
/// Index of the word associated with each token/ID
words: Vec<Option<u32>>,
/// Offsets of the token/ID from the NormalizedString
offsets: Vec<Offsets>,
/// Mask identifying special tokens
special_tokens_mask: Vec<u32>,
/// Mask identifying padding tokens for the attention mechanism
attention_mask: Vec<u32>,
/// A list of overflowing Encoding generated when we got truncated
overflowing: Vec<Encoding>,
/// Ranges of tokens covered by each sequence. If this is empty we consider
/// there is only one sequence in this Encoding, and that it covers the entire range.
sequence_ranges: HashMap<usize, Range<usize>>,
}
impl Encoding {
#[allow(clippy::too_many_arguments)]
pub fn new(
ids: Vec<u32>,
type_ids: Vec<u32>,
tokens: Vec<String>,
words: Vec<Option<u32>>,
offsets: Vec<Offsets>,
special_tokens_mask: Vec<u32>,
attention_mask: Vec<u32>,
overflowing: Vec<Self>,
sequence_ranges: HashMap<usize, Range<usize>>,
) -> Self {
Self {
ids,
type_ids,
tokens,
words,
offsets,
special_tokens_mask,
attention_mask,
overflowing,
sequence_ranges,
}
}
pub fn with_capacity(len: usize) -> Self {
Self {
ids: Vec::with_capacity(len),
type_ids: Vec::with_capacity(len),
tokens: Vec::with_capacity(len),
words: Vec::with_capacity(len),
offsets: Vec::with_capacity(len),
special_tokens_mask: Vec::with_capacity(len),
attention_mask: Vec::with_capacity(len),
overflowing: vec![],
sequence_ranges: HashMap::new(),
}
}
pub fn from_tokens(tokens: Vec<Token>, type_id: u32) -> Self {
let length = tokens.len();
let (ids, tokens, offsets) = tokens.into_iter().fold(
(
Vec::with_capacity(length),
Vec::with_capacity(length),
Vec::with_capacity(length),
),
|(mut ids, mut tokens, mut offsets), t| {
ids.push(t.id);
tokens.push(t.value);
offsets.push(t.offsets);
(ids, tokens, offsets)
},
);
Self {
ids,
tokens,
offsets,
words: vec![None; length],
type_ids: vec![type_id; length],
attention_mask: vec![1; length],
special_tokens_mask: vec![0; length],
overflowing: vec![],
sequence_ranges: HashMap::new(),
}
}
/// Whether this Encoding is empty
pub fn is_empty(&self) -> bool {
self.ids.is_empty()
}
/// Return the total length of this Encoding
pub fn len(&self) -> usize {
self.ids.len()
}
/// Return the number of sequences combined in this Encoding
pub fn n_sequences(&self) -> usize {
if self.sequence_ranges.is_empty() {
1
} else {
self.sequence_ranges.len()
}
}
/// Set the given sequence id for the whole range of tokens contained in this Encoding
pub fn set_sequence_id(&mut self, sequence_id: usize) {
self.sequence_ranges.insert(sequence_id, 0..self.len());
}
pub fn get_tokens(&self) -> &[String] {
&self.tokens[..]
}
pub fn get_word_ids(&self) -> &[Option<u32>] {
&self.words
}
pub fn get_word_ids_mut(&mut self) -> &mut [Option<u32>] {
&mut self.words
}
pub fn get_sequence_ids(&self) -> Vec<Option<usize>> {
let mut sequences = vec![None; self.len()];
for seq_id in 0..self.n_sequences() {
let range = self.sequence_range(seq_id);
let seq_len = range.len();
sequences.splice(range, std::iter::repeat(Some(seq_id)).take(seq_len));
}
sequences
}
pub fn get_ids(&self) -> &[u32] {
&self.ids
}
pub fn get_type_ids(&self) -> &[u32] {
&self.type_ids
}
pub fn set_type_ids(&mut self, type_ids: Vec<u32>) {
self.type_ids = type_ids;
}
pub fn get_offsets(&self) -> &[Offsets] {
&self.offsets
}
pub fn get_offsets_mut(&mut self) -> &mut [Offsets] {
&mut self.offsets
}
pub fn get_special_tokens_mask(&self) -> &[u32] {
&self.special_tokens_mask
}
pub fn get_attention_mask(&self) -> &[u32] {
&self.attention_mask
}
pub fn get_overflowing(&self) -> &Vec<Encoding> {
&self.overflowing
}
pub fn set_overflowing(&mut self, overflowing: Vec<Encoding>) {
self.overflowing = overflowing;
}
pub fn get_overflowing_mut(&mut self) -> &mut Vec<Encoding> {
&mut self.overflowing
}
pub fn take_overflowing(&mut self) -> Vec<Encoding> {
std::mem::take(&mut self.overflowing)
}
pub(crate) fn process_tokens_with_offsets_mut<F>(&mut self, func: F)
where
F: FnMut((usize, (&String, &mut Offsets))),
{
self.tokens
.iter()
.zip(self.offsets.iter_mut())
.enumerate()
.for_each(func)
}
/// Returns the range to target to retrieve something (word_id, offsets, ..) related to the
/// given sequence id
fn sequence_range(&self, sequence_id: usize) -> Range<usize> {
self.sequence_ranges
.get(&sequence_id)
.cloned()
.unwrap_or(0..self.len())
}
/// Returns the index of the sequence containing the given token
pub fn token_to_sequence(&self, token: usize) -> Option<usize> {
if token > self.len() {
None
} else if self.sequence_ranges.is_empty() {
Some(0)
} else {
self.sequence_ranges.iter().find_map(|(seq_id, range)| {
if range.contains(&token) {
Some(*seq_id)
} else {
None
}
})
}
}
/// Get the encoded tokens corresponding to the word at the given index in the input sequence,
/// with the form (start_token, end_token + 1)
pub fn word_to_tokens(&self, word: u32, sequence_id: usize) -> Option<(usize, usize)> {
let (mut start, mut end) = (None, None);
let sequence_range = self.sequence_range(sequence_id);
self.words
.get(sequence_range.clone())?
.iter()
.enumerate()
.take_while(|(_, w)| **w <= Some(word))
.filter(|(_, w)| **w == Some(word))
.for_each(|(i, _)| {
if start.is_none() || Some(i) < start {
start = Some(i);
}
if end.is_none() || Some(i) >= end {
end = Some(i + 1);
}
});
if let (Some(start), Some(end)) = (start, end) {
Some((sequence_range.start + start, sequence_range.start + end))
} else {
None
}
}
/// Get the offsets of the word at the given index in the input sequence.
pub fn word_to_chars(&self, word: u32, sequence_id: usize) -> Option<Offsets> {
self.word_to_tokens(word, sequence_id)
.and_then(|(start, end)| {
if end == 0 {
None
} else {
Some((self.offsets[start].0, self.offsets[end - 1].1))
}
})
}
/// Get the offsets of the token at the given index.
pub fn token_to_chars(&self, token: usize) -> Option<(usize, Offsets)> {
Some((
self.token_to_sequence(token)?,
self.offsets.get(token).copied()?,
))
}
/// Get the word that contains the token at the given index.
pub fn token_to_word(&self, token: usize) -> Option<(usize, u32)> {
Some((
self.token_to_sequence(token)?,
self.words.get(token).copied().flatten()?,
))
}
/// Get the token that contains the given char.
pub fn char_to_token(&self, pos: usize, sequence_id: usize) -> Option<usize> {
let sequence_range = self.sequence_range(sequence_id);
self.offsets
.get(sequence_range.clone())?
.iter()
.position(|(start, end)| pos >= *start && pos < *end)
.map(|pos| sequence_range.start + pos)
}
/// Get the word that contains the given char.
pub fn char_to_word(&self, pos: usize, sequence_id: usize) -> Option<u32> {
Some(
self.char_to_token(pos, sequence_id)
.and_then(|token| self.token_to_word(token))?
.1,
)
}
/// Truncate the current `Encoding`.
///
/// Panics if `stride >= max_len`
pub fn truncate(&mut self, max_len: usize, stride: usize, direction: TruncationDirection) {
let encoding_len = self.ids.len();
if max_len >= encoding_len {
return;
}
if max_len == 0 {
let o = std::mem::replace(self, Encoding::with_capacity(0));
self.overflowing.push(o);
return;
}
assert!(stride < max_len, "`stride` must be strictly less than `max_len={}` (note that `max_len` may be shorter than the max length of the original model, as it subtracts the number of special characters)", max_len);
// When truncating, we lose the `sequence_ranges` information.
self.sequence_ranges.clear();
let offset = max_len - stride;
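// Each window starts `offset` tokens after the previous one and spans up to
// `max_len` tokens, so consecutive windows overlap by `stride` tokens.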
let mut end = false;
let parts_ranges: Vec<(usize, usize)> = match direction {
TruncationDirection::Right => (0..encoding_len)
.step_by(offset)
.filter_map(|start| {
if !end {
let stop = std::cmp::min(start + max_len, encoding_len);
end = stop == encoding_len;
Some((start, stop))
} else {
None
}
})
.collect(),
TruncationDirection::Left => (0..encoding_len)
.rev()
.step_by(offset)
.filter_map(|stop| {
let stop = stop + 1;
let start = if stop < max_len { 0 } else { stop - max_len };
if start < stop && !end {
end = start == 0;
Some((start, stop))
} else {
None
}
})
.collect(),
};
let mut i = 0;
let (start, stop) = parts_ranges[i];
let mut new_encoding = Encoding {
ids: self.ids[start..stop].to_vec(),
type_ids: self.type_ids[start..stop].to_vec(),
tokens: self.tokens[start..stop].to_vec(),
words: self.words[start..stop].to_vec(),
offsets: self.offsets[start..stop].to_vec(),
special_tokens_mask: self.special_tokens_mask[start..stop].to_vec(),
attention_mask: self.attention_mask[start..stop].to_vec(),
overflowing: vec![],
sequence_ranges: HashMap::new(),
};
loop {
if i == parts_ranges.len() - 1 {
break;
}
i += 1;
let (start, stop) = parts_ranges[i];
new_encoding.overflowing.push(Encoding {
ids: self.ids[start..stop].to_vec(),
type_ids: self.type_ids[start..stop].to_vec(),
tokens: self.tokens[start..stop].to_vec(),
words: self.words[start..stop].to_vec(),
offsets: self.offsets[start..stop].to_vec(),
special_tokens_mask: self.special_tokens_mask[start..stop].to_vec(),
attention_mask: self.attention_mask[start..stop].to_vec(),
overflowing: vec![],
sequence_ranges: HashMap::new(),
});
}
*self = new_encoding;
}
/// Merge all Encodings together
pub fn merge<I: IntoIterator<Item = Encoding>>(encodings: I, growing_offsets: bool) -> Self {
let mut encoding = Encoding::default();
// TODO this is suboptimal as we're doing this iteratively instead of preallocating
// all the encodings sizes all at once and only copying into this preallocated vector
// https://github.com/huggingface/tokenizers/pull/1049
// In order to fix, we just need to preallocate all vectors, then copy everything
// into it (and deal with overflowings correctly)
for sub in encodings {
encoding.merge_with(sub, growing_offsets);
}
encoding
}
/// Merge ourself with the given `Encoding`. Happens in place.
pub fn merge_with(&mut self, pair: Encoding, growing_offsets: bool) {
// Handle merging the overflowing parts too: Combine them all
// In most of the cases, we expect `pair.overflowing.len() == 0`
let mut overflowings = vec![];
// 1. All our overflowings with all the others
for self_o in &self.overflowing {
// 1. The pair itself
let mut n_encoding = self_o.clone();
n_encoding.merge_with(pair.clone(), growing_offsets);
overflowings.push(n_encoding);
// 2. Its overflowings (this should rarely happen...)
for other_o in &pair.overflowing {
let mut n_encoding = self_o.clone();
n_encoding.merge_with(other_o.clone(), growing_offsets);
overflowings.push(n_encoding);
}
}
// 2. Ourself with all the other overflowings (this should rarely happen too...)
for other_o in &pair.overflowing {
let mut n_encoding = self.clone();
n_encoding.merge_with(other_o.clone(), growing_offsets);
overflowings.push(n_encoding);
}
// Finish by merging ourself with the other encoding
let original_self_len = self.len(); // Must be before any modification to self.ids
self.sequence_ranges
.extend(pair.sequence_ranges.into_iter().map(|(seq_id, range)| {
(
seq_id,
original_self_len + range.start..original_self_len + range.end,
)
}));
self.ids.extend(pair.ids);
self.type_ids.extend(pair.type_ids);
self.tokens.extend(pair.tokens);
self.words.extend(pair.words);
let starting_offset = if growing_offsets {
self.offsets.last().map_or(0, |o| o.1)
} else {
0
};
self.offsets.extend(
pair.offsets
.into_iter()
.map(|(start, end)| (start + starting_offset, end + starting_offset))
.collect::<Vec<_>>(),
);
self.special_tokens_mask.extend(pair.special_tokens_mask);
self.attention_mask.extend(pair.attention_mask);
self.overflowing = overflowings;
}
pub fn pad(
&mut self,
target_length: usize,
pad_id: u32,
pad_type_id: u32,
pad_token: &str,
direction: PaddingDirection,
) {
// Dispatch call to all the overflowings first
self.overflowing.maybe_par_iter_mut().for_each(|encoding| {
encoding.pad(target_length, pad_id, pad_type_id, pad_token, direction)
});
// Then check if we should pad ourself
if self.ids.len() >= target_length {
// We just do nothing if the wanted padding length is smaller than us
return;
}
let pad_length = target_length - self.ids.len();
match direction {
PaddingDirection::Left => {
self.ids = (0..pad_length)
.map(|_| pad_id)
.chain(self.ids.drain(..))
.collect();
self.type_ids = (0..pad_length)
.map(|_| pad_type_id)
.chain(self.type_ids.drain(..))
.collect();
self.tokens = (0..pad_length)
.map(|_| pad_token.to_owned())
.chain(self.tokens.drain(..))
.collect();
self.words = (0..pad_length)
.map(|_| None)
.chain(self.words.drain(..))
.collect();
self.attention_mask = (0..pad_length)
.map(|_| 0)
.chain(self.attention_mask.drain(..))
.collect();
self.special_tokens_mask = (0..pad_length)
.map(|_| 1)
.chain(self.special_tokens_mask.drain(..))
.collect();
self.offsets = (0..pad_length)
.map(|_| (0, 0))
.chain(self.offsets.drain(..))
.collect();
self.sequence_ranges
.iter_mut()
.for_each(|(_seq_id, range)| {
*range = (range.start + pad_length)..(range.end + pad_length)
});
}
PaddingDirection::Right => {
self.ids.extend((0..pad_length).map(|_| pad_id));
self.type_ids.extend((0..pad_length).map(|_| pad_type_id));
self.tokens
.extend((0..pad_length).map(|_| pad_token.to_owned()));
self.words.extend((0..pad_length).map(|_| None));
self.attention_mask.extend((0..pad_length).map(|_| 0));
self.special_tokens_mask.extend((0..pad_length).map(|_| 1));
self.offsets.extend((0..pad_length).map(|_| (0, 0)));
}
}
}
}
impl std::iter::FromIterator<Encoding> for Encoding {
fn from_iter<I: IntoIterator<Item = Encoding>>(iter: I) -> Self {
Self::merge(iter, false)
}
}
impl std::iter::FromIterator<(u32, String, (usize, usize), Option<u32>, u32)> for Encoding {
fn from_iter<I: IntoIterator<Item = (u32, String, (usize, usize), Option<u32>, u32)>>(
iter: I,
) -> Self {
let items = iter.into_iter();
let (lower, upper) = items.size_hint();
let length = upper.unwrap_or(lower);
let mut encoding = Self::with_capacity(length);
for (id, token, offsets, word, type_id) in items {
encoding.ids.push(id);
encoding.tokens.push(token);
encoding.offsets.push(offsets);
encoding.type_ids.push(type_id);
encoding.words.push(word);
encoding.special_tokens_mask.push(0);
encoding.attention_mask.push(1);
}
encoding
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::iter::FromIterator;
#[test]
fn merge_encodings() {
let mut a = Encoding {
ids: vec![1],
type_ids: vec![0],
tokens: vec![String::from("Hello ")],
words: vec![Some(0)],
offsets: vec![(0, 6)],
special_tokens_mask: vec![0],
attention_mask: vec![1],
..Default::default()
};
let b = Encoding {
ids: vec![2],
type_ids: vec![1],
tokens: vec![String::from("World!")],
words: vec![Some(0)],
offsets: vec![(0, 6)],
special_tokens_mask: vec![0],
attention_mask: vec![1],
..Default::default()
};
a.merge_with(b, true);
assert_eq!(
a,
Encoding {
ids: vec![1, 2],
type_ids: vec![0, 1],
tokens: vec![String::from("Hello "), String::from("World!")],
words: vec![Some(0), Some(0)],
offsets: vec![(0, 6), (6, 12)],
special_tokens_mask: vec![0, 0],
attention_mask: vec![1, 1],
..Default::default()
}
);
}
#[test]
fn truncate() {
let mut a = Encoding {
ids: vec![1, 2, 3],
type_ids: vec![0, 0, 0],
tokens: vec![
String::from("Hello"),
String::from("World"),
String::from("!"),
],
words: vec![Some(0), Some(1), Some(2)],
offsets: vec![(0, 5), (6, 11), (11, 12)],
special_tokens_mask: vec![0, 0, 0],
attention_mask: vec![1, 1, 1],
..Default::default()
};
a.truncate(2, 0, TruncationDirection::Right);
assert_eq!(
a,
Encoding {
ids: vec![1, 2],
type_ids: vec![0, 0],
tokens: vec![String::from("Hello"), String::from("World")],
words: vec![Some(0), Some(1)],
offsets: vec![(0, 5), (6, 11)],
special_tokens_mask: vec![0, 0],
attention_mask: vec![1, 1],
overflowing: vec![Encoding {
ids: vec![3],
type_ids: vec![0],
tokens: vec![String::from("!")],
words: vec![Some(2)],
offsets: vec![(11, 12)],
special_tokens_mask: vec![0],
attention_mask: vec![1],
..Default::default()
}],
..Default::default()
}
);
}
#[test]
fn truncate_to_empty() {
let mut a = Encoding {
ids: vec![1, 2, 3],
type_ids: vec![0, 0, 0],
tokens: vec![
String::from("Hello"),
String::from("World"),
String::from("!"),
],
words: vec![Some(0), Some(1), Some(2)],
offsets: vec![(0, 5), (6, 11), (11, 12)],
special_tokens_mask: vec![0, 0, 0],
attention_mask: vec![1, 1, 1],
..Default::default()
};
a.truncate(0, 0, TruncationDirection::Right);
assert_eq!(
a,
Encoding {
overflowing: vec![Encoding {
ids: vec![1, 2, 3],
type_ids: vec![0, 0, 0],
tokens: vec![
String::from("Hello"),
String::from("World"),
String::from("!"),
],
words: vec![Some(0), Some(1), Some(2)],
offsets: vec![(0, 5), (6, 11), (11, 12)],
special_tokens_mask: vec![0, 0, 0],
attention_mask: vec![1, 1, 1],
overflowing: vec![],
..Default::default()
}],
..Default::default()
}
);
}
#[test]
fn truncate_overflow_with_stride() {
let mut enc = Encoding {
ids: vec![1, 2, 3, 4, 5],
type_ids: vec![0, 0, 0, 0, 0],
tokens: vec![
String::from("42"),
String::from("is"),
String::from("the"),
String::from("answer"),
String::from("!"),
],
words: vec![Some(0), Some(1), Some(2), Some(3), Some(4)],
offsets: vec![(0, 2), (2, 4), (4, 7), (7, 13), (13, 14)],
special_tokens_mask: vec![0, 0, 0, 0, 0],
attention_mask: vec![1, 1, 1, 1, 1],
overflowing: vec![],
..Default::default()
};
enc.truncate(4, 2, TruncationDirection::Right);
assert_eq!(
enc,
Encoding {
ids: vec![1, 2, 3, 4],
type_ids: vec![0, 0, 0, 0],
tokens: vec![
String::from("42"),
String::from("is"),
String::from("the"),
String::from("answer"),
],
words: vec![Some(0), Some(1), Some(2), Some(3)],
offsets: vec![(0, 2), (2, 4), (4, 7), (7, 13)],
special_tokens_mask: vec![0, 0, 0, 0],
attention_mask: vec![1, 1, 1, 1],
overflowing: vec![Encoding {
ids: vec![3, 4, 5],
type_ids: vec![0, 0, 0],
tokens: vec![
String::from("the"),
String::from("answer"),
String::from("!"),
],
words: vec![Some(2), Some(3), Some(4)],
offsets: vec![(4, 7), (7, 13), (13, 14)],
special_tokens_mask: vec![0, 0, 0],
attention_mask: vec![1, 1, 1],
overflowing: vec![],
..Default::default()
}],
..Default::default()
}
);
}
#[test]
fn truncate_left() {
let mut a = Encoding {
ids: vec![1, 2, 3],
type_ids: vec![0, 0, 0],
tokens: vec![
String::from("Hello"),
String::from("World"),
String::from("!"),
],
words: vec![Some(0), Some(1), Some(2)],
offsets: vec![(0, 5), (6, 11), (11, 12)],
special_tokens_mask: vec![0, 0, 0],
attention_mask: vec![1, 1, 1],
..Default::default()
};
a.truncate(2, 0, TruncationDirection::Left);
assert_eq!(
a,
Encoding {
ids: vec![2, 3],
type_ids: vec![0, 0],
tokens: vec![String::from("World"), String::from("!")],
words: vec![Some(1), Some(2)],
offsets: vec![(6, 11), (11, 12)],
special_tokens_mask: vec![0, 0],
attention_mask: vec![1, 1],
overflowing: vec![Encoding {
ids: vec![1],
type_ids: vec![0],
tokens: vec![String::from("Hello")],
words: vec![Some(0)],
offsets: vec![(0, 5)],
special_tokens_mask: vec![0],
attention_mask: vec![1],
..Default::default()
}],
..Default::default()
}
);
}
#[test]
fn mappings() {
let encoding = Encoding {
ids: vec![0; 11], // Needed for Encoding::len
tokens: vec![
// First sequence:
"He".into(),
"llo".into(),
"won".into(),
"der".into(),
"ful".into(),
"friend".into(),
"!".into(),
// Second sequence:
"How".into(),
"are".into(),
"you".into(),
"?".into(),
],
offsets: vec![
// First sequence:
(0, 2),
(2, 5),
(7, 10),
(10, 13),
(13, 16),
(17, 23),
(23, 24),
// Second sequence:
(0, 3),
(4, 7),
(8, 11),
(11, 12),
],
words: vec![
// First sequence:
Some(0),
Some(0),
Some(1),
Some(1),
Some(1),
Some(2),
Some(3),
// Second sequence:
Some(0),
Some(1),
Some(2),
Some(3),
],
sequence_ranges: HashMap::from_iter(vec![(0, 0..7), (1, 7..11)]),
..Default::default()
};
assert_eq!(encoding.word_to_tokens(0, 0), Some((0, 2)));
assert_eq!(encoding.word_to_tokens(1, 0), Some((2, 5)));
assert_eq!(encoding.word_to_tokens(2, 0), Some((5, 6)));
assert_eq!(encoding.word_to_tokens(3, 0), Some((6, 7)));
assert_eq!(encoding.word_to_tokens(0, 1), Some((7, 8)));
assert_eq!(encoding.word_to_tokens(1, 1), Some((8, 9)));
assert_eq!(encoding.word_to_tokens(2, 1), Some((9, 10)));
assert_eq!(encoding.word_to_tokens(3, 1), Some((10, 11)));
assert_eq!(encoding.word_to_chars(0, 0), Some((0, 5)));
assert_eq!(encoding.word_to_chars(1, 0), Some((7, 16)));
assert_eq!(encoding.word_to_chars(0, 1), Some((0, 3)));
assert_eq!(encoding.word_to_chars(1, 1), Some((4, 7)));
assert_eq!(encoding.token_to_chars(0), Some((0, (0, 2))));
assert_eq!(encoding.token_to_chars(1), Some((0, (2, 5))));
assert_eq!(encoding.token_to_chars(7), Some((1, (0, 3))));
assert_eq!(encoding.token_to_chars(9), Some((1, (8, 11))));
assert_eq!(encoding.token_to_word(1), Some((0, 0)));
assert_eq!(encoding.token_to_word(2), Some((0, 1)));
assert_eq!(encoding.token_to_word(7), Some((1, 0)));
assert_eq!(encoding.token_to_word(9), Some((1, 2)));
assert_eq!(encoding.token_to_word(11), None);
assert_eq!(encoding.char_to_token(3, 0), Some(1));
assert_eq!(encoding.char_to_token(8, 0), Some(2));
assert_eq!(encoding.char_to_token(16, 0), None);
assert_eq!(encoding.char_to_token(23, 0), Some(6));
assert_eq!(encoding.char_to_token(2, 1), Some(7));
assert_eq!(encoding.char_to_token(9, 1), Some(9));
assert_eq!(encoding.char_to_word(3, 0), Some(0));
assert_eq!(encoding.char_to_word(8, 0), Some(1));
assert_eq!(encoding.char_to_word(16, 0), None);
assert_eq!(encoding.char_to_word(23, 0), Some(3));
assert_eq!(encoding.char_to_word(2, 1), Some(0));
assert_eq!(encoding.char_to_word(9, 1), Some(2));
}
#[test]
fn padding() {
let mut a = Encoding {
ids: vec![1],
type_ids: vec![0],
tokens: vec![String::from("Hello ")],
words: vec![Some(0)],
offsets: vec![(0, 6)],
special_tokens_mask: vec![0],
attention_mask: vec![1],
sequence_ranges: HashMap::from([(0, 0..1)]),
..Default::default()
};
let target_length = 2;
let pad_id = 99;
let pad_type_id = 0;
let pad_token = "[PAD]";
a.pad(
target_length,
pad_id,
pad_type_id,
pad_token,
PaddingDirection::Left,
);
assert_eq!(a.sequence_ranges, HashMap::from([(0, 1..2)]));
}
}
| tokenizers/tokenizers/src/tokenizer/encoding.rs/0 | {
"file_path": "tokenizers/tokenizers/src/tokenizer/encoding.rs",
"repo_id": "tokenizers",
"token_count": 17197
} | 214 |
mod common;
use common::*;
use tokenizers::tokenizer::AddedToken;
#[test]
fn add_tokens() {
let mut tokenizer = get_empty();
assert_eq!(
tokenizer.add_special_tokens(&[
AddedToken::from("<cls>", true),
AddedToken::from("<sep>", true)
]),
2
);
assert_eq!(tokenizer.token_to_id("<cls>"), Some(0));
assert_eq!(tokenizer.token_to_id("<sep>"), Some(1));
assert_eq!(
tokenizer.add_tokens(&[
AddedToken::from("hello", false),
AddedToken::from("world", false)
]),
2
);
assert_eq!(tokenizer.token_to_id("hello"), Some(2));
assert_eq!(tokenizer.token_to_id("world"), Some(3));
}
#[test]
fn lstrip_tokens() {
let mut tokenizer = get_byte_level(true, false);
tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).lstrip(true)]);
let input = "I saw a <mask> ๐บ";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(
output.get_tokens(),
&["ฤ I", "ฤ saw", "ฤ a", " <mask>", "ฤ รฐลฤบ", "ยบ"]
);
assert_eq!(
output.get_offsets(),
&[(0, 1), (1, 5), (5, 7), (7, 14), (14, 19), (15, 19)]
);
}
#[test]
fn rstrip_tokens() {
let mut tokenizer = get_byte_level(false, false);
tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).rstrip(true)]);
let input = "I saw a <mask> ๐บ";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(
output.get_tokens(),
&["I", "ฤ saw", "ฤ a", "ฤ ", "<mask> ", "รฐลฤบ", "ยบ"]
);
// When `add_prefix_space = true` rstrip cannot work as a prefix space is added
// to the next token
let mut tokenizer = get_byte_level(true, false);
tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).rstrip(true)]);
let input = "I saw a <mask> ๐บ";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(
output.get_tokens(),
&["ฤ I", "ฤ saw", "ฤ a", "ฤ ", "<mask> ", "ฤ รฐลฤบ", "ยบ"]
);
}
#[test]
fn single_word_tokens() {
// If `single_word = true` it shouldn't split `dancing`
let mut tokenizer = get_byte_level(false, false);
tokenizer.add_special_tokens(&[AddedToken::from("ing", true).single_word(true)]);
let input = "I like dancing";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(output.get_tokens(), &["I", "ฤ like", "ฤ dancing"]);
// If `single_word = false` it should split `dancing`
let mut tokenizer = get_byte_level(false, false);
tokenizer.add_special_tokens(&[AddedToken::from("ing", true).single_word(false)]);
let input = "I like dancing";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(output.get_tokens(), &["I", "ฤ like", "ฤ d", "anc", "ing"]);
}
#[test]
fn overlapping_tokens() {
let mut tokenizer = get_byte_level(false, false);
tokenizer.add_special_tokens(&[AddedToken::from("danc", true)]);
tokenizer.add_special_tokens(&[AddedToken::from("nci", true)]);
tokenizer.add_special_tokens(&[AddedToken::from("ing", true)]);
let input = "I like dancing";
let output = tokenizer.encode(input, false).unwrap();
assert_eq!(output.get_tokens(), &["I", "ฤ like", "ฤ ", "danc", "ing"]);
let mut tokenizer = get_byte_level(false, false);
tokenizer.add_special_tokens(&[AddedToken::from("nci", true)]);
tokenizer.add_special_tokens(&[AddedToken::from("danc", true)]);
tokenizer.add_special_tokens(&[AddedToken::from("ing", true)]);
tokenizer.add_special_tokens(&[AddedToken::from("ike", true)]);
let output = tokenizer.encode(input, false).unwrap();
// Breaking change but following `transformers` breaking change.
// This behavior is deemed not used in practice:
// https://github.com/huggingface/transformers/pull/13220
// Order does NOT matter. (We could make it work again but the trie
// would need to keep insertion order too)
//
// assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġda", "nci", "ng"]);
assert_eq!(output.get_tokens(), &["I", "Ġl", "ike", "Ġ", "danc", "ing"]);
}
| tokenizers/tokenizers/tests/added_tokens.rs/0 | {
"file_path": "tokenizers/tokenizers/tests/added_tokens.rs",
"repo_id": "tokenizers",
"token_count": 1770
} | 215 |
FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
LABEL maintainer="Hugging Face"
ARG DEBIAN_FRONTEND=noninteractive
# Use login shell to read variables from `~/.profile` (to pass dynamic created variables between RUN commands)
SHELL ["sh", "-lc"]
# The following `ARG` are mainly used to specify the versions explicitly & directly in this docker file, and not meant
# to be used as arguments for docker build (so far).
ARG PYTORCH='2.1.1'
# (not always a valid torch version)
ARG INTEL_TORCH_EXT='2.1.100'
# Example: `cu102`, `cu113`, etc.
ARG CUDA='cu118'
RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs
RUN git lfs install
RUN python3 -m pip install --no-cache-dir --upgrade pip
ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
# TODO: Handle these in a python utility script
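# If PYTORCH is set to a release version (and not "pre"), pin `torch==$PYTORCH.*`;
# otherwise fall back to installing the latest `torch`. The resolved spec is written
# to ~/.profile so later RUN commands can reuse it.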
RUN [ ${#PYTORCH} -gt 0 -a "$PYTORCH" != "pre" ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; echo "export VERSION='$VERSION'" >> ~/.profile
RUN echo torch=$VERSION
# `torchvision` and `torchaudio` should be installed along with `torch`, especially for nightly build.
# Currently, let's just use their latest releases (when `torch` is installed with a release version)
# TODO: We might need to specify proper versions that work with a specific torch version (especially for past CI).
RUN [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA
RUN python3 -m pip install --no-cache-dir -U tensorflow==2.13 protobuf==3.20.3 tensorflow_text tensorflow_probability
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime]
RUN python3 -m pip uninstall -y flax jax
RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT -f https://developer.intel.com/ipex-whl-stable-cpu
RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract
RUN python3 -m pip install -U "itsdangerous<2.1.0"
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/peft@main#egg=peft
# Add bitsandbytes for mixed int8 testing
RUN python3 -m pip install --no-cache-dir bitsandbytes
# Add auto-gptq for gptq quantization testing
RUN python3 -m pip install --no-cache-dir auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/
# Add einops for additional model testing
RUN python3 -m pip install --no-cache-dir einops
# Add autoawq for quantization testing
RUN python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.1.8/autoawq-0.1.8+cu118-cp38-cp38-linux_x86_64.whl
# For bettertransformer + gptq
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum
# For video model testing
RUN python3 -m pip install --no-cache-dir decord av==9.2.0
# For `dinat` model
RUN python3 -m pip install --no-cache-dir 'natten<0.15.0' -f https://shi-labs.com/natten/wheels/$CUDA/
# For `nougat` tokenizer
RUN python3 -m pip install --no-cache-dir python-Levenshtein
# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
| transformers/docker/transformers-all-latest-gpu/Dockerfile/0 | {
"file_path": "transformers/docker/transformers-all-latest-gpu/Dockerfile",
"repo_id": "transformers",
"token_count": 1267
} | 216 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Quick tour
[[open-in-colab]]
Get up and running with 🤗 Transformers! Use the [`pipeline`] for fast inference, and quickly load a pretrained model and tokenizer with an [AutoClass](./model_doc/auto) to solve your text, image, or audio task.
<Tip>
All code examples presented in the documentation have a toggle on the top left for PyTorch and TensorFlow. If not, the
code is expected to work for both backends without any change.
</Tip>
## Pipeline
The [`pipeline`] is the easiest way to use a pretrained model for a given task.
<Youtube id="tiZFewofSLM"/>
The [`pipeline`] supports many common tasks out-of-the-box:
**Text**:
* Sentiment analysis: classify the polarity of a given text.
* Text generation (in English): generate text from a given input.
* Named entity recognition (NER): label each word with the entity it represents (person, date, location, etc.).
* Question answering: extract the answer from the context, given some context and a question.
* Fill-mask: fill in the blanks in a text with masked words.
* Summarization: generate a summary of a long sequence of text or a document.
* Translation: translate a text into another language.
* Feature extraction: create a tensor representation of the text.
**Image**:
* Image classification: classify an image.
* Image segmentation: classify every pixel in an image.
* Object detection: detect objects within an image.
**Audio**:
* Audio classification: assign a label to a given segment of audio.
* Automatic speech recognition (ASR): transcribe audio data into text.
<Tip>
For more details about the [`pipeline`] and its associated tasks, refer to the documentation [here](./main_classes/pipelines).
</Tip>
### Pipeline usage
In the following example, you will use the [`pipeline`] for sentiment analysis.
Install the following dependencies if you haven't already:
<frameworkcontent>
<pt>
```bash
pip install torch
```
</pt>
<tf>
```bash
pip install tensorflow
```
</tf>
</frameworkcontent>
Import the [`pipeline`] and specify the task you want to solve:
```py
>>> from transformers import pipeline
>>> classifier = pipeline("sentiment-analysis")
```
The pipeline downloads and caches a default [pretrained model](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) and tokenizer for sentiment analysis. Now you can apply the `classifier` to your target text:
```py
>>> classifier("We are very happy to show you the ๐ค Transformers library.")
[{'label': 'POSITIVE', 'score': 0.9998}]
```
For more than one sentence, pass a list of sentences to the [`pipeline`] which returns a list of dictionaries:
```py
>>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."])
>>> for result in results:
... print(f"label: {result['label']}, with score: {round(result['score'], 4)}")
label: POSITIVE, with score: 0.9998
label: NEGATIVE, with score: 0.5309
```
The [`pipeline`] can also iterate over an entire dataset. Start by installing the [🤗 Datasets](https://huggingface.co/docs/datasets/) library:
```bash
pip install datasets
```
Create a [`pipeline`] with the task you want to solve and the model you want to use:
```py
>>> import torch
>>> from transformers import pipeline
>>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
```
Next, load the dataset you would like to use (see the 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart) for more details). For example, load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset:
```py
>>> from datasets import load_dataset, Audio
>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT
```
We need to make sure the sampling rate of the dataset matches the sampling rate `facebook/wav2vec2-base-960h` was trained on.
```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))
```
Audio files are automatically loaded and resampled when the `"audio"` column is accessed.
Let's extract the raw waveform arrays of the first 4 samples and pass them as a list to the pipeline:
```py
>>> result = speech_recognizer(dataset[:4]["audio"])
>>> print([d["text"] for d in result])
['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FODING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE AP SO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I THURN A JOIN A COUNT']
```
For a larger dataset where the inputs are big (as in speech or vision), you will want to pass a generator instead of a list, so that all inputs don't have to be loaded into memory at once; a minimal sketch is shown below. See the [pipeline documentation](./main_classes/pipelines) for more information.
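As a rough sketch (reusing the `speech_recognizer` and `dataset` objects created above), a generator lets the pipeline pull inputs one at a time instead of keeping them all in memory:
```py
>>> def audio_inputs():
...     for sample in dataset:
...         yield sample["audio"]

>>> for prediction in speech_recognizer(audio_inputs()):  # doctest: +SKIP
...     print(prediction["text"])
```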
### Use another model and tokenizer in the pipeline
The [`pipeline`] can accommodate any model from the [Model Hub](https://huggingface.co/models), making it easy to adapt the [`pipeline`] for other use cases. For example, if you'd like a model capable of handling French text, use the tags on the Model Hub to filter for an appropriate model. The top filtered result returns a multilingual [BERT model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) fine-tuned for sentiment analysis. Great, let's use this model!
```py
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
```
<frameworkcontent>
<pt>
Use the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on an `AutoClass` below):
```py
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
>>> model = AutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</pt>
<tf>
Use the [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on a `TFAutoClass` below):
```py
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
>>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</tf>
</frameworkcontent>
Then you can specify the model and tokenizer in the [`pipeline`] and apply the `classifier` to your target text:
```py
>>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
>>> classifier("Nous sommes trรจs heureux de vous prรฉsenter la bibliothรจque ๐ค Transformers.")
[{'label': '5 stars', 'score': 0.7273}]
```
If you can't find a model for your use case, you will need to fine-tune a pretrained model on your data. Take a look at our [fine-tuning tutorial](./training) to learn how. Finally, after you have fine-tuned your model, please consider sharing it with the community on the Model Hub (see the tutorial [here](./model_sharing)) to democratize NLP for everyone! 🤗
## AutoClass
<Youtube id="AhChOFRegn4"/>
Under the hood, the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] classes work together to power the [`pipeline`]. An [`AutoClass`](./model_doc/auto) is a shortcut that automatically retrieves the architecture of a pretrained model from its name or path. You only need to select the appropriate `AutoClass` for your task and the associated tokenizer with [`AutoTokenizer`].
Let's return to our example and see how you can use the `AutoClass` to replicate the results of the [`pipeline`].
### AutoTokenizer
A tokenizer is responsible for preprocessing text into a format that is understandable to the model. First, the tokenizer splits the text into words called *tokens*. There are multiple rules that govern the tokenization process, including how and at which level a word is split (learn more about tokenization [here](./tokenizer_summary)). The most important thing to remember, though, is that you need to instantiate the tokenizer with the same model name to make sure you're using the same tokenization rules the model was pretrained with.
Load a tokenizer with [`AutoTokenizer`]:
```py
>>> from transformers import AutoTokenizer
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
The tokenizer then converts the tokens into numbers to construct a tensor as input to the model. This is known as the model's *vocabulary*.
Pass your text to the tokenizer:
```py
>>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.")
>>> print(encoding)
{'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```
The tokenizer returns a dictionary containing:
* [input_ids](./glossary#input-ids): numerical representations of your tokens.
* [attention_mask](./glossary#attention-mask): indicates which tokens should be attended to.
Just like the [`pipeline`], the tokenizer accepts a list of inputs. In addition, the tokenizer can also pad and truncate the text to return a batch with uniform length:
<frameworkcontent>
<pt>
```py
>>> pt_batch = tokenizer(
... ["We are very happy to show you the ๐ค Transformers library.", "We hope you don't hate it."],
... padding=True,
... truncation=True,
... max_length=512,
... return_tensors="pt",
... )
```
</pt>
<tf>
```py
>>> tf_batch = tokenizer(
... ["We are very happy to show you the ๐ค Transformers library.", "We hope you don't hate it."],
... padding=True,
... truncation=True,
... max_length=512,
... return_tensors="tf",
... )
```
</tf>
</frameworkcontent>
Read the [preprocessing](./preprocessing) tutorial for more details about tokenization.
### AutoModel
<frameworkcontent>
<pt>
🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`AutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`AutoModel`] for the task. Since you are doing text (or sequence) classification, load [`AutoModelForSequenceClassification`]:
```py
>>> from transformers import AutoModelForSequenceClassification
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)
```
<Tip>
The [task summary](./task_summary) describes which [`AutoModel`] class to use for which task.
</Tip>
Now you can pass your preprocessed batch of inputs directly to the model. You just have to unpack the dictionary by adding `**`:
```py
>>> pt_outputs = pt_model(**pt_batch)
```
The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities:
```py
>>> from torch import nn
>>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)
>>> print(pt_predictions)
tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725],
[0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>)
```
</pt>
<tf>
🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load a [`TFAutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`TFAutoModel`] for the task. Since you are doing text (or sequence) classification, load [`TFAutoModelForSequenceClassification`]:
```py
>>> from transformers import TFAutoModelForSequenceClassification
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
```
<Tip>
The [task summary](./task_summary) describes which [`AutoModel`] class to use for which task.
</Tip>
Now you can pass your preprocessed batch of inputs directly to the model by passing the dictionary keys directly to the tensors:
```py
>>> tf_outputs = tf_model(tf_batch)
```
The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities:
```py
>>> import tensorflow as tf
>>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
>>> tf_predictions # doctest: +IGNORE_RESULT
```
</tf>
</frameworkcontent>
<Tip>
All 🤗 Transformers models (PyTorch or TensorFlow) output the tensors *before* the final activation
function (like softmax) because the final activation function is often fused with the loss.
</Tip>
Models are a standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model), so you can use them in your usual training loop. However, to make things easier, 🤗 Transformers provides a [`Trainer`] class for PyTorch that adds functionality for distributed training, mixed precision, and more. For TensorFlow, you can use the `fit` method from [Keras](https://keras.io/). See the [training tutorial](./training) for more details.
<Tip>
Transformers model outputs are special dataclasses, so their attributes are autocompleted in an IDE.
The model outputs also behave like a tuple or a dictionary (e.g., you can index with an integer, a slice, or a string), in which case attributes that are `None` are ignored.
</Tip>
### Save a model
<frameworkcontent>
<pt>
Once your model is fine-tuned, you can save it with its tokenizer using [`PreTrainedModel.save_pretrained`]:
```py
>>> pt_save_directory = "./pt_save_pretrained"
>>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT
>>> pt_model.save_pretrained(pt_save_directory)
```
When you are ready to use the model again, reload it with [`PreTrainedModel.from_pretrained`]:
```py
>>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained")
```
</pt>
<tf>
Once your model is fine-tuned, you can save it with its tokenizer using [`TFPreTrainedModel.save_pretrained`]:
```py
>>> tf_save_directory = "./tf_save_pretrained"
>>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT
>>> tf_model.save_pretrained(tf_save_directory)
```
When you are ready to use the model again, reload it with [`TFPreTrainedModel.from_pretrained`]:
```py
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained")
```
</tf>
</frameworkcontent>
One particularly cool 🤗 Transformers feature is the ability to save a model and reload it as either a PyTorch or TensorFlow model. The `from_pt` or `from_tf` parameter can convert the model from one framework to the other:
<frameworkcontent>
<pt>
```py
>>> from transformers import AutoModel
>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
```
</pt>
<tf>
```py
>>> from transformers import TFAutoModel
>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
```
</tf>
</frameworkcontent>
## Custom model builds
You can modify the model's configuration class to change how a model is built. The configuration specifies a model's attributes, such as the number of hidden layers or attention heads. When you initialize a model from a custom configuration class, you start from scratch: the model attributes are randomly initialized, and you will need to train the model before you can use it to get meaningful results.
Start by importing [`AutoConfig`], and then load the pretrained model you want to modify. Within [`AutoConfig.from_pretrained`], you can specify the attribute you want to change, such as the number of attention heads:
```py
>>> from transformers import AutoConfig
>>> my_config = AutoConfig.from_pretrained("distilbert-base-uncased", n_heads=12)
```
<frameworkcontent>
<pt>
Create a model from your custom configuration with [`AutoModel.from_config`]:
```py
>>> from transformers import AutoModel
>>> my_model = AutoModel.from_config(my_config)
```
</pt>
<tf>
Create a model from your custom configuration with [`TFAutoModel.from_config`]:
```py
>>> from transformers import TFAutoModel
>>> my_model = TFAutoModel.from_config(my_config)
```
</tf>
</frameworkcontent>
Take a look at the [Create a custom architecture](./create_a_model) guide for more information about building custom configurations.
## What's next?
Now that you've completed the 🤗 Transformers quick tour, check out our guides and learn how to do more specific things like writing a custom model, fine-tuning a model for a task, and training a model with a script. If you're interested in learning more about 🤗 Transformers core concepts, grab a cup of coffee and take a look at our conceptual guides!
| transformers/docs/source/de/quicktour.md/0 | {
"file_path": "transformers/docs/source/de/quicktour.md",
"repo_id": "transformers",
"token_count": 7324
} | 217 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Instantiating a big model
When you want to use a very big pretrained model, one challenge is to minimize the use of the RAM. The usual workflow
from PyTorch is:
1. Create your model with random weights.
2. Load your pretrained weights.
3. Put those pretrained weights in your random model.
Steps 1 and 2 both require a full version of the model in memory, which is not a problem in most cases, but if your model starts weighing several gigabytes, those two copies can make you run out of RAM. Even worse, if you are using `torch.distributed` to launch a distributed training, each process will load the pretrained model and store these two copies in RAM.
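In code, the standard workflow above looks roughly like the sketch below (the checkpoint filename is only an illustration):

```py
import torch
from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained("bert-base-cased")
model = AutoModel.from_config(config)  # 1. full model with random weights in RAM
state_dict = torch.load("pytorch_model.bin")  # 2. a second full copy of the weights in RAM
model.load_state_dict(state_dict)  # 3. put the pretrained weights in the random model
```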
<Tip>
Note that the randomly created model is initialized with "empty" tensors, which take the space in memory without filling it (thus the random values are whatever was in this chunk of memory at a given time). The random initialization following the appropriate distribution for the kind of model/parameters instantiated (like a normal distribution for instance) is only performed after step 3 on the non-initialized weights, to be as fast as possible!
</Tip>
In this guide, we explore the solutions Transformers offer to deal with this issue. Note that this is an area of active development, so the APIs explained here may change slightly in the future.
## Sharded checkpoints
Since version 4.18.0, model checkpoints that end up taking more than 10GB of space are automatically sharded in smaller pieces. This means that instead of having one single checkpoint when you do `model.save_pretrained(save_dir)`, you will end up with several partial checkpoints (each of which being of size < 10GB) and an index that maps parameter names to the files they are stored in.
You can control the maximum size before sharding with the `max_shard_size` parameter, so for the sake of an example, we'll use a normal-size model with a small shard size: let's take a traditional BERT model.
```py
from transformers import AutoModel
model = AutoModel.from_pretrained("bert-base-cased")
```
If you save it using [`~PreTrainedModel.save_pretrained`], you will get a new folder with two files: the config of the model and its weights:
```py
>>> import os
>>> import tempfile
>>> with tempfile.TemporaryDirectory() as tmp_dir:
... model.save_pretrained(tmp_dir)
... print(sorted(os.listdir(tmp_dir)))
['config.json', 'pytorch_model.bin']
```
Now let's use a maximum shard size of 200MB:
```py
>>> with tempfile.TemporaryDirectory() as tmp_dir:
... model.save_pretrained(tmp_dir, max_shard_size="200MB")
... print(sorted(os.listdir(tmp_dir)))
['config.json', 'pytorch_model-00001-of-00003.bin', 'pytorch_model-00002-of-00003.bin', 'pytorch_model-00003-of-00003.bin', 'pytorch_model.bin.index.json']
```
On top of the configuration of the model, we see three different weights files, and an `index.json` file which is our index. A checkpoint like this can be fully reloaded using the [`~PreTrainedModel.from_pretrained`] method:
```py
>>> with tempfile.TemporaryDirectory() as tmp_dir:
... model.save_pretrained(tmp_dir, max_shard_size="200MB")
... new_model = AutoModel.from_pretrained(tmp_dir)
```
The main advantage of doing this for big models is that during step 2 of the workflow shown above, each shard of the checkpoint is loaded after the previous one, capping the memory usage in RAM to the model size plus the size of the biggest shard.
Behind the scenes, the index file is used to determine which keys are in the checkpoint, and where the corresponding weights are stored. We can load that index like any json and get a dictionary:
```py
>>> import json
>>> with tempfile.TemporaryDirectory() as tmp_dir:
... model.save_pretrained(tmp_dir, max_shard_size="200MB")
... with open(os.path.join(tmp_dir, "pytorch_model.bin.index.json"), "r") as f:
... index = json.load(f)
>>> print(index.keys())
dict_keys(['metadata', 'weight_map'])
```
The metadata just consists of the total size of the model for now. We plan to add other information in the future:
```py
>>> index["metadata"]
{'total_size': 433245184}
```
The weights map is the main part of this index, which maps each parameter name (as usually found in a PyTorch model `state_dict`) to the file it's stored in:
```py
>>> index["weight_map"]
{'embeddings.LayerNorm.bias': 'pytorch_model-00001-of-00003.bin',
'embeddings.LayerNorm.weight': 'pytorch_model-00001-of-00003.bin',
...
```
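Since the weight map is a plain dictionary, you can query it directly. Here is a minimal sketch (building on the `index` loaded above) that looks up the shard storing a given parameter and counts how many tensors live in each shard file:

```py
>>> import collections
>>> # Which shard file stores a given parameter?
>>> index["weight_map"]["embeddings.LayerNorm.bias"]
'pytorch_model-00001-of-00003.bin'
>>> # How many parameter tensors does each shard file contain?
>>> collections.Counter(index["weight_map"].values())
```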
If you want to directly load such a sharded checkpoint inside a model without using [`~PreTrainedModel.from_pretrained`] (like you would do `model.load_state_dict()` for a full checkpoint) you should use [`~modeling_utils.load_sharded_checkpoint`]:
```py
>>> from transformers.modeling_utils import load_sharded_checkpoint
>>> with tempfile.TemporaryDirectory() as tmp_dir:
... model.save_pretrained(tmp_dir, max_shard_size="200MB")
... load_sharded_checkpoint(model, tmp_dir)
```
## Low memory loading
Sharded checkpoints reduce the memory usage during step 2 of the workflow mentioned above, but in order to use that model in a low memory setting, we recommend leveraging our tools based on the Accelerate library.
Please read the following guide for more information: [Large model loading using Accelerate](./main_classes/model#large-model-loading)
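As a quick preview, the sketch below shows the general shape of such a low-memory load; it assumes the `accelerate` package is installed, and any large checkpoint (here `bigscience/bloom-560m` as a stand-in) works the same way:

```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m",  # any large checkpoint can stand in here
    low_cpu_mem_usage=True,   # materialize the weights shard by shard instead of building a random copy first
    device_map="auto",        # let Accelerate place the weights on the available devices
)
```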
| transformers/docs/source/en/big_models.md/0 | {
"file_path": "transformers/docs/source/en/big_models.md",
"repo_id": "transformers",
"token_count": 1718
} | 218 |
<!---
Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# Installation
Install 🤗 Transformers for whichever deep learning library you're working with, set up your cache, and optionally configure 🤗 Transformers to run offline.
🤗 Transformers is tested on Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:
* [PyTorch](https://pytorch.org/get-started/locally/) installation instructions.
* [TensorFlow 2.0](https://www.tensorflow.org/install/pip) installation instructions.
* [Flax](https://flax.readthedocs.io/en/latest/) installation instructions.
## Install with pip
You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). A virtual environment makes it easier to manage different projects, and avoid compatibility issues between dependencies.
Start by creating a virtual environment in your project directory:
```bash
python -m venv .env
```
Activate the virtual environment. On Linux and macOS:
```bash
source .env/bin/activate
```
Activate the virtual environment on Windows:
```bash
.env/Scripts/activate
```
Now you're ready to install 🤗 Transformers with the following command:
```bash
pip install transformers
```
For CPU support only, you can conveniently install 🤗 Transformers and a deep learning library in one line. For example, install 🤗 Transformers and PyTorch with:
```bash
pip install 'transformers[torch]'
```
🤗 Transformers and TensorFlow 2.0:
```bash
pip install 'transformers[tf-cpu]'
```
<Tip warning={true}>
M1 / ARM Users
You will need to install the following before installing TensorFlow 2.0:
```
brew install cmake
brew install pkg-config
```
</Tip>
🤗 Transformers and Flax:
```bash
pip install 'transformers[flax]'
```
Finally, check if 🤗 Transformers has been properly installed by running the following command. It will download a pretrained model:
```bash
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))"
```
The command then prints the label and score:
```bash
[{'label': 'POSITIVE', 'score': 0.9998704791069031}]
```
## Install from source
Install 🤗 Transformers from source with the following command:
```bash
pip install git+https://github.com/huggingface/transformers
```
This command installs the bleeding edge `main` version rather than the latest `stable` version. The `main` version is useful for staying up-to-date with the latest developments. For instance, if a bug has been fixed since the last official release but a new release hasn't been rolled out yet. However, this means the `main` version may not always be stable. We strive to keep the `main` version operational, and most issues are usually resolved within a few hours or a day. If you run into a problem, please open an [Issue](https://github.com/huggingface/transformers/issues) so we can fix it even sooner!
Check if 🤗 Transformers has been properly installed by running the following command:
```bash
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))"
```
## Editable install
You will need an editable install if you'd like to:
* Use the `main` version of the source code.
* Contribute to 🤗 Transformers and need to test changes in the code.
Clone the repository and install 🤗 Transformers with the following commands:
```bash
git clone https://github.com/huggingface/transformers.git
cd transformers
pip install -e .
```
These commands link the folder you cloned the repository into with your Python library paths. Python will now look inside the folder you cloned to, in addition to the normal library paths. For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.7/site-packages/`, Python will also search the folder you cloned to: `~/transformers/`.
<Tip warning={true}>
You must keep the `transformers` folder if you want to keep using the library.
</Tip>
Now you can easily update your clone to the latest version of 🤗 Transformers with the following command:
```bash
cd ~/transformers/
git pull
```
Your Python environment will find the `main` version of 🤗 Transformers on the next run.
## Install with conda
Install from the conda channel `conda-forge`:
```bash
conda install conda-forge::transformers
```
## Cache setup
Pretrained models are downloaded and locally cached at: `~/.cache/huggingface/hub`. This is the default directory given by the shell environment variable `TRANSFORMERS_CACHE`. On Windows, the default directory is given by `C:\Users\username\.cache\huggingface\hub`. You can change the shell environment variables shown below - in order of priority - to specify a different cache directory:
1. Shell environment variable (default): `HUGGINGFACE_HUB_CACHE` or `TRANSFORMERS_CACHE`.
2. Shell environment variable: `HF_HOME`.
3. Shell environment variable: `XDG_CACHE_HOME` + `/huggingface`.
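For example, to relocate the cache you could export one of these variables in your shell before running your script (a sketch assuming a bash-like shell; the paths are placeholders):

```bash
# Move the whole Hugging Face cache ...
export HF_HOME=/path/to/cache
# ... or only the Transformers cache
export TRANSFORMERS_CACHE=/path/to/cache/transformers
```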
<Tip>
🤗 Transformers will use the shell environment variables `PYTORCH_TRANSFORMERS_CACHE` or `PYTORCH_PRETRAINED_BERT_CACHE` if you are coming from an earlier iteration of this library and have set those environment variables, unless you specify the shell environment variable `TRANSFORMERS_CACHE`.
</Tip>
## Offline mode
Run 🤗 Transformers in a firewalled or offline environment with locally cached files by setting the environment variable `TRANSFORMERS_OFFLINE=1`.
<Tip>
Add [🤗 Datasets](https://huggingface.co/docs/datasets/) to your offline training workflow with the environment variable `HF_DATASETS_OFFLINE=1`.
</Tip>
```bash
HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ...
```
This script should run without hanging or waiting to timeout because it won't attempt to download the model from the Hub.
You can also bypass loading a model from the Hub from each [`~PreTrainedModel.from_pretrained`] call with the [`local_files_only`] parameter. When set to `True`, only local files are loaded:
```py
from transformers import T5Model
model = T5Model.from_pretrained("./path/to/local/directory", local_files_only=True)
```
### Fetch models and tokenizers to use offline
Another option for using 🤗 Transformers offline is to download the files ahead of time, and then point to their local path when you need to use them offline. There are three ways to do this:
* Download a file through the user interface on the [Model Hub](https://huggingface.co/models) by clicking on the ↓ icon.

* Use the [`PreTrainedModel.from_pretrained`] and [`PreTrainedModel.save_pretrained`] workflow:
1. Download your files ahead of time with [`PreTrainedModel.from_pretrained`]:
```py
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
>>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B")
```
2. Save your files to a specified directory with [`PreTrainedModel.save_pretrained`]:
```py
>>> tokenizer.save_pretrained("./your/path/bigscience_t0")
>>> model.save_pretrained("./your/path/bigscience_t0")
```
3. Now when you're offline, reload your files with [`PreTrainedModel.from_pretrained`] from the specified directory:
```py
>>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("./your/path/bigscience_t0")
```
* Programmatically download files with the [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub) library:
1. Install the `huggingface_hub` library in your virtual environment:
```bash
python -m pip install huggingface_hub
```
2. Use the [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub) function to download a file to a specific path. For example, the following command downloads the `config.json` file from the [T0](https://huggingface.co/bigscience/T0_3B) model to your desired path:
```py
>>> from huggingface_hub import hf_hub_download
>>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0")
```
Once your file is downloaded and locally cached, specify its local path to load and use it:
```py
>>> from transformers import AutoConfig
>>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json")
```
<Tip>
See the [How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream) section for more details on downloading files stored on the Hub.
</Tip>
| transformers/docs/source/en/installation.md/0 | {
"file_path": "transformers/docs/source/en/installation.md",
"repo_id": "transformers",
"token_count": 2895
} | 219 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# Data Collator
Data collators are objects that will form a batch by using a list of dataset elements as input. These elements are of
the same type as the elements of `train_dataset` or `eval_dataset`.
To be able to build batches, data collators may apply some processing (like padding). Some of them (like
[`DataCollatorForLanguageModeling`]) also apply some random data augmentation (like random masking)
on the formed batch.
Examples of use can be found in the [example scripts](../examples) or [example notebooks](../notebooks).
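As a minimal sketch of the idea (the `bert-base-cased` tokenizer is used here purely as an example), [`DataCollatorWithPadding`] takes a list of tokenized examples of different lengths and pads them into a single rectangular batch:

```py
from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

# Two examples of different lengths ...
features = [tokenizer("Hello world"), tokenizer("A slightly longer example sentence")]
# ... are padded into one batch of PyTorch tensors of identical shape.
batch = data_collator(features)
print(batch["input_ids"].shape)
```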
## Default data collator
[[autodoc]] data.data_collator.default_data_collator
## DefaultDataCollator
[[autodoc]] data.data_collator.DefaultDataCollator
## DataCollatorWithPadding
[[autodoc]] data.data_collator.DataCollatorWithPadding
## DataCollatorForTokenClassification
[[autodoc]] data.data_collator.DataCollatorForTokenClassification
## DataCollatorForSeq2Seq
[[autodoc]] data.data_collator.DataCollatorForSeq2Seq
## DataCollatorForLanguageModeling
[[autodoc]] data.data_collator.DataCollatorForLanguageModeling
- numpy_mask_tokens
- tf_mask_tokens
- torch_mask_tokens
## DataCollatorForWholeWordMask
[[autodoc]] data.data_collator.DataCollatorForWholeWordMask
- numpy_mask_tokens
- tf_mask_tokens
- torch_mask_tokens
## DataCollatorForPermutationLanguageModeling
[[autodoc]] data.data_collator.DataCollatorForPermutationLanguageModeling
- numpy_mask_tokens
- tf_mask_tokens
- torch_mask_tokens
| transformers/docs/source/en/main_classes/data_collator.md/0 | {
"file_path": "transformers/docs/source/en/main_classes/data_collator.md",
"repo_id": "transformers",
"token_count": 681
} | 220 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# ALBERT
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=albert">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-albert-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/albert-base-v2">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
The ALBERT model was proposed in [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942) by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma,
Radu Soricut. It presents two parameter-reduction techniques to lower memory consumption and increase the training
speed of BERT:
- Splitting the embedding matrix into two smaller matrices.
- Using repeating layers split among groups.
The abstract from the paper is the following:
*Increasing model size when pretraining natural language representations often results in improved performance on
downstream tasks. However, at some point further model increases become harder due to GPU/TPU memory limitations,
longer training times, and unexpected model degradation. To address these problems, we present two parameter-reduction
techniques to lower memory consumption and increase the training speed of BERT. Comprehensive empirical evidence shows
that our proposed methods lead to models that scale much better compared to the original BERT. We also use a
self-supervised loss that focuses on modeling inter-sentence coherence, and show it consistently helps downstream tasks
with multi-sentence inputs. As a result, our best model establishes new state-of-the-art results on the GLUE, RACE, and
SQuAD benchmarks while having fewer parameters compared to BERT-large.*
This model was contributed by [lysandre](https://huggingface.co/lysandre). This model jax version was contributed by
[kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/google-research/ALBERT).
## Usage tips
- ALBERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather
than the left.
- ALBERT uses repeating layers which results in a small memory footprint, however the computational cost remains
similar to a BERT-like architecture with the same number of hidden layers as it has to iterate through the same
number of (repeating) layers.
- Embedding size E is kept different from hidden size H because the embeddings are context independent (one embedding vector represents one token), whereas hidden states are context dependent (one hidden state represents a whole sequence of tokens), so it makes sense to have H >> E. Also, the embedding matrix is large since it is V x E (V being the vocab size). If E < H, it has fewer parameters (see the sketch after this list).
- Layers are split in groups that share parameters (to save memory).
- Next sentence prediction is replaced by a sentence ordering prediction: in the inputs, we have two consecutive sentences A and B, and we feed either A followed by B or B followed by A. The model must predict whether they have been swapped or not.
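To make the parameter-reduction tips concrete, the following sketch compares an ALBERT encoder with a BERT encoder of the same hidden size (the configuration values are illustrative, roughly matching the base-sized checkpoints):

```python
from transformers import AlbertConfig, AlbertModel, BertConfig, BertModel

# ALBERT: small embedding size E, larger hidden size H, layers shared across groups.
albert = AlbertModel(
    AlbertConfig(embedding_size=128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
)
# BERT: the embedding size is tied to the hidden size and no layers are shared.
bert = BertModel(BertConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072))

print(sum(p.numel() for p in albert.parameters()))  # roughly an order of magnitude fewer parameters
print(sum(p.numel() for p in bert.parameters()))
```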
## Resources
The resources provided in the following sections consist of a list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ALBERT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
<PipelineTag pipeline="text-classification"/>
- [`AlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification).
- [`TFAlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification).
- [`FlaxAlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb).
- Check the [Text classification task guide](../tasks/sequence_classification) on how to use the model.
<PipelineTag pipeline="token-classification"/>
- [`AlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification).
- [`TFAlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb).
- [`FlaxAlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification).
- [Token classification](https://huggingface.co/course/chapter7/2?fw=pt) chapter of the 🤗 Hugging Face Course.
- Check the [Token classification task guide](../tasks/token_classification) on how to use the model.
<PipelineTag pipeline="fill-mask"/>
- [`AlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb).
- [`TFAlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
- [`FlaxAlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb).
- [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the 🤗 Hugging Face Course.
- Check the [Masked language modeling task guide](../tasks/masked_language_modeling) on how to use the model.
<PipelineTag pipeline="question-answering"/>
- [`AlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb).
- [`TFAlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb).
- [`FlaxAlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering).
- [Question answering](https://huggingface.co/course/chapter7/7?fw=pt) chapter of the 🤗 Hugging Face Course.
- Check the [Question answering task guide](../tasks/question_answering) on how to use the model.
**Multiple choice**
- [`AlbertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb).
- [`TFAlbertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb).
- Check the [Multiple choice task guide](../tasks/multiple_choice) on how to use the model.
## AlbertConfig
[[autodoc]] AlbertConfig
## AlbertTokenizer
[[autodoc]] AlbertTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## AlbertTokenizerFast
[[autodoc]] AlbertTokenizerFast
## Albert specific outputs
[[autodoc]] models.albert.modeling_albert.AlbertForPreTrainingOutput
[[autodoc]] models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput
<frameworkcontent>
<pt>
## AlbertModel
[[autodoc]] AlbertModel
- forward
## AlbertForPreTraining
[[autodoc]] AlbertForPreTraining
- forward
## AlbertForMaskedLM
[[autodoc]] AlbertForMaskedLM
- forward
## AlbertForSequenceClassification
[[autodoc]] AlbertForSequenceClassification
- forward
## AlbertForMultipleChoice
[[autodoc]] AlbertForMultipleChoice
- forward
## AlbertForTokenClassification
[[autodoc]] AlbertForTokenClassification
- forward
## AlbertForQuestionAnswering
[[autodoc]] AlbertForQuestionAnswering
- forward
</pt>
<tf>
## TFAlbertModel
[[autodoc]] TFAlbertModel
- call
## TFAlbertForPreTraining
[[autodoc]] TFAlbertForPreTraining
- call
## TFAlbertForMaskedLM
[[autodoc]] TFAlbertForMaskedLM
- call
## TFAlbertForSequenceClassification
[[autodoc]] TFAlbertForSequenceClassification
- call
## TFAlbertForMultipleChoice
[[autodoc]] TFAlbertForMultipleChoice
- call
## TFAlbertForTokenClassification
[[autodoc]] TFAlbertForTokenClassification
- call
## TFAlbertForQuestionAnswering
[[autodoc]] TFAlbertForQuestionAnswering
- call
</tf>
<jax>
## FlaxAlbertModel
[[autodoc]] FlaxAlbertModel
- __call__
## FlaxAlbertForPreTraining
[[autodoc]] FlaxAlbertForPreTraining
- __call__
## FlaxAlbertForMaskedLM
[[autodoc]] FlaxAlbertForMaskedLM
- __call__
## FlaxAlbertForSequenceClassification
[[autodoc]] FlaxAlbertForSequenceClassification
- __call__
## FlaxAlbertForMultipleChoice
[[autodoc]] FlaxAlbertForMultipleChoice
- __call__
## FlaxAlbertForTokenClassification
[[autodoc]] FlaxAlbertForTokenClassification
- __call__
## FlaxAlbertForQuestionAnswering
[[autodoc]] FlaxAlbertForQuestionAnswering
- __call__
</jax>
</frameworkcontent>
| transformers/docs/source/en/model_doc/albert.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/albert.md",
"repo_id": "transformers",
"token_count": 3405
} | 221 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# CLIP
## Overview
The CLIP model was proposed in [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh,
Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. CLIP
(Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be
instructed in natural language to predict the most relevant text snippet, given an image, without directly optimizing
for the task, similarly to the zero-shot capabilities of GPT-2 and 3.
The abstract from the paper is the following:
*State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This
restricted form of supervision limits their generality and usability since additional labeled data is needed to specify
any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a
much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes
with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400
million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference
learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study
the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks
such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The
model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need
for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot
without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained
model weights at this https URL.*
This model was contributed by [valhalla](https://huggingface.co/valhalla). The original code can be found [here](https://github.com/openai/CLIP).
## Usage tips and example
CLIP is a multi-modal vision and language model. It can be used for image-text similarity and for zero-shot image
classification. CLIP uses a ViT like transformer to get visual features and a causal language model to get the text
features. Both the text and visual features are then projected to a latent space with identical dimension. The dot
product between the projected image and text features is then used as a similarity score.
To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches,
which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image. The authors
also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder.
The [`CLIPImageProcessor`] can be used to resize (or rescale) and normalize images for the model.
The [`CLIPTokenizer`] is used to encode the text. The [`CLIPProcessor`] wraps
[`CLIPImageProcessor`] and [`CLIPTokenizer`] into a single instance to both
encode the text and prepare the images. The following example shows how to get the image-text similarity scores using
[`CLIPProcessor`] and [`CLIPModel`].
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import CLIPProcessor, CLIPModel
>>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
>>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```
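Beyond the joint similarity scores, you can also embed texts and images separately, for example to pre-compute an index for image retrieval (as in the notebooks listed below). A minimal sketch, reusing the `model`, `processor` and `image` from the example above:

```python
>>> import torch

>>> text_inputs = processor(text=["a photo of a cat", "a photo of a dog"], return_tensors="pt", padding=True)
>>> image_inputs = processor(images=image, return_tensors="pt")

>>> with torch.no_grad():
...     text_embeds = model.get_text_features(**text_inputs)
...     image_embeds = model.get_image_features(**image_inputs)

>>> # normalize and compare with a cosine similarity (dot product of unit vectors)
>>> text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
>>> image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
>>> similarities = image_embeds @ text_embeds.T
```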
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with CLIP.
- [Fine tuning CLIP with Remote Sensing (Satellite) images and captions](https://huggingface.co/blog/fine-tune-clip-rsicd), a blog post about how to fine-tune CLIP with [RSICD dataset](https://github.com/201528014227051/RSICD_optimal) and comparison of performance changes due to data augmentation.
- This [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/contrastive-image-text) shows how to train a CLIP-like vision-text dual encoder model using a pre-trained vision and text encoder using [COCO dataset](https://cocodataset.org/#home).
<PipelineTag pipeline="image-to-text"/>
- A [notebook](https://colab.research.google.com/drive/1tuoAC5F4sC7qid56Z0ap-stR3rwdk0ZV?usp=sharing) on how to use a pretrained CLIP for inference with beam search for image captioning. 🌎
**Image retrieval**
- A [notebook](https://colab.research.google.com/drive/1bLVwVKpAndpEDHqjzxVPr_9nGrSbuOQd?usp=sharing) on image retrieval using pretrained CLIP and computing MRR (Mean Reciprocal Rank) score. 🌎
- A [notebook](https://colab.research.google.com/github/deep-diver/image_search_with_natural_language/blob/main/notebooks/Image_Search_CLIP.ipynb) on image retrieval and showing the similarity score. 🌎
- A [notebook](https://colab.research.google.com/drive/1xO-wC_m_GNzgjIBQ4a4znvQkvDoZJvH4?usp=sharing) on how to map images and texts to the same vector space using Multilingual CLIP. 🌎
- A [notebook](https://colab.research.google.com/github/vivien000/clip-demo/blob/master/clip.ipynb#scrollTo=uzdFhRGqiWkR) on how to run CLIP on semantic image search using [Unsplash](https://unsplash.com) and [TMDB](https://www.themoviedb.org/) datasets. 🌎
**Explainability**
- A [notebook](https://colab.research.google.com/github/hila-chefer/Transformer-MM-Explainability/blob/main/CLIP_explainability.ipynb) on how to visualize similarity between input token and image segment. 🌎
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we will review it.
The resource should ideally demonstrate something new instead of duplicating an existing resource.
## CLIPConfig
[[autodoc]] CLIPConfig
- from_text_vision_configs
## CLIPTextConfig
[[autodoc]] CLIPTextConfig
## CLIPVisionConfig
[[autodoc]] CLIPVisionConfig
## CLIPTokenizer
[[autodoc]] CLIPTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## CLIPTokenizerFast
[[autodoc]] CLIPTokenizerFast
## CLIPImageProcessor
[[autodoc]] CLIPImageProcessor
- preprocess
## CLIPFeatureExtractor
[[autodoc]] CLIPFeatureExtractor
## CLIPProcessor
[[autodoc]] CLIPProcessor
<frameworkcontent>
<pt>
## CLIPModel
[[autodoc]] CLIPModel
- forward
- get_text_features
- get_image_features
## CLIPTextModel
[[autodoc]] CLIPTextModel
- forward
## CLIPTextModelWithProjection
[[autodoc]] CLIPTextModelWithProjection
- forward
## CLIPVisionModelWithProjection
[[autodoc]] CLIPVisionModelWithProjection
- forward
## CLIPVisionModel
[[autodoc]] CLIPVisionModel
- forward
</pt>
<tf>
## TFCLIPModel
[[autodoc]] TFCLIPModel
- call
- get_text_features
- get_image_features
## TFCLIPTextModel
[[autodoc]] TFCLIPTextModel
- call
## TFCLIPVisionModel
[[autodoc]] TFCLIPVisionModel
- call
</tf>
<jax>
## FlaxCLIPModel
[[autodoc]] FlaxCLIPModel
- __call__
- get_text_features
- get_image_features
## FlaxCLIPTextModel
[[autodoc]] FlaxCLIPTextModel
- __call__
## FlaxCLIPTextModelWithProjection
[[autodoc]] FlaxCLIPTextModelWithProjection
- __call__
## FlaxCLIPVisionModel
[[autodoc]] FlaxCLIPVisionModel
- __call__
</jax>
</frameworkcontent>
| transformers/docs/source/en/model_doc/clip.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/clip.md",
"repo_id": "transformers",
"token_count": 2668
} | 222 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# Decision Transformer
## Overview
The Decision Transformer model was proposed in [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345)
by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
The abstract from the paper is the following:
*We introduce a framework that abstracts Reinforcement Learning (RL) as a sequence modeling problem.
This allows us to draw upon the simplicity and scalability of the Transformer architecture, and associated advances
in language modeling such as GPT-x and BERT. In particular, we present Decision Transformer, an architecture that
casts the problem of RL as conditional sequence modeling. Unlike prior approaches to RL that fit value functions or
compute policy gradients, Decision Transformer simply outputs the optimal actions by leveraging a causally masked
Transformer. By conditioning an autoregressive model on the desired return (reward), past states, and actions, our
Decision Transformer model can generate future actions that achieve the desired return. Despite its simplicity,
Decision Transformer matches or exceeds the performance of state-of-the-art model-free offline RL baselines on
Atari, OpenAI Gym, and Key-to-Door tasks.*
This version of the model is for tasks where the state is a vector.
This model was contributed by [edbeeching](https://huggingface.co/edbeeching). The original code can be found [here](https://github.com/kzl/decision-transformer).
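As a rough sketch of the expected inputs (the dimensions below are illustrative, not tied to a specific environment), the model consumes aligned sequences of returns-to-go, states and actions, and predicts, among other things, the next action:

```python
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=4)  # illustrative dimensions
model = DecisionTransformerModel(config)

batch_size, seq_len = 1, 20
outputs = model(
    states=torch.randn(batch_size, seq_len, config.state_dim),
    actions=torch.randn(batch_size, seq_len, config.act_dim),
    rewards=torch.randn(batch_size, seq_len, 1),
    returns_to_go=torch.randn(batch_size, seq_len, 1),
    timesteps=torch.arange(seq_len).unsqueeze(0),
    attention_mask=torch.ones(batch_size, seq_len, dtype=torch.long),
)
action_preds = outputs.action_preds  # shape: (batch_size, seq_len, act_dim)
```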
## DecisionTransformerConfig
[[autodoc]] DecisionTransformerConfig
## DecisionTransformerGPT2Model
[[autodoc]] DecisionTransformerGPT2Model
- forward
## DecisionTransformerModel
[[autodoc]] DecisionTransformerModel
- forward
| transformers/docs/source/en/model_doc/decision_transformer.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/decision_transformer.md",
"repo_id": "transformers",
"token_count": 639
} | 223 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# EfficientNet
## Overview
The EfficientNet model was proposed in [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
by Mingxing Tan and Quoc V. Le. EfficientNets are a family of image classification models, which achieve state-of-the-art accuracy, yet being an order-of-magnitude smaller and faster than previous models.
The abstract from the paper is the following:
*Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet.
To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.*
This model was contributed by [adirik](https://huggingface.co/adirik).
The original code can be found [here](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet).
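As a quick sketch of image classification with EfficientNet (the `google/efficientnet-b0` checkpoint name is assumed here; any EfficientNet checkpoint on the Hub should work the same way):

```python
import requests
from PIL import Image
from transformers import AutoImageProcessor, EfficientNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("google/efficientnet-b0")
model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b0")

inputs = image_processor(images=image, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```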
## EfficientNetConfig
[[autodoc]] EfficientNetConfig
## EfficientNetImageProcessor
[[autodoc]] EfficientNetImageProcessor
- preprocess
## EfficientNetModel
[[autodoc]] EfficientNetModel
- forward
## EfficientNetForImageClassification
[[autodoc]] EfficientNetForImageClassification
- forward
| transformers/docs/source/en/model_doc/efficientnet.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/efficientnet.md",
"repo_id": "transformers",
"token_count": 725
} | 224 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# Funnel Transformer
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=funnel">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-funnel-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/funnel-transformer-small">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
The Funnel Transformer model was proposed in the paper [Funnel-Transformer: Filtering out Sequential Redundancy for
Efficient Language Processing](https://arxiv.org/abs/2006.03236). It is a bidirectional transformer model, like
BERT, but with a pooling operation after each block of layers, a bit like in traditional convolutional neural networks
(CNN) in computer vision.
The abstract from the paper is the following:
*With the success of language pretraining, it is highly desirable to develop more efficient architectures of good
scalability that can exploit the abundant unlabeled data at a lower cost. To improve the efficiency, we examine the
much-overlooked redundancy in maintaining a full-length token-level presentation, especially for tasks that only
require a single-vector presentation of the sequence. With this intuition, we propose Funnel-Transformer which
gradually compresses the sequence of hidden states to a shorter one and hence reduces the computation cost. More
importantly, by re-investing the saved FLOPs from length reduction in constructing a deeper or wider model, we further
improve the model capacity. In addition, to perform token-level predictions as required by common pretraining
objectives, Funnel-Transformer is able to recover a deep representation for each token from the reduced hidden sequence
via a decoder. Empirically, with comparable or fewer FLOPs, Funnel-Transformer outperforms the standard Transformer on
a wide variety of sequence-level prediction tasks, including text classification, language understanding, and reading
comprehension.*
This model was contributed by [sgugger](https://huggingface.co/sgugger). The original code can be found [here](https://github.com/laiguokun/Funnel-Transformer).
## Usage tips
- Since Funnel Transformer uses pooling, the sequence length of the hidden states changes after each block of layers. This way, their length is divided by 2, which speeds up the computation of the next hidden states.
The base model therefore has a final sequence length that is a quarter of the original one. This model can be used
directly for tasks that just require a sentence summary (like sequence classification or multiple choice). For other
tasks, the full model is used; this full model has a decoder that upsamples the final hidden states to the same
sequence length as the input.
- For tasks such as classification, this is not a problem, but for tasks like masked language modeling or token classification, we need a hidden state with the same sequence length as the original input. In those cases, the final hidden states are upsampled to the input sequence length and go through two additional layers. That's why there are two versions of each checkpoint. The version suffixed with "-base" contains only the three blocks, while the version without that suffix contains the three blocks and the upsampling head with its additional layers.
- The Funnel Transformer checkpoints are all available with a full version and a base version. The first ones should be
used for [`FunnelModel`], [`FunnelForPreTraining`],
[`FunnelForMaskedLM`], [`FunnelForTokenClassification`] and
[`FunnelForQuestionAnswering`]. The second ones should be used for
[`FunnelBaseModel`], [`FunnelForSequenceClassification`] and
[`FunnelForMultipleChoice`].
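A small sketch of the difference between the two versions (assuming the `funnel-transformer/small` and `funnel-transformer/small-base` checkpoints): the base model returns a pooled, shorter sequence of hidden states, while the full model upsamples back to the input length:

```python
import torch
from transformers import FunnelTokenizer, FunnelBaseModel, FunnelModel

tokenizer = FunnelTokenizer.from_pretrained("funnel-transformer/small")
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")

with torch.no_grad():
    base_outputs = FunnelBaseModel.from_pretrained("funnel-transformer/small-base")(**inputs)
    full_outputs = FunnelModel.from_pretrained("funnel-transformer/small")(**inputs)

print(base_outputs.last_hidden_state.shape)  # shorter (pooled) sequence
print(full_outputs.last_hidden_state.shape)  # same sequence length as the input
```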
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
## FunnelConfig
[[autodoc]] FunnelConfig
## FunnelTokenizer
[[autodoc]] FunnelTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## FunnelTokenizerFast
[[autodoc]] FunnelTokenizerFast
## Funnel specific outputs
[[autodoc]] models.funnel.modeling_funnel.FunnelForPreTrainingOutput
[[autodoc]] models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput
<frameworkcontent>
<pt>
## FunnelBaseModel
[[autodoc]] FunnelBaseModel
- forward
## FunnelModel
[[autodoc]] FunnelModel
- forward
## FunnelModelForPreTraining
[[autodoc]] FunnelForPreTraining
- forward
## FunnelForMaskedLM
[[autodoc]] FunnelForMaskedLM
- forward
## FunnelForSequenceClassification
[[autodoc]] FunnelForSequenceClassification
- forward
## FunnelForMultipleChoice
[[autodoc]] FunnelForMultipleChoice
- forward
## FunnelForTokenClassification
[[autodoc]] FunnelForTokenClassification
- forward
## FunnelForQuestionAnswering
[[autodoc]] FunnelForQuestionAnswering
- forward
</pt>
<tf>
## TFFunnelBaseModel
[[autodoc]] TFFunnelBaseModel
- call
## TFFunnelModel
[[autodoc]] TFFunnelModel
- call
## TFFunnelModelForPreTraining
[[autodoc]] TFFunnelForPreTraining
- call
## TFFunnelForMaskedLM
[[autodoc]] TFFunnelForMaskedLM
- call
## TFFunnelForSequenceClassification
[[autodoc]] TFFunnelForSequenceClassification
- call
## TFFunnelForMultipleChoice
[[autodoc]] TFFunnelForMultipleChoice
- call
## TFFunnelForTokenClassification
[[autodoc]] TFFunnelForTokenClassification
- call
## TFFunnelForQuestionAnswering
[[autodoc]] TFFunnelForQuestionAnswering
- call
</tf>
</frameworkcontent>
| transformers/docs/source/en/model_doc/funnel.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/funnel.md",
"repo_id": "transformers",
"token_count": 1879
} | 225 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# I-BERT
## Overview
The I-BERT model was proposed in [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by
Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney and Kurt Keutzer. It's a quantized version of RoBERTa running
inference up to four times faster.
The abstract from the paper is the following:
*Transformer based models, like BERT and RoBERTa, have achieved state-of-the-art results in many Natural Language
Processing tasks. However, their memory footprint, inference latency, and power consumption are prohibitive for
efficient inference at the edge, and even at the data center. While quantization can be a viable solution for this,
previous work on quantizing Transformer based models use floating-point arithmetic during inference, which cannot
efficiently utilize integer-only logical units such as the recent Turing Tensor Cores, or traditional integer-only ARM
processors. In this work, we propose I-BERT, a novel quantization scheme for Transformer based models that quantizes
the entire inference with integer-only arithmetic. Based on lightweight integer-only approximation methods for
nonlinear operations, e.g., GELU, Softmax, and Layer Normalization, I-BERT performs an end-to-end integer-only BERT
inference without any floating point calculation. We evaluate our approach on GLUE downstream tasks using
RoBERTa-Base/Large. We show that for both cases, I-BERT achieves similar (and slightly higher) accuracy as compared to
the full-precision baseline. Furthermore, our preliminary implementation of I-BERT shows a speedup of 2.4 - 4.0x for
INT8 inference on a T4 GPU system as compared to FP32 inference. The framework has been developed in PyTorch and has
been open-sourced.*
This model was contributed by [kssteven](https://huggingface.co/kssteven). The original code can be found [here](https://github.com/kssteven418/I-BERT).
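The checkpoints can be loaded like any other Transformers model; the sketch below assumes the `kssteven/ibert-roberta-base` checkpoint and points out the `quant_mode` configuration flag that switches between full-precision fine-tuning and the integer-only graph (see the repository linked above for the full quantization recipe):

```python
from transformers import AutoTokenizer, IBertModel

tokenizer = AutoTokenizer.from_pretrained("kssteven/ibert-roberta-base")
model = IBertModel.from_pretrained("kssteven/ibert-roberta-base")

# False for regular full-precision fine-tuning; set to True to run the
# integer-only graph once the quantization steps from the repo have been applied.
print(model.config.quant_mode)
```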
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
## IBertConfig
[[autodoc]] IBertConfig
## IBertModel
[[autodoc]] IBertModel
- forward
## IBertForMaskedLM
[[autodoc]] IBertForMaskedLM
- forward
## IBertForSequenceClassification
[[autodoc]] IBertForSequenceClassification
- forward
## IBertForMultipleChoice
[[autodoc]] IBertForMultipleChoice
- forward
## IBertForTokenClassification
[[autodoc]] IBertForTokenClassification
- forward
## IBertForQuestionAnswering
[[autodoc]] IBertForQuestionAnswering
- forward
| transformers/docs/source/en/model_doc/ibert.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/ibert.md",
"repo_id": "transformers",
"token_count": 947
} | 226 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# LLaVa
## Overview
LLaVa is an open-source chatbot trained by fine-tuning LLaMA/Vicuna on GPT-generated multimodal instruction-following data. It is an auto-regressive language model based on the transformer architecture. In other words, it is a multi-modal version of LLMs fine-tuned for chat / instructions.
The LLaVa model was proposed in [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) and improved in [Improved Baselines with Visual Instruction Tuning](https://arxiv.org/pdf/2310.03744) by Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee.
The abstract from the paper is the following:
*Large multimodal models (LMM) have recently shown encouraging progress with visual instruction tuning. In this note, we show that the fully-connected vision-language cross-modal connector in LLaVA is surprisingly powerful and data-efficient. With simple modifications to LLaVA, namely, using CLIP-ViT-L-336px with an MLP projection and adding academic-task-oriented VQA data with simple response formatting prompts, we establish stronger baselines that achieve state-of-the-art across 11 benchmarks. Our final 13B checkpoint uses merely 1.2M publicly available data, and finishes full training in ~1 day on a single 8-A100 node. We hope this can make state-of-the-art LMM research more accessible. Code and model will be publicly available*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/llava_architecture.jpg"
alt="drawing" width="600"/>
<small> LLaVa architecture. Taken from the <a href="https://arxiv.org/abs/2304.08485">original paper.</a> </small>
This model was contributed by [ArthurZ](https://huggingface.co/ArthurZ) and [ybelkada](https://huggingface.co/ybelkada).
The original code can be found [here](https://github.com/haotian-liu/LLaVA/tree/main/llava).
## Usage tips
- We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to call `processor.tokenizer.padding_side = "left"` before generating.
- Note that the model has not been explicitly trained to process multiple images in the same prompt; although this is technically possible, you may experience inaccurate results.
- For better results, we recommend users to prompt the model with the correct prompt format:
```bash
"USER: <image>\n<prompt>ASSISTANT:"
```
For multiple turns conversation:
```bash
"USER: <image>\n<prompt1>ASSISTANT: <answer1>USER: <prompt2>ASSISTANT: <answer2>USER: <prompt3>ASSISTANT:"
```
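Putting the prompt format above to work, a minimal generation sketch (the `llava-hf/llava-1.5-7b-hf` checkpoint name is assumed here) looks like this:

```python
import requests
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_id = "llava-hf/llava-1.5-7b-hf"
processor = AutoProcessor.from_pretrained(model_id)
model = LlavaForConditionalGeneration.from_pretrained(model_id)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "USER: <image>\nWhat do you see in this image? ASSISTANT:"

inputs = processor(text=prompt, images=image, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=50)
print(processor.decode(output[0], skip_special_tokens=True))
```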
### Using Flash Attention 2
Flash Attention 2 is an even faster, optimized version of the previous optimization; please refer to the [Flash Attention 2 section of the performance docs](https://huggingface.co/docs/transformers/perf_infer_gpu_one).
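A hedged sketch of enabling it (this assumes a Transformers version whose `from_pretrained` accepts the `attn_implementation` argument, plus the `flash-attn` package and a supported GPU):

```python
import torch
from transformers import LlavaForConditionalGeneration

model = LlavaForConditionalGeneration.from_pretrained(
    "llava-hf/llava-1.5-7b-hf",
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",
).to("cuda")
```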
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LLaVa.
<PipelineTag pipeline="image-to-text"/>
- A [Google Colab demo](https://colab.research.google.com/drive/1qsl6cd2c8gGtEW1xV5io7S8NHh-Cp1TV?usp=sharing) on how to run Llava on a free-tier Google colab instance leveraging 4-bit inference.
- A [similar notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LLaVa/Inference_with_LLaVa_for_multimodal_generation.ipynb) showcasing batched inference. 🌎
## LlavaConfig
[[autodoc]] LlavaConfig
## LlavaProcessor
[[autodoc]] LlavaProcessor
## LlavaForConditionalGeneration
[[autodoc]] LlavaForConditionalGeneration
- forward
| transformers/docs/source/en/model_doc/llava.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/llava.md",
"repo_id": "transformers",
"token_count": 1228
} | 227 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->
# MVP
## Overview
The MVP model was proposed in [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
According to the abstract,
- MVP follows a standard Transformer encoder-decoder architecture.
- MVP is supervised pre-trained using labeled datasets.
- MVP also has task-specific soft prompts to stimulate the model's capacity in performing a certain task.
- MVP is specially designed for natural language generation and can be adapted to a wide range of generation tasks, including but not limited to summarization, data-to-text generation, open-ended dialogue system, story generation, question answering, question generation, task-oriented dialogue system, commonsense generation, paraphrase generation, text style transfer, and text simplification. Our model can also be adapted to natural language understanding tasks such as sequence classification and (extractive) question answering.
This model was contributed by [Tianyi Tang](https://huggingface.co/StevenTang). The detailed information and instructions can be found [here](https://github.com/RUCAIBox/MVP).
## Usage tips
- We have released a series of models [here](https://huggingface.co/models?filter=mvp), including MVP, MVP with task-specific prompts, and multi-task pre-trained variants.
- If you want to use a model without prompts (standard Transformer), you can load it through `MvpForConditionalGeneration.from_pretrained('RUCAIBox/mvp')`.
- If you want to use a model with task-specific prompts, such as summarization, you can load it through `MvpForConditionalGeneration.from_pretrained('RUCAIBox/mvp-summarization')`.
- Our model supports lightweight prompt tuning following [Prefix-tuning](https://arxiv.org/abs/2101.00190) with method `set_lightweight_tuning()`.
## Usage examples
For summarization, below is an example of using MVP and MVP with summarization-specific prompts.
```python
>>> from transformers import MvpTokenizer, MvpForConditionalGeneration
>>> tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
>>> model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp")
>>> model_with_prompt = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp-summarization")
>>> inputs = tokenizer(
... "Summarize: You may want to stick it to your boss and leave your job, but don't do it if these are your reasons.",
... return_tensors="pt",
... )
>>> generated_ids = model.generate(**inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
["Why You Shouldn't Quit Your Job"]
>>> generated_ids = model_with_prompt.generate(**inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
["Don't do it if these are your reasons"]
```
For data-to-text generation, below is an example of using MVP and the multi-task pre-trained variants.
```python
>>> from transformers import MvpTokenizerFast, MvpForConditionalGeneration
>>> tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
>>> model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp")
>>> model_with_mtl = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mtl-data-to-text")
>>> inputs = tokenizer(
... "Describe the following data: Iron Man | instance of | Superhero [SEP] Stan Lee | creator | Iron Man",
... return_tensors="pt",
... )
>>> generated_ids = model.generate(**inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
['Stan Lee created the character of Iron Man, a fictional superhero appearing in American comic']
>>> generated_ids = model_with_mtl.generate(**inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
['Iron Man is a fictional superhero appearing in American comic books published by Marvel Comics.']
```
For lightweight tuning, *i.e.*, fixing the model and only tuning prompts, you can load MVP with randomly initialized prompts or with task-specific prompts. Our code also supports Prefix-tuning with BART following the [original paper](https://arxiv.org/abs/2101.00190).
```python
>>> from transformers import MvpForConditionalGeneration
>>> model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp", use_prompt=True)
>>> # the number of trainable parameters (full tuning)
>>> sum(p.numel() for p in model.parameters() if p.requires_grad)
468116832
>>> # lightweight tuning with randomly initialized prompts
>>> model.set_lightweight_tuning()
>>> # the number of trainable parameters (lightweight tuning)
>>> sum(p.numel() for p in model.parameters() if p.requires_grad)
61823328
>>> # lightweight tuning with task-specific prompts
>>> model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mtl-data-to-text")
>>> model.set_lightweight_tuning()
>>> # original lightweight Prefix-tuning
>>> model = MvpForConditionalGeneration.from_pretrained("facebook/bart-large", use_prompt=True)
>>> model.set_lightweight_tuning()
```
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Question answering task guide](../tasks/question_answering)
- [Causal language modeling task guide](../tasks/language_modeling)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Translation task guide](../tasks/translation)
- [Summarization task guide](../tasks/summarization)
## MvpConfig
[[autodoc]] MvpConfig
## MvpTokenizer
[[autodoc]] MvpTokenizer
## MvpTokenizerFast
[[autodoc]] MvpTokenizerFast
## MvpModel
[[autodoc]] MvpModel
- forward
## MvpForConditionalGeneration
[[autodoc]] MvpForConditionalGeneration
- forward
## MvpForSequenceClassification
[[autodoc]] MvpForSequenceClassification
- forward
## MvpForQuestionAnswering
[[autodoc]] MvpForQuestionAnswering
- forward
## MvpForCausalLM
[[autodoc]] MvpForCausalLM
- forward
| transformers/docs/source/en/model_doc/mvp.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/mvp.md",
"repo_id": "transformers",
"token_count": 1922
} | 228 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Speech Encoder Decoder Models
The [`SpeechEncoderDecoderModel`] can be used to initialize a speech-to-text model
with any pretrained speech autoencoding model as the encoder (*e.g.* [Wav2Vec2](wav2vec2), [Hubert](hubert)) and any pretrained autoregressive model as the decoder.
The effectiveness of initializing speech-sequence-to-text-sequence models with pretrained checkpoints for speech
recognition and speech translation has *e.g.* been shown in [Large-Scale Self- and Semi-Supervised Learning for Speech
Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli,
Alexis Conneau.
An example of how to use a [`SpeechEncoderDecoderModel`] for inference can be seen in [Speech2Text2](speech_to_text_2).
## Randomly initializing `SpeechEncoderDecoderModel` from model configurations.
[`SpeechEncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`Wav2Vec2Model`] configuration for the encoder
and the default [`BertForCausalLM`] configuration for the decoder.
```python
>>> from transformers import BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel
>>> config_encoder = Wav2Vec2Config()
>>> config_decoder = BertConfig()
>>> config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> model = SpeechEncoderDecoderModel(config=config)
```
## Initializing `SpeechEncoderDecoderModel` from a pretrained encoder and a pretrained decoder.
[`SpeechEncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained Transformer-based speech model, *e.g.* [Wav2Vec2](wav2vec2), [Hubert](hubert) can serve as the encoder and both pretrained auto-encoding models, *e.g.* BERT, pretrained causal language models, *e.g.* GPT2, as well as the pretrained decoder part of sequence-to-sequence models, *e.g.* decoder of BART, can be used as the decoder.
Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized.
Initializing [`SpeechEncoderDecoderModel`] from a pretrained encoder and decoder checkpoint requires the model to be fine-tuned on a downstream task, as has been shown in [the *Warm-starting-encoder-decoder blog post*](https://huggingface.co/blog/warm-starting-encoder-decoder).
To do so, the `SpeechEncoderDecoderModel` class provides a [`SpeechEncoderDecoderModel.from_encoder_decoder_pretrained`] method.
```python
>>> from transformers import SpeechEncoderDecoderModel
>>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
... "facebook/hubert-large-ll60k", "bert-base-uncased"
... )
```
## Loading an existing `SpeechEncoderDecoderModel` checkpoint and performing inference.
To load fine-tuned checkpoints of the `SpeechEncoderDecoderModel` class, [`SpeechEncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers.
To perform inference, one uses the [`generate`] method, which allows autoregressively generating text. This method supports various forms of decoding, such as greedy decoding, beam search and multinomial sampling.
```python
>>> from transformers import Wav2Vec2Processor, SpeechEncoderDecoderModel
>>> from datasets import load_dataset
>>> import torch
>>> # load a fine-tuned speech translation model and corresponding processor
>>> model = SpeechEncoderDecoderModel.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
>>> # let's perform inference on a piece of English speech (which we'll translate to German)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> input_values = processor(ds[0]["audio"]["array"], return_tensors="pt").input_values
>>> # autoregressively generate transcription (uses greedy decoding by default)
>>> generated_ids = model.generate(input_values)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_text)
Mr. Quilter ist der Apostel der Mittelschicht und wir freuen uns, sein Evangelium willkommen heißen zu können.
```
## Training
Once the model is created, it can be fine-tuned similar to BART, T5 or any other encoder-decoder model on a dataset of (speech, text) pairs.
As you can see, only 2 inputs are required for the model in order to compute a loss: `input_values` (which are the
speech inputs) and `labels` (which are the `input_ids` of the encoded target sequence).
```python
>>> from transformers import AutoTokenizer, AutoFeatureExtractor, SpeechEncoderDecoderModel
>>> from datasets import load_dataset
>>> encoder_id = "facebook/wav2vec2-base-960h" # acoustic model encoder
>>> decoder_id = "bert-base-uncased" # text decoder
>>> feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
>>> tokenizer = AutoTokenizer.from_pretrained(decoder_id)
>>> # Combine pre-trained encoder and pre-trained decoder to form a Seq2Seq model
>>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id)
>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
>>> model.config.pad_token_id = tokenizer.pad_token_id
>>> # load an audio input and pre-process (normalise mean/std to 0/1)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values
>>> # load its corresponding transcription and tokenize to generate labels
>>> labels = tokenizer(ds[0]["text"], return_tensors="pt").input_ids
>>> # the forward function automatically creates the correct decoder_input_ids
>>> loss = model(input_values=input_values, labels=labels).loss
>>> loss.backward()
```
## SpeechEncoderDecoderConfig
[[autodoc]] SpeechEncoderDecoderConfig
## SpeechEncoderDecoderModel
[[autodoc]] SpeechEncoderDecoderModel
- forward
- from_encoder_decoder_pretrained
## FlaxSpeechEncoderDecoderModel
[[autodoc]] FlaxSpeechEncoderDecoderModel
- __call__
- from_encoder_decoder_pretrained
| transformers/docs/source/en/model_doc/speech-encoder-decoder.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/speech-encoder-decoder.md",
"repo_id": "transformers",
"token_count": 2084
} | 229 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Time Series Transformer
## Overview
The Time Series Transformer model is a vanilla encoder-decoder Transformer for time series forecasting.
This model was contributed by [kashif](https://huggingface.co/kashif).
## Usage tips
- Similar to other models in the library, [`TimeSeriesTransformerModel`] is the raw Transformer without any head on top, and [`TimeSeriesTransformerForPrediction`]
adds a distribution head on top of the former, which can be used for time-series forecasting. Note that this is a so-called probabilistic forecasting model, not a
point forecasting model. This means that the model learns a distribution, from which one can sample. The model doesn't directly output values.
- [`TimeSeriesTransformerForPrediction`] consists of 2 blocks: an encoder, which takes a `context_length` of time series values as input (called `past_values`),
and a decoder, which predicts a `prediction_length` of time series values into the future (called `future_values`). During training, one needs to provide
pairs of (`past_values` and `future_values`) to the model.
- In addition to the raw values (`past_values` and `future_values`), one typically provides additional features to the model. These can be the following:
- `past_time_features`: temporal features which the model will add to `past_values`. These serve as "positional encodings" for the Transformer encoder.
Examples are "day of the month", "month of the year", etc. as scalar values (and then stacked together as a vector).
e.g. if a given time-series value was obtained on the 11th of August, then one could have [11, 8] as time feature vector (11 being "day of the month", 8 being "month of the year").
- `future_time_features`: temporal features which the model will add to `future_values`. These serve as "positional encodings" for the Transformer decoder.
Examples are "day of the month", "month of the year", etc. as scalar values (and then stacked together as a vector).
e.g. if a given time-series value was obtained on the 11th of August, then one could have [11, 8] as time feature vector (11 being "day of the month", 8 being "month of the year").
- `static_categorical_features`: categorical features which are static over time (i.e., have the same value for all `past_values` and `future_values`).
An example here is the store ID or region ID that identifies a given time-series.
Note that these features need to be known for ALL data points (also those in the future).
- `static_real_features`: real-valued features which are static over time (i.e., have the same value for all `past_values` and `future_values`).
An example here is the image representation of the product for which you have the time-series values (like the [ResNet](resnet) embedding of a "shoe" picture,
if your time-series is about the sales of shoes).
Note that these features need to be known for ALL data points (also those in the future).
- The model is trained using "teacher-forcing", similar to how a Transformer is trained for machine translation. This means that, during training, one shifts the
`future_values` one position to the right as input to the decoder, prepended by the last value of `past_values`. At each time step, the model needs to predict the
next target. So the set-up of training is similar to a GPT model for language, except that there's no notion of `decoder_start_token_id` (we just use the last value
of the context as initial input for the decoder).
- At inference time, we give the final value of the `past_values` as input to the decoder. Next, we can sample from the model to make a prediction at the next time step,
which is then fed to the decoder in order to make the next prediction (also called autoregressive generation); a minimal end-to-end sketch is shown below.
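The following is a minimal, self-contained sketch tying these inputs together with randomly generated data. The configuration values (prediction length, context length, lag indices, number of time features and the single static categorical feature) are hypothetical choices for illustration only; adapt them to your dataset.
```python
>>> import torch
>>> from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerForPrediction

>>> # hypothetical toy configuration: forecast 12 steps ahead from a context of 24 steps
>>> config = TimeSeriesTransformerConfig(
...     prediction_length=12,
...     context_length=24,
...     lags_sequence=[1, 2, 3],
...     num_time_features=1,
...     num_static_categorical_features=1,
...     cardinality=[5],  # the single categorical feature takes 5 possible values
... )
>>> model = TimeSeriesTransformerForPrediction(config)

>>> # the encoder consumes context_length + max(lags_sequence) past values
>>> batch_size = 4
>>> past_length = config.context_length + max(config.lags_sequence)
>>> past_values = torch.randn(batch_size, past_length)
>>> past_time_features = torch.randn(batch_size, past_length, config.num_time_features)
>>> past_observed_mask = torch.ones(batch_size, past_length)
>>> static_categorical_features = torch.zeros(batch_size, 1, dtype=torch.long)
>>> future_values = torch.randn(batch_size, config.prediction_length)
>>> future_time_features = torch.randn(batch_size, config.prediction_length, config.num_time_features)

>>> # training: provide past and future values, the distribution head returns a negative log-likelihood loss
>>> outputs = model(
...     past_values=past_values,
...     past_time_features=past_time_features,
...     past_observed_mask=past_observed_mask,
...     static_categorical_features=static_categorical_features,
...     future_values=future_values,
...     future_time_features=future_time_features,
... )
>>> outputs.loss.backward()

>>> # inference: sample future trajectories autoregressively and average them into a point forecast
>>> predictions = model.generate(
...     past_values=past_values,
...     past_time_features=past_time_features,
...     past_observed_mask=past_observed_mask,
...     static_categorical_features=static_categorical_features,
...     future_time_features=future_time_features,
... )
>>> mean_forecast = predictions.sequences.mean(dim=1)  # shape: (batch_size, prediction_length)
```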
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
- Check out the Time Series Transformer blog post on the HuggingFace blog: [Probabilistic Time Series Forecasting with 🤗 Transformers](https://huggingface.co/blog/time-series-transformers)
## TimeSeriesTransformerConfig
[[autodoc]] TimeSeriesTransformerConfig
## TimeSeriesTransformerModel
[[autodoc]] TimeSeriesTransformerModel
- forward
## TimeSeriesTransformerForPrediction
[[autodoc]] TimeSeriesTransformerForPrediction
- forward
| transformers/docs/source/en/model_doc/time_series_transformer.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/time_series_transformer.md",
"repo_id": "transformers",
"token_count": 1371
} | 230 |