text (string, lengths 96–319k) | id (string, lengths 14–178) | metadata (dict)
---|---|---
import argparse
import json
import logging
import os
from collections import Counter
from dataclasses import dataclass
from operator import attrgetter
from typing import Dict, List, Optional, Union
import safetensors
import torch
import torch.nn as nn
from diffusers import UNet2DConditionModel
from transformers import CLIPTextModel
from peft import LoHaConfig, LoKrConfig, LoraConfig, PeftType, get_peft_model, set_peft_model_state_dict
from peft.tuners.lokr.layer import factorization
# Default kohya_ss LoRA replacement modules
# https://github.com/kohya-ss/sd-scripts/blob/c924c47f374ac1b6e33e71f82948eb1853e2243f/networks/lora.py#L661
UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"]
UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"]
TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"]
PREFIX_UNET = "lora_unet"
PREFIX_TEXT_ENCODER = "lora_te"
@dataclass
class LoRAInfo:
kohya_key: str
peft_key: str
alpha: Optional[float] = None
rank: Optional[int] = None
lora_A: Optional[torch.Tensor] = None
lora_B: Optional[torch.Tensor] = None
def peft_state_dict(self) -> Dict[str, torch.Tensor]:
if self.lora_A is None or self.lora_B is None:
raise ValueError("At least one of lora_A or lora_B is None, they must both be provided")
return {
f"base_model.model.{self.peft_key}.lora_A.weight": self.lora_A,
f"base_model.model.{self.peft_key}.lora_B.weight": self.lora_B,
}
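# Illustrative sketch (not part of the converter; key names and shapes below are made up): a single kohya
# LoRA pair is collected into a LoRAInfo and then emitted under the "base_model.model." prefix that
# `set_peft_model_state_dict` expects.
def _demo_lora_info() -> Dict[str, torch.Tensor]:
    info = LoRAInfo(
        kohya_key="lora_unet_down_blocks_0_attentions_0_proj_in",
        peft_key="down_blocks.0.attentions.0.proj_in",
        alpha=4.0,
        rank=4,
        lora_A=torch.randn(4, 320),  # kohya "lora_down" weight
        lora_B=torch.randn(320, 4),  # kohya "lora_up" weight
    )
    return info.peft_state_dict()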
@dataclass
class LoHaInfo:
kohya_key: str
peft_key: str
alpha: Optional[float] = None
rank: Optional[int] = None
hada_w1_a: Optional[torch.Tensor] = None
hada_w1_b: Optional[torch.Tensor] = None
hada_w2_a: Optional[torch.Tensor] = None
hada_w2_b: Optional[torch.Tensor] = None
hada_t1: Optional[torch.Tensor] = None
hada_t2: Optional[torch.Tensor] = None
def peft_state_dict(self) -> Dict[str, torch.Tensor]:
if self.hada_w1_a is None or self.hada_w1_b is None or self.hada_w2_a is None or self.hada_w2_b is None:
raise ValueError(
"At least one of hada_w1_a, hada_w1_b, hada_w2_a, hada_w2_b is missing, they all must be provided"
)
state_dict = {
f"base_model.model.{self.peft_key}.hada_w1_a": self.hada_w1_a,
f"base_model.model.{self.peft_key}.hada_w1_b": self.hada_w1_b,
f"base_model.model.{self.peft_key}.hada_w2_a": self.hada_w2_a,
f"base_model.model.{self.peft_key}.hada_w2_b": self.hada_w2_b,
}
if not (
(self.hada_t1 is None and self.hada_t2 is None) or (self.hada_t1 is not None and self.hada_t2 is not None)
):
raise ValueError("hada_t1 and hada_t2 must be either both present or not present at the same time")
if self.hada_t1 is not None and self.hada_t2 is not None:
state_dict[f"base_model.model.{self.peft_key}.hada_t1"] = self.hada_t1
state_dict[f"base_model.model.{self.peft_key}.hada_t2"] = self.hada_t2
return state_dict
@dataclass
class LoKrInfo:
kohya_key: str
peft_key: str
alpha: Optional[float] = None
rank: Optional[int] = None
lokr_w1: Optional[torch.Tensor] = None
lokr_w1_a: Optional[torch.Tensor] = None
lokr_w1_b: Optional[torch.Tensor] = None
lokr_w2: Optional[torch.Tensor] = None
lokr_w2_a: Optional[torch.Tensor] = None
lokr_w2_b: Optional[torch.Tensor] = None
lokr_t2: Optional[torch.Tensor] = None
def peft_state_dict(self) -> Dict[str, torch.Tensor]:
if (self.lokr_w1 is None) and ((self.lokr_w1_a is None) or (self.lokr_w1_b is None)):
raise ValueError("Either lokr_w1 or both lokr_w1_a and lokr_w1_b should be provided")
if (self.lokr_w2 is None) and ((self.lokr_w2_a is None) or (self.lokr_w2_b is None)):
raise ValueError("Either lokr_w2 or both lokr_w2_a and lokr_w2_b should be provided")
state_dict = {}
if self.lokr_w1 is not None:
state_dict[f"base_model.model.{self.peft_key}.lokr_w1"] = self.lokr_w1
elif self.lokr_w1_a is not None:
state_dict[f"base_model.model.{self.peft_key}.lokr_w1_a"] = self.lokr_w1_a
state_dict[f"base_model.model.{self.peft_key}.lokr_w1_b"] = self.lokr_w1_b
if self.lokr_w2 is not None:
state_dict[f"base_model.model.{self.peft_key}.lokr_w2"] = self.lokr_w2
elif self.lokr_w2_a is not None:
state_dict[f"base_model.model.{self.peft_key}.lokr_w2_a"] = self.lokr_w2_a
state_dict[f"base_model.model.{self.peft_key}.lokr_w2_b"] = self.lokr_w2_b
if self.lokr_t2 is not None:
state_dict[f"base_model.model.{self.peft_key}.lokr_t2"] = self.lokr_t2
return state_dict
def construct_peft_loraconfig(info: Dict[str, LoRAInfo], **kwargs) -> LoraConfig:
"""Constructs LoraConfig from data extracted from adapter checkpoint
Args:
info (Dict[str, LoRAInfo]): Information extracted from adapter checkpoint
Returns:
LoraConfig: config for constructing LoRA
"""
# Unpack all ranks and alphas
ranks = {key: val.rank for key, val in info.items()}
alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()}
# Determine which modules need to be transformed
target_modules = sorted(info.keys())
# Determine most common rank and alpha
r = int(Counter(ranks.values()).most_common(1)[0][0])
lora_alpha = Counter(alphas.values()).most_common(1)[0][0]
# Determine which modules have different rank and alpha
rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0]))
alpha_pattern = dict(sorted(filter(lambda x: x[1] != lora_alpha, alphas.items()), key=lambda x: x[0]))
config = LoraConfig(
r=r,
lora_alpha=lora_alpha,
target_modules=target_modules,
lora_dropout=0.0,
bias="none",
init_lora_weights=False,
rank_pattern=rank_pattern,
alpha_pattern=alpha_pattern,
)
return config
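# Illustrative sketch (hypothetical module names): with per-module ranks {8, 8, 4}, the most common value
# becomes the global `r` and only the outlier ends up in `rank_pattern`; alphas are handled the same way.
def _demo_lora_config() -> LoraConfig:
    info = {
        "down_blocks.0.attentions.0.proj_in": LoRAInfo(kohya_key="k1", peft_key="down_blocks.0.attentions.0.proj_in", alpha=8.0, rank=8),
        "down_blocks.0.attentions.0.proj_out": LoRAInfo(kohya_key="k2", peft_key="down_blocks.0.attentions.0.proj_out", alpha=8.0, rank=8),
        "mid_block.attentions.0.proj_in": LoRAInfo(kohya_key="k3", peft_key="mid_block.attentions.0.proj_in", alpha=4.0, rank=4),
    }
    config = construct_peft_loraconfig(info)
    # config.r == 8, config.rank_pattern == {"mid_block.attentions.0.proj_in": 4}
    return config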
def construct_peft_lohaconfig(info: Dict[str, LoHaInfo], **kwargs) -> LoHaConfig:
"""Constructs LoHaConfig from data extracted from adapter checkpoint
Args:
info (Dict[str, LoHaInfo]): Information extracted from adapter checkpoint
Returns:
LoHaConfig: config for constructing LoHA
"""
# Unpack all ranks and alphas
ranks = {x[0]: x[1].rank for x in info.items()}
alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()}
# Determine which modules need to be transformed
target_modules = sorted(info.keys())
# Determine most common rank and alpha
r = int(Counter(ranks.values()).most_common(1)[0][0])
alpha = Counter(alphas.values()).most_common(1)[0][0]
# Determine which modules have different rank and alpha
rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0]))
alpha_pattern = dict(sorted(filter(lambda x: x[1] != alpha, alphas.items()), key=lambda x: x[0]))
# Determine whether any of modules have effective conv2d decomposition
use_effective_conv2d = any((val.hada_t1 is not None) or (val.hada_t2 is not None) for val in info.values())
config = LoHaConfig(
r=r,
alpha=alpha,
target_modules=target_modules,
rank_dropout=0.0,
module_dropout=0.0,
init_weights=False,
rank_pattern=rank_pattern,
alpha_pattern=alpha_pattern,
use_effective_conv2d=use_effective_conv2d,
)
return config
def construct_peft_lokrconfig(info: Dict[str, LoKrInfo], decompose_factor: int = -1, **kwargs) -> LoKrConfig:
"""Constructs LoKrConfig from data extracted from adapter checkpoint
Args:
info (Dict[str, LoKrInfo]): Information extracted from adapter checkpoint
Returns:
LoKrConfig: config for constructing LoKr
"""
# Unpack all ranks and alphas
ranks = {x[0]: x[1].rank for x in info.items()}
alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()}
# Determine which modules need to be transformed
target_modules = sorted(info.keys())
# Determine most common rank and alpha
r = int(Counter(ranks.values()).most_common(1)[0][0])
alpha = Counter(alphas.values()).most_common(1)[0][0]
# Determine which modules have different rank and alpha
rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0]))
alpha_pattern = dict(sorted(filter(lambda x: x[1] != alpha, alphas.items()), key=lambda x: x[0]))
# Determine whether any of modules have effective conv2d decomposition
use_effective_conv2d = any((val.lokr_t2 is not None) for val in info.values())
# decompose_both should be enabled if any w1 matrix in any layer is decomposed into 2
decompose_both = any((val.lokr_w1_a is not None and val.lokr_w1_b is not None) for val in info.values())
# Determining the decompose factor is a bit tricky (but it is most often -1)
# Check that the provided decompose_factor is consistent with the stored weight shapes
for val in info.values():
# Determine shape of first matrix
if val.lokr_w1 is not None:
w1_shape = tuple(val.lokr_w1.shape)
else:
w1_shape = (val.lokr_w1_a.shape[0], val.lokr_w1_b.shape[1])
# Determine shape of second matrix
if val.lokr_w2 is not None:
w2_shape = tuple(val.lokr_w2.shape[:2])
elif val.lokr_t2 is not None:
w2_shape = (val.lokr_w2_a.shape[1], val.lokr_w2_b.shape[1])
else:
# We may be iterating over a Conv2d layer, whose second shape dimension is multiplied by ksize^2
w2_shape = (val.lokr_w2_a.shape[0], val.lokr_w2_b.shape[1])
# We need to check whether decompose_factor is really -1
shape = (w1_shape[0], w2_shape[0])
if factorization(shape[0] * shape[1], factor=-1) != shape:
raise ValueError("Cannot infer decompose_factor, probably it is not equal to -1")
config = LoKrConfig(
r=r,
alpha=alpha,
target_modules=target_modules,
rank_dropout=0.0,
module_dropout=0.0,
init_weights=False,
rank_pattern=rank_pattern,
alpha_pattern=alpha_pattern,
use_effective_conv2d=use_effective_conv2d,
decompose_both=decompose_both,
decompose_factor=decompose_factor,
)
return config
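# Illustrative sketch (plain tensors, hypothetical sizes; scaling by alpha/r omitted): the shapes checked
# above come from the LoKr decomposition, where the weight delta of a Linear layer is (up to scaling) the
# Kronecker product of the two stored factors.
def _demo_lokr_delta() -> torch.Tensor:
    out_features, in_features = 64, 32
    u1, u2 = 8, 4  # hypothetical factorization of the output/input dimensions
    lokr_w1 = torch.randn(u1, u2)
    lokr_w2 = torch.randn(out_features // u1, in_features // u2)
    delta = torch.kron(lokr_w1, lokr_w2)
    assert delta.shape == (out_features, in_features)
    return delta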
def combine_peft_state_dict(info: Dict[str, Union[LoRAInfo, LoHaInfo]]) -> Dict[str, torch.Tensor]:
result = {}
for key_info in info.values():
result.update(key_info.peft_state_dict())
return result
def detect_adapter_type(keys: List[str]) -> PeftType:
# Detect type of adapter by keys
# Inspired by this:
# https://github.com/bmaltais/kohya_ss/blob/ed4e3b0239a40506de9a17e550e6cf2d0b867a4f/tools/lycoris_utils.py#L312
for key in keys:
if "alpha" in key:
continue
elif any(x in key for x in ["lora_down", "lora_up"]):
# LoRA
return PeftType.LORA
elif any(x in key for x in ["hada_w1", "hada_w2", "hada_t1", "hada_t2"]):
# LoHa may have the following keys:
# hada_w1_a, hada_w1_b, hada_w2_a, hada_w2_b, hada_t1, hada_t2
return PeftType.LOHA
elif any(x in key for x in ["lokr_w1", "lokr_w2", "lokr_t1", "lokr_t2"]):
# LoKr may have the following keys:
# lokr_w1, lokr_w2, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t1, lokr_t2
return PeftType.LOKR
elif "diff" in key:
raise ValueError("Currently full diff adapters are not implemented")
else:
raise ValueError("Unknown adapter type, probably not implemented")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--sd_checkpoint", default=None, type=str, required=True, help="SD checkpoint to use")
parser.add_argument(
"--adapter_path",
default=None,
type=str,
required=True,
help="Path to downloaded adapter to convert",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output peft adapter.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--loha_conv2d_weights_fix",
action="store_true",
help="""LoHa checkpoints trained with lycoris-lora<=1.9.0 contain a bug described in this PR https://github.com/KohakuBlueleaf/LyCORIS/pull/115.
This option fixes this bug during weight conversion (replaces hada_t2 with hada_t1 for Conv2d 3x3 layers).
The output results may differ from webui, but in general, they should be better in terms of quality.
This option should be set to True in case the provided checkpoint has been trained with lycoris-lora version for which the mentioned PR wasn't merged.
This option should be set to False in case the provided checkpoint has been trained with lycoris-lora version for which the mentioned PR is merged or full compatibility with webui outputs is required.""",
)
args = parser.parse_args()
# Load all models that we need to add adapter to
text_encoder = CLIPTextModel.from_pretrained(args.sd_checkpoint, subfolder="text_encoder")
unet = UNet2DConditionModel.from_pretrained(args.sd_checkpoint, subfolder="unet")
# Construct possible mapping from kohya keys to peft keys
models_keys = {}
for model, model_key, model_name in [
(text_encoder, PREFIX_TEXT_ENCODER, "text_encoder"),
(unet, PREFIX_UNET, "unet"),
]:
models_keys.update(
{
f"{model_key}.{peft_key}".replace(".", "_"): peft_key
for peft_key in (x[0] for x in model.named_modules())
}
)
# Store conversion info (model_type -> peft_key -> LoRAInfo | LoHaInfo | LoKrInfo)
adapter_info: Dict[str, Dict[str, Union[LoRAInfo, LoHaInfo, LoKrInfo]]] = {
"text_encoder": {},
"unet": {},
}
# Store decompose_factor for LoKr
decompose_factor = -1
# Open adapter checkpoint
with safetensors.safe_open(args.adapter_path, framework="pt", device="cpu") as f:
# Extract information about adapter structure
metadata = f.metadata()
# It may be difficult to determine the rank for LoKr adapters:
# if the checkpoint was trained with a large rank, that rank may not be reflected in the stored weight shapes at all,
# so we need to get it from the checkpoint metadata instead (along with decompose_factor)
rank, conv_rank = None, None
if metadata is not None:
rank = metadata.get("ss_network_dim", None)
rank = int(rank) if rank else None
if "ss_network_args" in metadata:
network_args = json.loads(metadata["ss_network_args"])
conv_rank = network_args.get("conv_dim", None)
conv_rank = int(conv_rank) if conv_rank else rank
decompose_factor = network_args.get("factor", -1)
decompose_factor = int(decompose_factor)
# Detect adapter type based on keys
adapter_type = detect_adapter_type(f.keys())
adapter_info_cls = {
PeftType.LORA: LoRAInfo,
PeftType.LOHA: LoHaInfo,
PeftType.LOKR: LoKrInfo,
}[adapter_type]
# Iterate through available info and unpack all the values
for key in f.keys():
kohya_key, kohya_type = key.split(".")[:2]
# Find which model this key belongs to
if kohya_key.startswith(PREFIX_TEXT_ENCODER):
model_type, model = "text_encoder", text_encoder
elif kohya_key.startswith(PREFIX_UNET):
model_type, model = "unet", unet
else:
raise ValueError(f"Cannot determine model for key: {key}")
# Find corresponding peft key
if kohya_key not in models_keys:
raise ValueError(f"Cannot find corresponding key for diffusers/transformers model: {kohya_key}")
peft_key = models_keys[kohya_key]
# Retrieve corresponding layer of model
layer = attrgetter(peft_key)(model)
# Create a corresponding adapter info
if peft_key not in adapter_info[model_type]:
adapter_info[model_type][peft_key] = adapter_info_cls(kohya_key=kohya_key, peft_key=peft_key)
tensor = f.get_tensor(key)
if kohya_type == "alpha":
adapter_info[model_type][peft_key].alpha = tensor.item()
elif kohya_type == "lora_down":
adapter_info[model_type][peft_key].lora_A = tensor
adapter_info[model_type][peft_key].rank = tensor.shape[0]
elif kohya_type == "lora_up":
adapter_info[model_type][peft_key].lora_B = tensor
adapter_info[model_type][peft_key].rank = tensor.shape[1]
elif kohya_type == "hada_w1_a":
adapter_info[model_type][peft_key].hada_w1_a = tensor
elif kohya_type == "hada_w1_b":
adapter_info[model_type][peft_key].hada_w1_b = tensor
adapter_info[model_type][peft_key].rank = tensor.shape[0]
elif kohya_type == "hada_w2_a":
adapter_info[model_type][peft_key].hada_w2_a = tensor
elif kohya_type == "hada_w2_b":
adapter_info[model_type][peft_key].hada_w2_b = tensor
adapter_info[model_type][peft_key].rank = tensor.shape[0]
elif kohya_type in {"hada_t1", "hada_t2"}:
if args.loha_conv2d_weights_fix:
if kohya_type == "hada_t1":
# This code block fixes a bug present in some LoHa checkpoints, which accidentally used the
# hada_t1 weight instead of hada_t2; see
# https://github.com/KohakuBlueleaf/LyCORIS/pull/115
adapter_info[model_type][peft_key].hada_t1 = tensor
adapter_info[model_type][peft_key].hada_t2 = tensor
adapter_info[model_type][peft_key].rank = tensor.shape[0]
else:
if kohya_type == "hada_t1":
adapter_info[model_type][peft_key].hada_t1 = tensor
adapter_info[model_type][peft_key].rank = tensor.shape[0]
elif kohya_type == "hada_t2":
adapter_info[model_type][peft_key].hada_t2 = tensor
adapter_info[model_type][peft_key].rank = tensor.shape[0]
elif kohya_type == "lokr_t2":
adapter_info[model_type][peft_key].lokr_t2 = tensor
adapter_info[model_type][peft_key].rank = tensor.shape[0]
elif kohya_type == "lokr_w1":
adapter_info[model_type][peft_key].lokr_w1 = tensor
if isinstance(layer, nn.Linear) or (
isinstance(layer, nn.Conv2d) and tuple(layer.weight.shape[2:]) == (1, 1)
):
adapter_info[model_type][peft_key].rank = rank
elif isinstance(layer, nn.Conv2d):
adapter_info[model_type][peft_key].rank = conv_rank
elif kohya_type == "lokr_w2":
adapter_info[model_type][peft_key].lokr_w2 = tensor
if isinstance(layer, nn.Linear) or (
isinstance(layer, nn.Conv2d) and tuple(layer.weight.shape[2:]) == (1, 1)
):
adapter_info[model_type][peft_key].rank = rank
elif isinstance(layer, nn.Conv2d):
adapter_info[model_type][peft_key].rank = conv_rank
elif kohya_type == "lokr_w1_a":
adapter_info[model_type][peft_key].lokr_w1_a = tensor
adapter_info[model_type][peft_key].rank = tensor.shape[1]
elif kohya_type == "lokr_w1_b":
adapter_info[model_type][peft_key].lokr_w1_b = tensor
adapter_info[model_type][peft_key].rank = tensor.shape[0]
elif kohya_type == "lokr_w2_a":
adapter_info[model_type][peft_key].lokr_w2_a = tensor
elif kohya_type == "lokr_w2_b":
adapter_info[model_type][peft_key].lokr_w2_b = tensor
else:
raise ValueError(f"Unknown weight name in key: {key} - {kohya_type}")
# Get the function that will create the adapter config based on the extracted info
construct_config_fn = {
PeftType.LORA: construct_peft_loraconfig,
PeftType.LOHA: construct_peft_lohaconfig,
PeftType.LOKR: construct_peft_lokrconfig,
}[adapter_type]
# Process each model sequentially
for model, model_name in [(text_encoder, "text_encoder"), (unet, "unet")]:
# Skip model if no data was provided
if len(adapter_info[model_name]) == 0:
continue
config = construct_config_fn(adapter_info[model_name], decompose_factor=decompose_factor)
# Output warning for LoHa with use_effective_conv2d
if (
isinstance(config, LoHaConfig)
and getattr(config, "use_effective_conv2d", False)
and args.loha_conv2d_weights_fix is False
):
logging.warning(
'lycoris-lora<=1.9.0 LoHa implementation contains a bug, which can be fixed with "--loha_conv2d_weights_fix".\n'
"For more info, please refer to https://github.com/huggingface/peft/pull/1021 and https://github.com/KohakuBlueleaf/LyCORIS/pull/115"
)
model = get_peft_model(model, config)
missing_keys, unexpected_keys = set_peft_model_state_dict(
model, combine_peft_state_dict(adapter_info[model_name])
)
if len(unexpected_keys) > 0:
raise ValueError(f"Unexpected keys {unexpected_keys} found during conversion")
if args.half:
model.to(torch.float16)
# Save model to disk
model.save_pretrained(os.path.join(args.dump_path, model_name))
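# Example invocation of this converter (illustrative; all paths/ids below are hypothetical):
#   python convert_sd_adapter_to_peft.py \
#       --sd_checkpoint runwayml/stable-diffusion-v1-5 \
#       --adapter_path ./kohya_adapter.safetensors \
#       --dump_path ./peft_adapter \
#       --half
# This writes one PEFT adapter folder per model that had weights in the checkpoint, i.e.
# ./peft_adapter/text_encoder and ./peft_adapter/unet.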
| peft/examples/stable_diffusion/convert_sd_adapter_to_peft.py/0 | {
"file_path": "peft/examples/stable_diffusion/convert_sd_adapter_to_peft.py",
"repo_id": "peft",
"token_count": 10390
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import os
import warnings
from dataclasses import asdict, dataclass, field
from typing import Dict, Optional, Union
from huggingface_hub import hf_hub_download
from transformers.utils import PushToHubMixin
from .utils import CONFIG_NAME, PeftType, TaskType
# we expect at least these keys to be present in a PEFT adapter_config.json
MIN_EXPECTED_CONFIG_KEYS = {"peft_type"}
def _check_and_remove_unused_kwargs(cls, kwargs):
"""Make PEFT configs forward-compatible by removing unused kwargs that were added in later PEFT versions.
This assumes that removing the unused kwargs will not affect the default behavior.
Returns the filtered kwargs and the set of removed keys.
"""
# it's not pretty but eh
signature_parameters = inspect.signature(cls.__init__).parameters
unexpected_kwargs = set(kwargs.keys()) - set(signature_parameters.keys())
for key in unexpected_kwargs:
del kwargs[key]
return kwargs, unexpected_kwargs
@dataclass
class PeftConfigMixin(PushToHubMixin):
r"""
This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all
PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to
push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a
directory. The method `from_pretrained` will load the configuration of your adapter model from a directory.
Args:
peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
"""
task_type: Optional[TaskType] = field(default=None, metadata={"help": "The type of task."})
peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."})
auto_mapping: Optional[dict] = field(
default=None, metadata={"help": "An auto mapping dict to help retrieve the base model class if needed."}
)
def __post_init__(self):
# check for invalid task type
if (self.task_type is not None) and (self.task_type not in list(TaskType)):
raise ValueError(
f"Invalid task type: '{self.task_type}'. Must be one of the following task types: {', '.join(TaskType)}."
)
def to_dict(self) -> Dict:
r"""
Returns the configuration for your adapter model as a dictionary.
"""
return asdict(self)
def save_pretrained(self, save_directory: str, **kwargs) -> None:
r"""
This method saves the configuration of your adapter model in a directory.
Args:
save_directory (`str`):
The directory where the configuration will be saved.
kwargs (additional keyword arguments, *optional*):
Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`]
method.
"""
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
os.makedirs(save_directory, exist_ok=True)
auto_mapping_dict = kwargs.pop("auto_mapping_dict", None)
output_dict = self.to_dict()
# converting set type to list
for key, value in output_dict.items():
if isinstance(value, set):
output_dict[key] = list(value)
output_path = os.path.join(save_directory, CONFIG_NAME)
# Add auto mapping details for custom models.
if auto_mapping_dict is not None:
output_dict["auto_mapping"] = auto_mapping_dict
# save it
with open(output_path, "w") as writer:
writer.write(json.dumps(output_dict, indent=2, sort_keys=True))
@classmethod
def from_peft_type(cls, **kwargs):
r"""
This method loads the configuration of your adapter model from a set of kwargs.
The appropriate configuration type is determined by the `peft_type` argument. If `peft_type` is not provided,
the calling class type is instantiated.
Args:
kwargs (configuration keyword arguments):
Keyword arguments passed along to the configuration initialization.
"""
# Avoid circular dependency .. TODO: fix this with a larger refactor
from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING
# TODO: this hack is needed to fix the following issue (on commit 702f937):
# if someone saves a default config and loads it back with `PeftConfig` class it yields to
# not loading the correct config class.
#
# from peft import AdaLoraConfig, PeftConfig
# peft_config = AdaLoraConfig()
# print(peft_config)
# >>> AdaLoraConfig(peft_type=<PeftType.ADALORA: 'ADALORA'>, auto_mapping=None, base_model_name_or_path=None,
# revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ...
#
# peft_config.save_pretrained("./test_config")
# peft_config = PeftConfig.from_pretrained("./test_config")
# print(peft_config)
# >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False)
if "peft_type" in kwargs:
peft_type = kwargs["peft_type"]
config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type]
else:
config_cls = cls
try:
config = config_cls(**kwargs)
except TypeError as exc:
# Here we potentially handle forward compatibility. Sometimes new keywords are added to configs, which makes
# new configs incompatible with older PEFT versions. We catch these and remove them to allow the program to
# continue, but warn the user about it.
# First check if the error is due to unexpected keyword arguments, we don't want to accidentally catch
# other TypeErrors.
if "got an unexpected keyword argument" not in str(exc):
raise exc
filtered_kwargs, unexpected_kwargs = _check_and_remove_unused_kwargs(cls, kwargs)
if not MIN_EXPECTED_CONFIG_KEYS.issubset(set(filtered_kwargs.keys())):
raise TypeError(
f"The {cls.__name__} config that is trying to be loaded is missing required keys: "
f"{MIN_EXPECTED_CONFIG_KEYS}."
)
warnings.warn(
f"Unexpected keyword arguments {sorted(unexpected_kwargs)} for class {cls.__name__}, these are "
"ignored. This probably means that you're loading a configuration file that was saved using a "
"higher version of the library and additional parameters have been introduced since. It is "
"highly recommended to upgrade the PEFT version before continuing (e.g. by running `pip install "
"-U peft`)."
)
config = config_cls.from_peft_type(**filtered_kwargs)
return config
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs):
r"""
This method loads the configuration of your adapter model from a directory.
Args:
pretrained_model_name_or_path (`str`):
The directory or the Hub repository id where the configuration is saved.
kwargs (additional keyword arguments, *optional*):
Additional keyword arguments passed along to the child class initialization.
"""
path = (
os.path.join(pretrained_model_name_or_path, subfolder)
if subfolder is not None
else pretrained_model_name_or_path
)
hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs)
if os.path.isfile(os.path.join(path, CONFIG_NAME)):
config_file = os.path.join(path, CONFIG_NAME)
else:
try:
config_file = hf_hub_download(
pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs
)
except Exception as exc:
raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'") from exc
loaded_attributes = cls.from_json_file(config_file)
kwargs = {**class_kwargs, **loaded_attributes}
kwargs = cls.check_kwargs(**kwargs)
return cls.from_peft_type(**kwargs)
@classmethod
def from_json_file(cls, path_json_file: str, **kwargs):
r"""
Loads a configuration file from a json file.
Args:
path_json_file (`str`):
The path to the json file.
"""
with open(path_json_file) as file:
json_object = json.load(file)
# Sanity check that config does not contain a runtime_config
if "runtime_config" in json_object:
warnings.warn(
"The configuration file contains a `runtime_config` key. This is ignored. Runtime configurations are only valid at runtime."
)
del json_object["runtime_config"]
return json_object
@classmethod
def _split_kwargs(cls, kwargs):
hf_hub_download_kwargs = {}
class_kwargs = {}
other_kwargs = {}
for key, value in kwargs.items():
if key in inspect.signature(hf_hub_download).parameters:
hf_hub_download_kwargs[key] = value
elif key in list(cls.__annotations__):
class_kwargs[key] = value
else:
other_kwargs[key] = value
return hf_hub_download_kwargs, class_kwargs, other_kwargs
@classmethod
def _get_peft_type(
cls,
model_id: str,
**hf_hub_download_kwargs,
):
subfolder = hf_hub_download_kwargs.get("subfolder", None)
path = os.path.join(model_id, subfolder) if subfolder is not None else model_id
if os.path.isfile(os.path.join(path, CONFIG_NAME)):
config_file = os.path.join(path, CONFIG_NAME)
else:
try:
config_file = hf_hub_download(
model_id,
CONFIG_NAME,
**hf_hub_download_kwargs,
)
except Exception:
raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'")
loaded_attributes = cls.from_json_file(config_file)
return loaded_attributes["peft_type"]
@classmethod
def check_kwargs(cls, **kwargs):
"""Check kwargs before initializing the config instance.
Subclasses can override this method to add specific checks.
"""
return kwargs
@property
def is_prompt_learning(self) -> bool:
r"""
Utility method to check if the configuration is for prompt learning.
"""
return False
@property
def is_adaption_prompt(self) -> bool:
"""Return True if this is an adaption prompt config."""
return False
@dataclass
class PeftConfig(PeftConfigMixin):
"""
This is the base configuration class to store the configuration of a [`PeftModel`].
Args:
peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.
inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.
"""
base_model_name_or_path: Optional[str] = field(
default=None, metadata={"help": "The name of the base model to use."}
)
revision: Optional[str] = field(default=None, metadata={"help": "The specific base model version to use."})
peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"})
task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"})
inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"})
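# Illustrative sketch (not part of the original module; kwarg names are hypothetical): keys that the target
# config class does not accept are stripped and reported, which is how `PeftConfigMixin.from_peft_type`
# tolerates config files written by a newer PEFT version.
def _demo_filter_unknown_kwargs():
    kwargs = {"peft_type": "LORA", "task_type": "CAUSAL_LM", "option_from_newer_peft": True}
    filtered, removed = _check_and_remove_unused_kwargs(PeftConfig, kwargs)
    # filtered == {"peft_type": "LORA", "task_type": "CAUSAL_LM"}, removed == {"option_from_newer_peft"}
    return filtered, removed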
@dataclass
class PromptLearningConfig(PeftConfig):
"""
This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or
[`PromptTuning`].
Args:
num_virtual_tokens (`int`): The number of virtual tokens to use.
token_dim (`int`): The hidden embedding dimension of the base transformer model.
num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.
num_attention_heads (`int`): The number of attention heads in the base transformer model.
num_layers (`int`): The number of layers in the base transformer model.
"""
num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"})
token_dim: int = field(
default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"}
)
num_transformer_submodules: Optional[int] = field(
default=None, metadata={"help": "Number of transformer submodules"}
)
num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"})
num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"})
@property
def is_prompt_learning(self) -> bool:
r"""
Utility method to check if the configuration is for prompt learning.
"""
return True
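# Illustrative sketch (hypothetical local directory): a config saved with `save_pretrained` can be loaded
# back through the base class; `from_pretrained` dispatches on the stored `peft_type` via `from_peft_type`,
# so the concrete config class (here the LoRA one) is returned rather than a bare `PeftConfig`.
def _demo_config_round_trip(save_dir: str = "./tmp_peft_config"):
    config = PeftConfig(peft_type=PeftType.LORA, task_type=TaskType.CAUSAL_LM)
    config.save_pretrained(save_dir)
    return PeftConfig.from_pretrained(save_dir)  # an instance of the LoRA config class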
| peft/src/peft/config.py/0 | {
"file_path": "peft/src/peft/config.py",
"repo_id": "peft",
"token_count": 5717
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any, List, Optional
import packaging
import torch
import transformers
from torch import nn
from peft.tuners.lora import LoraLayer
from peft.tuners.tuners_utils import check_adapters_to_merge
from peft.utils import transpose
if packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.33.0"):
from transformers.integrations import deepspeed_config
else:
from transformers.deepspeed import deepspeed_config
class AdaLoraLayer(LoraLayer):
# List all names of layers that may contain adapter weights
# Note: ranknum doesn't need to be included as it is not an nn.Module
adapter_layer_names = ("lora_A", "lora_B", "lora_E", "lora_embedding_A", "lora_embedding_B")
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout", "ranknum")
def __init__(self, base_layer: nn.Module) -> None:
super().__init__(base_layer)
self.lora_E = nn.ParameterDict({})
self.lora_A = nn.ParameterDict({})
self.lora_B = nn.ParameterDict({})
self.ranknum = nn.ParameterDict({})
def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
if r < 0:
# note: r == 0 is allowed for AdaLora, see #1539
raise ValueError(f"`r` should be a positive integer or 0, but the value passed is {r}")
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout[adapter_name] = lora_dropout_layer
# Actual trainable parameters
# Right singular vectors
self.lora_A[adapter_name] = nn.Parameter(torch.randn(r, self.in_features))
# Singular values
self.lora_E[adapter_name] = nn.Parameter(torch.randn(r, 1))
# Left singular vectors
self.lora_B[adapter_name] = nn.Parameter(torch.randn(self.out_features, r))
# The current rank
self.ranknum[adapter_name] = nn.Parameter(torch.randn(1), requires_grad=False)
self.ranknum[adapter_name].data.fill_(float(r))
self.ranknum[adapter_name].requires_grad = False
self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r)
if init_lora_weights:
self.reset_lora_parameters(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_lora_parameters(self, adapter_name):
if adapter_name in self.lora_A.keys():
nn.init.zeros_(self.lora_E[adapter_name])
nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02)
nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02)
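# Illustrative sketch (plain tensors, hypothetical sizes; the scaling / ranknum factors used in
# `SVDLinear.get_delta_weight` are omitted): AdaLoRA parametrizes the update as an SVD-like triplet,
# delta_W = lora_B @ (lora_A * lora_E), where lora_E holds per-rank "singular values" that the rank
# allocator can zero out to prune ranks without changing any tensor shapes.
def _demo_adalora_delta(in_features: int = 16, out_features: int = 32, r: int = 4) -> torch.Tensor:
    lora_A = torch.randn(r, in_features)   # right singular vectors
    lora_E = torch.randn(r, 1)             # singular values (one per rank)
    lora_B = torch.randn(out_features, r)  # left singular vectors
    delta_w = lora_B @ (lora_A * lora_E)
    assert delta_w.shape == (out_features, in_features)
    return delta_w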
class SVDLinear(nn.Module, AdaLoraLayer):
# SVD-based adaptation by a dense layer
def __init__(
self,
base_layer: nn.Module,
adapter_name: str,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
fan_in_fan_out: bool = False,
init_lora_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
AdaLoraLayer.__init__(self, base_layer)
# Freezing the pre-trained weight matrix
self.get_base_layer().weight.requires_grad = False
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
base_layer = self.get_base_layer()
if active_adapter in self.lora_A.keys():
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.lora_A.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def get_delta_weight(self, adapter) -> torch.Tensor:
return (
transpose(self.lora_B[adapter] @ (self.lora_A[adapter] * self.lora_E[adapter]), self.fan_in_fan_out)
* self.scaling[adapter]
/ (self.ranknum[adapter] + 1e-5)
)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
lora_E = self.lora_E[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
ranknum = self.ranknum[active_adapter] + 1e-5
x = self._cast_input_dtype(x, lora_A.dtype)
result += (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "adalora." + rep
class RankAllocator:
"""
The RankAllocator for AdaLoraModel. Paper: https://openreview.net/pdf?id=lq62uWRJjiY
Args:
config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
model: the model that we apply AdaLoRA to.
"""
def __init__(self, model, peft_config, adapter_name):
self.peft_config = peft_config
self.adapter_name = adapter_name
self.beta1 = peft_config.beta1
self.beta2 = peft_config.beta2
assert self.beta1 > 0 and self.beta1 < 1
assert self.beta2 > 0 and self.beta2 < 1
self.reset_ipt()
self._set_budget_scheduler(model)
def set_total_step(self, total_step):
self.peft_config.total_step = total_step
def reset_ipt(self):
self.ipt = {}
self.exp_avg_ipt = {}
self.exp_avg_unc = {}
def _set_budget_scheduler(self, model):
self.init_bgt = 0
self.name_set = set()
for n, p in model.named_parameters():
if f"lora_A.{self.adapter_name}" in n:
self.init_bgt += p.size(0)
self.name_set.add(n.replace("lora_A", "%s"))
self.name_set = sorted(self.name_set)
# The total final rank budget
self.target_bgt = self.peft_config.target_r * len(self.name_set)
def budget_schedule(self, step: int):
tinit = self.peft_config.tinit
tfinal = self.peft_config.tfinal
total_step = self.peft_config.total_step
# Initial warmup
if step <= tinit:
budget = self.init_bgt
mask_ind = False
# Final fine-tuning
elif step > total_step - tfinal:
budget = self.target_bgt
mask_ind = True
else:
# Budget decreasing with a cubic scheduler
mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
budget = int((self.init_bgt - self.target_bgt) * (mul_coeff**3) + self.target_bgt)
mask_ind = True if step % self.peft_config.deltaT == 0 else False
return budget, mask_ind
def update_ipt(self, model):
# Update the sensitivity and uncertainty for every weight
for n, p in model.named_parameters():
if "lora_" in n and self.adapter_name in n:
if n not in self.ipt:
self.ipt[n] = torch.zeros_like(p)
self.exp_avg_ipt[n] = torch.zeros_like(p)
self.exp_avg_unc[n] = torch.zeros_like(p)
with torch.no_grad():
if deepspeed_config() is not None:
import deepspeed
grad = deepspeed.utils.safe_get_full_grad(p)
self.ipt[n] = (p * grad).abs().detach()
else:
self.ipt[n] = (p * p.grad).abs().detach()
# Sensitivity smoothing
self.exp_avg_ipt[n] = self.beta1 * self.exp_avg_ipt[n] + (1 - self.beta1) * self.ipt[n]
# Uncertainty quantification
self.exp_avg_unc[n] = (
self.beta2 * self.exp_avg_unc[n] + (1 - self.beta2) * (self.ipt[n] - self.exp_avg_ipt[n]).abs()
)
def _element_score(self, n):
return self.exp_avg_ipt[n] * self.exp_avg_unc[n]
def _combine_ipt(self, ipt_E, ipt_AB):
ipt_AB = ipt_AB.sum(dim=1, keepdim=False)
sum_ipt = ipt_E.view(-1) + ipt_AB.view(-1)
return sum_ipt
def mask_to_budget(self, model, budget):
value_ipt = {}
vector_ipt = {}
triplet_ipt = {}
# Get the importance score for A, E, B
for n, p in model.named_parameters():
if f"lora_A.{self.adapter_name}" in n:
entry_ipt = self._element_score(n)
comb_ipt = torch.mean(entry_ipt, dim=1, keepdim=True)
name_m = n.replace("lora_A", "%s")
if name_m not in vector_ipt:
vector_ipt[name_m] = [comb_ipt]
else:
vector_ipt[name_m].append(comb_ipt)
if f"lora_B.{self.adapter_name}" in n:
entry_ipt = self._element_score(n)
comb_ipt = torch.mean(entry_ipt, dim=0, keepdim=False).view(-1, 1)
name_m = n.replace("lora_B", "%s")
if name_m not in vector_ipt:
vector_ipt[name_m] = [comb_ipt]
else:
vector_ipt[name_m].append(comb_ipt)
if f"lora_E.{self.adapter_name}" in n:
entry_ipt = self._element_score(n)
name_m = n.replace("lora_E", "%s")
value_ipt[name_m] = entry_ipt
all_score = []
# Calculate the score for each triplet
for name_m in vector_ipt:
ipt_E = value_ipt[name_m]
ipt_AB = torch.cat(vector_ipt[name_m], dim=1)
sum_ipt = self._combine_ipt(ipt_E, ipt_AB)
name_E = name_m % "lora_E"
triplet_ipt[name_E] = sum_ipt.view(-1, 1)
all_score.append(sum_ipt.view(-1))
# Get the threshold by ranking ipt
mask_threshold = torch.kthvalue(
torch.cat(all_score),
k=self.init_bgt - budget,
)[0].item()
rank_pattern = {}
# Mask the unimportant triplets
with torch.no_grad():
for n, p in model.named_parameters():
if f"lora_E.{self.adapter_name}" in n:
p.masked_fill_(triplet_ipt[n] <= mask_threshold, 0.0)
rank_pattern[n] = (~(triplet_ipt[n] <= mask_threshold)).view(-1).tolist()
return rank_pattern
def update_and_allocate(self, model, global_step, force_mask=False):
# Update the importance score and allocate the budget
if global_step < self.peft_config.total_step - self.peft_config.tfinal:
self.update_ipt(model)
budget, mask_ind = self.budget_schedule(global_step)
# Allocate the budget according to importance scores
if mask_ind or force_mask:
rank_pattern = self.mask_to_budget(model, budget)
else:
rank_pattern = None
return budget, rank_pattern
def mask_using_rank_pattern(self, model, rank_pattern):
# Mask the unimportant triplets
is_adapter_name_truncated = False
if self.adapter_name not in next(iter(rank_pattern.keys())):
is_adapter_name_truncated = True
with torch.no_grad():
for n, p in model.named_parameters():
if f"lora_E.{self.adapter_name}" in n:
key = n if not is_adapter_name_truncated else n.replace(f".{self.adapter_name}", "")
mask = torch.Tensor(rank_pattern[key]).unsqueeze(-1).to(p.device)
p.masked_fill_(~mask.bool(), 0.0)
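# Illustrative sketch (standalone restatement of `RankAllocator.budget_schedule` with hypothetical numbers):
# the total rank budget stays at `init_bgt` during warmup, decays along a cubic curve, and is clamped to
# `target_bgt` during the final fine-tuning phase.
def _demo_cubic_budget(init_bgt: int = 144, target_bgt: int = 72, tinit: int = 10, tfinal: int = 10, total_step: int = 100):
    budgets = []
    for step in range(total_step + 1):
        if step <= tinit:
            budget = init_bgt
        elif step > total_step - tfinal:
            budget = target_bgt
        else:
            mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
            budget = int((init_bgt - target_bgt) * (mul_coeff**3) + target_bgt)
        budgets.append(budget)
    return budgets  # non-increasing, from init_bgt down to target_bgt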
| peft/src/peft/tuners/adalora/layer.py/0 | {
"file_path": "peft/src/peft/tuners/adalora/layer.py",
"repo_id": "peft",
"token_count": 7174
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
from typing import Any, List, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
class BoneLayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("bone_block",)
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("bone_r",)
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.bone_r = {}
self.bone_block = nn.ParameterDict({})
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
self.in_features, self.out_features = base_layer.in_features, base_layer.out_features
else:
raise ValueError(f"Unsupported layer type {type(base_layer)}")
def update_layer(
self,
adapter_name: str,
r: int,
init_weights: bool,
**kwargs,
) -> None:
"""Internal function to create bone adapter
Args:
adapter_name (`str`): Name for the adapter to add.
r (`int`): Rank for the added adapter.
init_weights (`bool`): Whether to initialize weights.
"""
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.bone_r[adapter_name] = r
# Determine shape of Bone weights
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
self.bone_block[adapter_name] = nn.Parameter(torch.zeros(r, self.out_features), requires_grad=True)
else:
raise TypeError(f"Bone is not implemented for base layers of type {type(base_layer).__name__}")
# Initialize weights
if init_weights == "bat":
if self.in_features % r != 0 or self.out_features % r != 0:
raise ValueError("The weight matrix must be fully divisible into [r, r] blocks.")
self.reset_bat_parameters(adapter_name, r)
elif init_weights:
self.reset_bone_parameters(adapter_name, r)
else:
self.reset_bone_parameters_random(adapter_name)
# Move new weights to device
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_bone_parameters(self, adapter_name: str, r):
self.bone_block[adapter_name] = nn.Parameter(torch.zeros(r, self.out_features), requires_grad=True)
def reset_bat_parameters(self, adapter_name: str, r):
self.bone_block[adapter_name] = nn.Parameter(torch.zeros(self.out_features // r, r, r), requires_grad=True)
def reset_bone_parameters_random(self, adapter_name: str):
nn.init.kaiming_uniform_(self.bone_block[adapter_name], a=math.sqrt(5))
def scale_layer(self, scale: float) -> None:
if scale == 1:
return
for active_adapter in self.active_adapters:
if active_adapter not in self.bone_block.keys():
continue
warnings.warn("Scaling operation for Bone not supported! Automatically set scale to 1.")
def unscale_layer(self, scale=None) -> None:
for active_adapter in self.active_adapters:
if active_adapter not in self.bone_block.keys():
continue
warnings.warn("Unscaling operation for Bone not supported! Keeping scale at 1.")
class BoneLinear(nn.Module, BoneLayer):
"""
Bone implemented in a dense layer.
"""
def __init__(
self,
base_layer,
adapter_name: str,
r: int = 0,
init_weights: Union[bool, str] = True,
**kwargs,
) -> None:
super().__init__()
BoneLayer.__init__(self, base_layer, **kwargs)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, init_weights, **kwargs)
self.bone_fn = init_weights
def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If `None`, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.bone_block.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weight = base_layer.weight.data.clone()
if self.bone_fn == "bat":
delta_weight = self.get_delta_weight(active_adapter, orig_weight)
orig_weight += delta_weight
else:
delta_weight = self.get_delta_weight_bone(active_adapter, self.base_layer.weight.data)
orig_weight = delta_weight
if not torch.isfinite(orig_weight).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.base_layer.weight.data = orig_weight
else:
if self.bone_fn == "bat":
delta_weight = self.get_delta_weight(active_adapter, self.base_layer.weight.data)
self.base_layer.weight.data += delta_weight
else:
delta_weight = self.get_delta_weight_bone(active_adapter, self.base_layer.weight.data)
self.base_layer.weight.data = delta_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.bone_block.keys():
orig_weight = self.get_base_layer().weight.data.clone()
if self.bone_fn == "bat":
delta_weight = self.get_delta_weight(active_adapter, orig_weight, re=True)
else:
delta_weight = self.get_delta_weight_bone(active_adapter, orig_weight, re=True)
self.get_base_layer().weight.data = delta_weight
def get_delta_weight(self, adapter, orig_weight, re: bool = False) -> torch.Tensor:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
device = self.bone_block[adapter].device
dtype = self.bone_block[adapter].dtype
# In case a user wants to merge adapter weights that are in (b)float16 while being on CPU, we need to
# cast the weights to float32, perform the merge and then cast back to (b)float16, because some CPUs
# have slow bf16/fp16 matmuls.
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_bone = self.bone_block[adapter]
if cast_to_fp32:
weight_bone = weight_bone.float()
r = weight_bone.size(-1)
if re:
o = orig_weight.reshape(orig_weight.size(0) // r, r, orig_weight.size(1) // r, r).permute(2, 0, 1, 3)
one = torch.eye(weight_bone.size(-1)).to(weight_bone.device)
inv_I_plus_b = torch.inverse(one + weight_bone)
w = (o - weight_bone) @ inv_I_plus_b
output_tensor = w.permute(1, 2, 0, 3).reshape(*orig_weight.shape)
else:
w = (
orig_weight.reshape(orig_weight.size(0) // r, r, orig_weight.size(1) // r, r).permute(2, 0, 1, 3)
@ weight_bone
+ weight_bone
)
output_tensor = w.permute(1, 2, 0, 3).reshape(*orig_weight.shape)
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
# cast back the weights
self.bone_block[adapter].data = weight_bone.to(dtype)
return output_tensor
def get_delta_weight_bone(self, adapter, orig_weight, re: bool = False) -> torch.Tensor:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
device = self.bone_block[adapter].device
dtype = self.bone_block[adapter].dtype
# In case a user wants to merge adapter weights that are in (b)float16 while being on CPU, we need to
# cast the weights to float32, perform the merge and then cast back to (b)float16, because some CPUs
# have slow bf16/fp16 matmuls.
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_bone = self.bone_block[adapter]
if cast_to_fp32:
weight_bone = weight_bone.float()
in_features = orig_weight.size(-1)
r = weight_bone.size(0)
if in_features % r != 0:
last_size = in_features % r
n_block = in_features // r
n_block_size = n_block * r
if re:
orig_weight[:, :n_block_size] = (
(orig_weight[:, :n_block_size].reshape(-1, n_block, r).permute(1, 2, 0) - weight_bone)
.permute(2, 0, 1)
.reshape(*orig_weight[:, :n_block_size].shape)
)
orig_weight[:, n_block_size:] = (
orig_weight[:, n_block_size:] - (weight_bone.transpose(0, 1))[:, :last_size]
)
else:
orig_weight[:, :n_block_size] = (
(orig_weight[:, :n_block_size].reshape(-1, n_block, r).permute(1, 2, 0) + weight_bone)
.permute(2, 0, 1)
.reshape(*orig_weight[:, :n_block_size].shape)
)
orig_weight[:, n_block_size:] = (
orig_weight[:, n_block_size:] + (weight_bone.transpose(0, 1))[:, :last_size]
)
output_tensor = orig_weight
else:
if re:
w = orig_weight.reshape(-1, orig_weight.size(1) // r, r).permute(1, 2, 0) - weight_bone
output_tensor = w.permute(2, 0, 1).reshape(*orig_weight.shape)
else:
w = orig_weight.reshape(-1, orig_weight.size(1) // r, r).permute(1, 2, 0) + weight_bone
output_tensor = w.permute(2, 0, 1).reshape(*orig_weight.shape)
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
# cast back the weights
self.bone_block[adapter].data = weight_bone.to(dtype)
return output_tensor
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
if self.bone_fn == "bat":
orig_weight = self.base_layer.weight.data.clone()
for active_adapter in self.active_adapters:
if active_adapter not in self.bone_block.keys():
continue
delta_weight = self.get_delta_weight(active_adapter, orig_weight)
orig_weight = orig_weight + delta_weight
result = F.linear(input=x, weight=orig_weight, bias=self.base_layer.bias)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.bone_block.keys():
continue
bone = self.bone_block[active_adapter]
r = bone.size(0)
if x.size(-1) % r != 0:
padding_size = (r - x.size(-1) % r) % r
x = F.pad(x, (0, padding_size))
result = result + torch.sum(x.reshape(*x.shape[:-1], x.size(-1) // r, r), dim=-2) @ bone
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "bone." + rep
| peft/src/peft/tuners/bone/layer.py/0 | {
"file_path": "peft/src/peft/tuners/bone/layer.py",
"repo_id": "peft",
"token_count": 6693
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any, List, Optional
import torch
import torch.nn as nn
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils import transpose
class IA3Layer(BaseTunerLayer):
# All names of layers that may contain adapter weights
adapter_layer_names = ("ia3_l",)
def __init__(self, base_layer: nn.Module, is_feedforward: bool, **kwargs) -> None:
self.base_layer = base_layer
self.ia3_l = nn.ParameterDict({})
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.is_feedforward = is_feedforward
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
in_features, out_features = base_layer.in_features, base_layer.out_features
elif isinstance(base_layer, (nn.Conv2d, nn.Conv3d)):
in_features, out_features = base_layer.in_channels, base_layer.out_channels
elif isinstance(base_layer, nn.Embedding):
in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim
elif isinstance(base_layer, Conv1D):
in_features, out_features = (
base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
)
else:
raise ValueError(f"Unsupported layer type {type(base_layer)}")
self.in_features = in_features
self.out_features = out_features
def update_layer(self, adapter_name, init_ia3_weights):
# This code works for linear layers, override for other layer types
# Actual trainable parameters
if self.is_feedforward:
weight = torch.randn((1, self.in_features))
else:
weight = torch.randn((self.out_features, 1))
self.ia3_l[adapter_name] = nn.Parameter(weight)
if init_ia3_weights:
self.reset_ia3_parameters(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_ia3_parameters(self, adapter_name):
if adapter_name in self.ia3_l.keys():
# initialize the learned vector to all ones
nn.init.constant_(self.ia3_l[adapter_name], 1.0)
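# Illustrative sketch (plain tensors, hypothetical sizes): for a non-feedforward (IA)^3 layer, the learned
# vector of shape (out_features, 1) rescales the layer outputs, which is why `Linear.merge` below can simply
# multiply it into the base weight instead.
def _demo_ia3_equivalence(in_features: int = 8, out_features: int = 16) -> None:
    weight = torch.randn(out_features, in_features)
    ia3_l = torch.randn(out_features, 1)
    x = torch.randn(3, in_features)
    scaled_output = (x @ weight.T) * ia3_l.flatten()  # scale the activations
    merged_output = x @ (weight * ia3_l).T            # or scale the weight rows once ("merge")
    assert torch.allclose(scaled_output, merged_output, atol=1e-5)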
class Linear(nn.Module, IA3Layer):
# (IA)^3 implemented in a dense layer
def __init__(
self,
base_layer: nn.Module,
adapter_name: str,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer
is_target_conv_1d_layer: bool = False, # whether target module is a conv1d layer. useful while unloading later
init_ia3_weights: bool = True, # whether to initialize IA3 weights
**kwargs,
) -> None:
super().__init__()
IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
self.fan_in_fan_out = fan_in_fan_out
self.is_target_conv_1d_layer = is_target_conv_1d_layer
self._active_adapter = adapter_name
self.update_layer(adapter_name, init_ia3_weights)
def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.ia3_l.keys():
base_layer = self.get_base_layer()
ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out)
orig_dtype = base_layer.weight.data.dtype
if safe_merge:
orig_weights = base_layer.weight.data
orig_weights = torch.mul(orig_weights, ia3_l)
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights.to(orig_dtype)
else:
base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_l).to(orig_dtype)
if not self.is_feedforward and (base_layer.bias is not None):
scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
orig_dtype = base_layer.bias.data.dtype
base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data).to(orig_dtype)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
warnings.warn("Unmerge result can be inaccurate for (IA)^3.")
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.ia3_l.keys():
base_layer = self.get_base_layer()
                # Add tolerance to avoid division by zero
ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) + 1e-8
orig_dtype = base_layer.weight.data.dtype
base_layer.weight.data = torch.div(base_layer.weight.data, ia3_l).to(orig_dtype)
if not self.is_feedforward and (base_layer.bias is not None):
scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
orig_dtype = base_layer.bias.data.dtype
base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8).to(orig_dtype)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
dtype = previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
ia3_scaling = 1
for active_adapter in self.active_adapters:
if active_adapter not in self.ia3_l.keys():
continue
dtype = self.ia3_l[active_adapter].dtype
ia3_scaling *= self.ia3_l[active_adapter].flatten()
if self.is_feedforward:
x = x.to(dtype)
# TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype
# e.g. bf16 vs fp32. Is that okay?
interm = (x * ia3_scaling).to(previous_dtype)
result = self.base_layer(interm, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
result_dtype = result.dtype
result = (result * ia3_scaling).to(result_dtype)
return result
class _ConvNd(nn.Module, IA3Layer):
def __init__(
self,
base_layer: nn.Module,
adapter_name: str,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer
init_ia3_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self._kernel_dim = base_layer.weight.dim()
self.update_layer(adapter_name, init_ia3_weights)
def update_layer(self, adapter_name, init_ia3_weights):
# Actual trainable parameters
num_features = self.in_features if self.is_feedforward else self.out_features
weights_size = (1, num_features) + (1,) * (self._kernel_dim - 2)
weight = torch.randn(weights_size)
self.ia3_l[adapter_name] = nn.Parameter(weight)
if init_ia3_weights:
self.reset_ia3_parameters(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.ia3_l.keys():
base_layer = self.get_base_layer()
ia3_scaling = self.ia3_l[active_adapter].data
if not self.is_feedforward:
ia3_scaling = ia3_scaling.transpose(0, 1)
if safe_merge:
output_weight = torch.mul(base_layer.weight.data, ia3_scaling).clone()
if not torch.isfinite(output_weight).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = output_weight
else:
base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_scaling)
if not self.is_feedforward and (base_layer.bias is not None):
scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
warnings.warn("Unmerge result can be inaccurate for (IA)^3.")
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.ia3_l.keys():
base_layer = self.get_base_layer()
                # divide by (IA)^3 vector. Add tolerance to avoid division by zero
ia3_scaling = self.ia3_l[active_adapter].data
if not self.is_feedforward:
ia3_scaling = ia3_scaling.transpose(0, 1)
base_layer.weight.data = torch.div(base_layer.weight.data, ia3_scaling + 1e-8)
if not self.is_feedforward and (base_layer.bias is not None):
scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
                    base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
dtype = previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
ia3_scaling = 1
for active_adapter in self.active_adapters:
if active_adapter not in self.ia3_l.keys():
continue
dtype = self.ia3_l[active_adapter].dtype
ia3_scaling *= self.ia3_l[active_adapter]
if self.is_feedforward:
x = x.to(dtype)
# TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype
# e.g. bf16 vs fp32. Is that okay?
interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype)
result = self.base_layer(interm, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
result = result.to(dtype) * ia3_scaling
result = result.to(previous_dtype)
return result
class Conv2d(_ConvNd):
# IA3 implemented in a 2D convolutional layer
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self._kernel_dim == 4:
raise ValueError(f"Conv2d layer kernel must have 4 dimensions, not {self._kernel_dim}")
class Conv3d(_ConvNd):
# IA3 implemented in a 3D convolutional layer
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self._kernel_dim == 5:
            raise ValueError(f"Conv3d layer kernel must have 5 dimensions, not {self._kernel_dim}")
| peft/src/peft/tuners/ia3/layer.py/0 | {
"file_path": "peft/src/peft/tuners/ia3/layer.py",
"repo_id": "peft",
"token_count": 6856
} |
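As a sanity check on the merge logic above: for a plain linear layer in the non-feedforward case, scaling the output activations by `ia3_l` is equivalent to scaling the rows of the weight and the bias, which is exactly what `merge` does. A minimal sketch with made-up shapes, not tied to any particular model:

```py
import torch
import torch.nn as nn

torch.manual_seed(0)
base = nn.Linear(8, 4, bias=True)
ia3_l = torch.randn(4, 1)                      # one learned scale per output feature

x = torch.randn(2, 8)
unmerged = base(x) * ia3_l.flatten()           # adapter applied on the activations

merged = nn.Linear(8, 4, bias=True)
merged.weight.data = base.weight.data * ia3_l          # broadcast over the input dim
merged.bias.data = base.bias.data * ia3_l.flatten()    # bias is scaled as well
print(torch.allclose(unmerged, merged(x), atol=1e-6))  # True
```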
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.metadata as importlib_metadata
from typing import Any, Optional
import packaging.version
import torch
from peft.import_utils import is_auto_awq_available
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
class AwqLoraLinear(torch.nn.Module, LoraLayer):
def __init__(
self,
base_layer,
adapter_name,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: bool = True,
use_rslora: bool = False,
use_dora: bool = False,
lora_bias: bool = False,
**kwargs,
):
if use_dora:
raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")
super().__init__()
LoraLayer.__init__(self, base_layer)
# self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
# for backwards compatibility
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
init_lora_weights=init_lora_weights,
use_rslora=use_rslora,
use_dora=use_dora,
lora_bias=lora_bias,
)
def forward(self, x: torch.Tensor):
result = self.quant_linear_module(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = self._cast_input_dtype(x, lora_A.weight.dtype)
output = lora_B(lora_A(dropout(x)))
if requires_conversion:
output = output.to(expected_dtype)
output = output * scaling
result = result + output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "lora." + rep
def dispatch_awq(
target: torch.nn.Module,
adapter_name: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_auto_awq_available():
from awq.modules.linear import WQLinear_GEMM
if isinstance(target_base_layer, WQLinear_GEMM):
# Raise the error only at the dispatch level
AUTOAWQ_MINIMUM_VERSION = packaging.version.parse("0.2.0")
version_autoawq = packaging.version.parse(importlib_metadata.version("autoawq"))
if AUTOAWQ_MINIMUM_VERSION > version_autoawq:
raise ImportError(
f"Found an incompatible version of auto-awq. Found version {version_autoawq}, "
f"but only versions above {AUTOAWQ_MINIMUM_VERSION} are supported for PEFT."
)
new_module = AwqLoraLinear(target, adapter_name, **kwargs)
target.qweight = target_base_layer.qweight
return new_module
| peft/src/peft/tuners/lora/awq.py/0 | {
"file_path": "peft/src/peft/tuners/lora/awq.py",
"repo_id": "peft",
"token_count": 1820
} |
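A hedged usage sketch for the dispatcher above: attaching LoRA adapters to an AWQ-quantized causal LM. The checkpoint name is a placeholder and `autoawq` is assumed to be installed; PEFT then wraps the quantized linears with `AwqLoraLinear` automatically.

```py
from transformers import AutoModelForCausalLM

from peft import LoraConfig, get_peft_model

# hypothetical AWQ-quantized checkpoint, named only for illustration
model = AutoModelForCausalLM.from_pretrained("some-org/some-awq-model", device_map="auto")
config = LoraConfig(task_type="CAUSAL_LM", r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
model = get_peft_model(model, config)  # quantized linears become AwqLoraLinear modules
model.print_trainable_parameters()
```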
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from peft.config import PromptLearningConfig
from peft.utils import PeftType
@dataclass
class PrefixTuningConfig(PromptLearningConfig):
"""
This is the configuration class to store the configuration of a [`PrefixEncoder`].
Args:
encoder_hidden_size (`int`): The hidden size of the prompt encoder.
prefix_projection (`bool`): Whether to project the prefix embeddings.
"""
encoder_hidden_size: int = field(
default=None,
metadata={"help": "The hidden size of the encoder"},
)
prefix_projection: bool = field(
default=False,
metadata={"help": "Whether to project the prefix tokens"},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.PREFIX_TUNING
| peft/src/peft/tuners/prefix_tuning/config.py/0 | {
"file_path": "peft/src/peft/tuners/prefix_tuning/config.py",
"repo_id": "peft",
"token_count": 463
} |
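A minimal usage sketch for this config; the base model and hyperparameter values below are placeholders chosen for illustration, not a recommended recipe.

```py
from transformers import AutoModelForCausalLM

from peft import PrefixTuningConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained("gpt2")
peft_config = PrefixTuningConfig(
    task_type="CAUSAL_LM",
    num_virtual_tokens=20,
    prefix_projection=True,
    encoder_hidden_size=768,  # hidden size of the prefix encoder when projection is enabled
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```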
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import builtins
from typing import Optional, Union
import torch
import torch.nn as nn
from .config import XLoraConfig
Number = Union[builtins.int, builtins.float, builtins.bool]
class TemperatureScaledSoftmax(nn.Module):
def __init__(self, temperature=1.0):
super().__init__()
self.temperature = temperature
self.softmax = nn.Softmax(dim=-1)
def forward(self, logits):
# Scale logits by the temperature
scaled_logits = logits / self.temperature
# Apply softmax to the scaled logits
return self.softmax(scaled_logits)
class XLoraClassifier(nn.Module):
"""
A classifier to select LoRA layers for XLora.
"""
def __init__(
self,
model: nn.Module, # PeftModel
config: XLoraConfig,
n_classes: int,
n_layers: int,
device: torch.device,
):
"""
Construct an X-LoRA classifier from a model, config and some metadata. Note that n_layers is the number of LoRA
adapter layers, not the number of model layers.
"""
super().__init__()
self.n_classes = n_classes
self.n_layers = n_layers
self.config = config
self.log_scalings = []
self.softmax = TemperatureScaledSoftmax(temperature=self.config.softmax_temperature)
self.override_scaling_pass_value: Number = config.scaling_pass_value
self.scalings_logging = False
self.dtype = next(model.parameters()).dtype
add_dropout = config.xlora_dropout_p > 0.0
layers = []
if self.config.xlora_depth == 1:
            if config.layerwise_scalings:  # a single linear layer that directly outputs per-layer scalings
last = nn.Linear(config.hidden_size, n_classes * n_layers, bias=True).to(device).to(self.dtype)
else:
last = nn.Linear(config.hidden_size, n_classes, bias=True).to(device).to(self.dtype)
else:
if self.config.xlora_depth <= 0:
raise ValueError("X-LoRA depth must be strictly positive.")
layers.append(nn.Linear(config.hidden_size, config.xlora_size, bias=True).to(device).to(self.dtype))
layers.append(nn.ReLU())
if add_dropout:
layers.append(nn.Dropout(p=config.xlora_dropout_p))
for _ in range(config.xlora_depth - 2):
layers.append(nn.Linear(config.xlora_size, config.xlora_size, bias=True).to(device).to(self.dtype))
layers.append(nn.ReLU())
if add_dropout:
layers.append(nn.Dropout(p=config.xlora_dropout_p))
if config.layerwise_scalings:
last = nn.Linear(config.xlora_size, n_classes * n_layers, bias=True).to(device).to(self.dtype)
else:
last = nn.Linear(config.xlora_size, n_classes, bias=True).to(device).to(self.dtype)
self.layers = nn.Sequential(*layers, last)
def make_dummy_scalings(
self,
input_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
*args,
**kwargs,
) -> torch.Tensor:
"""
Make some dummy scalings for the scalings pass (the one to get the logits for the X-LoRA classifier). These are
of shape (batch_size, seq_len, n_layers, n_classes) and filled with the override scalings pass value. Note that
n_layers is the number of LoRA adapter layers, not the number of model layers.
"""
if input_ids is not None:
batch_size = input_ids.shape[0]
device = input_ids.device
seq_len = input_ids.shape[1]
else:
batch_size = inputs_embeds.shape[0]
device = inputs_embeds.device
seq_len = inputs_embeds.shape[1]
return torch.full( # type: ignore
(batch_size, seq_len, self.n_layers, self.n_classes),
self.override_scaling_pass_value,
).to(device=device, dtype=self.dtype)
def forward(
self,
result,
input_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
*args,
**kwargs,
) -> torch.Tensor:
"""
Using the hidden states of the model, predict `n_classes` LoRA alpha values. Returns the scalings.
"""
if input_ids is not None:
batch_size = input_ids.shape[0]
seq_len = input_ids.shape[1]
else:
batch_size = inputs_embeds.shape[0]
seq_len = inputs_embeds.shape[1]
hidden_states = result.hidden_states # type: ignore
hidden_state = hidden_states[-1] # Get the last hidden state
### Classifier run
# hidden_state=[batch_size, seq_len, hidden_size]
logits = self.layers.forward(hidden_state)
### Repeat to make layerwise scalings
### If layerwise_scalings=False, then the classifier only outputs logits which are not layer-wise.
### So, we expand them to the correct shape.
if not self.config.layerwise_scalings:
logits = logits.unsqueeze(2)
logits = logits.expand(-1, -1, self.n_layers, -1)
        ### Reshape the logits into per-layer scalings
scalings = logits.reshape(batch_size, seq_len, self.n_layers, self.n_classes)
# scalings = [batch_size, seq_len, n_layers, n_classes]
if self.config.enable_softmax:
scalings = self.softmax(scalings)
if self.scalings_logging:
self.log_scalings.append(scalings)
return scalings
def _get_bucketed_scalings(self) -> dict[int, tuple[list[int], list[torch.Tensor]]]:
"""
Returns bucketed scalings, bucketed by seq_len. Each value consists of the positions (the first) and the
associated tensors. The positions are paired with the associated tensors and give the position in the scaling
log. Each scaling is a tensor of shape (batch_size, seq_len, n_layers, n_classes)).
"""
seqlens_map: dict[int, tuple[list[int], list[torch.Tensor]]] = {}
for i, scaling in enumerate(self.log_scalings):
seq_len = scaling.shape[1]
if seq_len not in seqlens_map:
seqlens_map[seq_len] = ([i], [scaling])
else:
seqlens_map[seq_len][0].append(i)
seqlens_map[seq_len][1].append(scaling)
return seqlens_map
def _set_override_scaling_pass_value(self, value: Union[Number, None]):
if value is None:
self.override_scaling_pass_value = 1 / self.n_classes
else:
self.override_scaling_pass_value = value
self.config.scaling_pass_value = self.override_scaling_pass_value
| peft/src/peft/tuners/xlora/classifier.py/0 | {
"file_path": "peft/src/peft/tuners/xlora/classifier.py",
"repo_id": "peft",
"token_count": 3252
} |
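A small sketch of the tensor shapes produced by `make_dummy_scalings` and the layerwise-expansion step in `forward` above, using made-up sizes:

```py
import torch

# assumed toy sizes, purely for illustration
batch_size, seq_len, n_layers, n_classes = 2, 5, 4, 3
scaling_pass_value = 1 / n_classes  # the default override when none is configured

dummy = torch.full((batch_size, seq_len, n_layers, n_classes), scaling_pass_value)
print(dummy.shape)  # torch.Size([2, 5, 4, 3])

# with layerwise_scalings=False the classifier emits per-class logits only,
# which are then broadcast across the adapter layers:
logits = torch.randn(batch_size, seq_len, n_classes)
expanded = logits.unsqueeze(2).expand(-1, -1, n_layers, -1)
print(expanded.shape)  # torch.Size([2, 5, 4, 3])
```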
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def pytest_addoption(parser):
parser.addoption("--regression", action="store_true", default=False, help="run regression tests")
def pytest_configure(config):
config.addinivalue_line("markers", "regression: mark regression tests")
def pytest_collection_modifyitems(config, items):
if config.getoption("--regression"):
return
skip_regression = pytest.mark.skip(reason="need --regression option to run regression tests")
for item in items:
if "regression" in item.keywords:
item.add_marker(skip_regression)
| peft/tests/conftest.py/0 | {
"file_path": "peft/tests/conftest.py",
"repo_id": "peft",
"token_count": 356
} |
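A short sketch of how the marker registered above is meant to be used (the test itself is illustrative): mark slow regression tests with `@pytest.mark.regression`, and they are skipped unless pytest is invoked with `--regression`.

```py
import pytest


@pytest.mark.regression  # skipped unless the suite is run with `pytest --regression`
def test_regression_example():
    assert 1 + 1 == 2
```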
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/decomposition/tests/test_incremental_pca.py
import pytest
import torch
from datasets import load_dataset
from torch.testing import assert_close
from peft.utils.incremental_pca import IncrementalPCA
torch.manual_seed(1999)
iris = load_dataset("scikit-learn/iris", split="train")
def test_incremental_pca():
# Incremental PCA on dense arrays.
n_components = 2
X = torch.tensor([iris["SepalLengthCm"], iris["SepalWidthCm"], iris["PetalLengthCm"], iris["PetalWidthCm"]]).T
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
ipca.fit(X)
X_transformed = ipca.transform(X)
# PCA
U, S, Vh = torch.linalg.svd(X - torch.mean(X, dim=0))
max_abs_rows = torch.argmax(torch.abs(Vh), dim=1)
signs = torch.sign(Vh[range(Vh.shape[0]), max_abs_rows])
Vh *= signs.view(-1, 1)
explained_variance = S**2 / (X.size(0) - 1)
explained_variance_ratio = explained_variance / explained_variance.sum()
assert X_transformed.shape == (X.shape[0], 2)
assert_close(
ipca.explained_variance_ratio_.sum().item(),
explained_variance_ratio[:n_components].sum().item(),
rtol=1e-3,
atol=1e-3,
)
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
n, p = 100, 3
X = torch.randn(n, p, dtype=torch.float64) * 0.1
X[:10] += torch.tensor([3, 4, 5])
Xt = 0.1 * torch.randn(1, p, dtype=torch.float64) + torch.tensor([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= torch.sqrt((Yt**2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_close(torch.abs(Yt[0][0]).item(), 1.0, atol=1e-1, rtol=1e-1)
def test_incremental_pca_validation():
# Test that n_components is <= n_features.
X = torch.tensor([[0, 1, 0], [1, 0, 0]])
n_samples, n_features = X.shape
n_components = 4
with pytest.raises(
ValueError,
match=(
f"n_components={n_components} invalid"
f" for n_features={n_features}, need more rows than"
" columns for IncrementalPCA"
" processing"
),
):
IncrementalPCA(n_components, batch_size=10).fit(X)
# Tests that n_components is also <= n_samples.
n_components = 3
with pytest.raises(
ValueError,
match=(f"n_components={n_components} must be less or equal to the batch number of samples {n_samples}"),
):
IncrementalPCA(n_components=n_components).partial_fit(X)
def test_n_components_none():
# Ensures that n_components == None is handled correctly
for n_samples, n_features in [(50, 10), (10, 50)]:
X = torch.rand(n_samples, n_features)
ipca = IncrementalPCA(n_components=None)
# First partial_fit call, ipca.n_components_ is inferred from
# min(X.shape)
ipca.partial_fit(X)
assert ipca.n_components == min(X.shape)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
n_samples = 100
X = torch.randn(n_samples, 20)
X2 = torch.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
with pytest.raises(ValueError):
ipca.partial_fit(X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
n_samples = 100
n_features = 3
X = torch.randn(n_samples, n_features)
all_components = []
batch_sizes = torch.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_close(torch.sign(i), torch.sign(j), rtol=1e-6, atol=1e-6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
n_samples = 100
n_features = 3
X = torch.randn(n_samples, n_features)
all_components = []
batch_sizes = torch.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_close(i, j, rtol=1e-1, atol=1e-1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
n, p = 50, 3
X = torch.randn(n, p) # spherical data
X[:, 1] *= 0.00001 # make middle component relatively small
X += torch.tensor([5, 4, 3]) # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = torch.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_close(ipca.components_, pipca.components_, rtol=1e-3, atol=1e-3)
def test_incremental_pca_lowrank():
# Test that lowrank mode is equivalent to non-lowrank mode.
n_components = 2
X = torch.tensor([iris["SepalLengthCm"], iris["SepalWidthCm"], iris["PetalLengthCm"], iris["PetalWidthCm"]]).T
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
ipca.fit(X)
ipcalr = IncrementalPCA(n_components=n_components, batch_size=batch_size, lowrank=True)
ipcalr.fit(X)
assert_close(ipca.components_, ipcalr.components_, rtol=1e-7, atol=1e-7)
| peft/tests/test_incremental_pca.py/0 | {
"file_path": "peft/tests/test_incremental_pca.py",
"repo_id": "peft",
"token_count": 2752
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is not a full-on test suite of vision models, since we already run many tests on dummy models with Conv2d layers
# and on stable diffusion models. Instead, this file contains specific tests for bugs that have been found in the past.
import gc
import numpy as np
import pytest
import torch
from datasets import load_dataset
from safetensors.torch import load_file
from transformers import (
AutoImageProcessor,
AutoModelForImageClassification,
AutoProcessor,
LlavaForConditionalGeneration,
)
from peft import (
HRAConfig,
LoHaConfig,
LoKrConfig,
LoraConfig,
OFTConfig,
PeftModel,
PrefixTuningConfig,
get_peft_model,
)
CONFIGS = {
"lora": LoraConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]),
"loha": LoHaConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]),
"lokr": LoKrConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]),
"oft": OFTConfig(r=1, target_modules=["convolution"], modules_to_save=["classifier", "normalization"]),
"hra": HRAConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]),
# TODO: cannot use BOFT because some convolutional kernel dimensions are even (64) and others odd (147). There is no
# common denominator for the boft_block_size except 1, but using 1 results in an error in the fbd_cuda kernel:
# > Error in forward_fast_block_diag_cuda_kernel: an illegal memory access was encountered
# "boft": BOFTConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"], boft_block_size=2),
}
# Ensure that models like Llava that pass past_key_values automatically do not fail, see #1938
class TestPastKV:
def test_past_kv(self):
model_id = "peft-internal-testing/tiny-LlavaForConditionalGeneration"
prompt = "USER: <image>\nWhat are these?\nASSISTANT:"
# prepare model and inputs
model = LlavaForConditionalGeneration.from_pretrained(
model_id,
low_cpu_mem_usage=True,
)
processor = AutoProcessor.from_pretrained(model_id)
raw_image = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
inputs = processor(prompt, raw_image, return_tensors="pt")
# get peft model
peft_config = PrefixTuningConfig(task_type="CAUSAL_LM", num_virtual_tokens=20)
model.language_model = get_peft_model(model.language_model, peft_config)
# check that this does not raise
model(**inputs, output_hidden_states=True)
class TestResnet:
model_id = "hf-internal-testing/tiny-random-ResNetForImageClassification"
@pytest.fixture(autouse=True)
def teardown(self):
r"""
Efficient mechanism to free GPU memory after each test. Based on
https://github.com/huggingface/transformers/issues/21094
"""
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
@pytest.fixture(scope="class")
def image_processor(self):
image_processor = AutoImageProcessor.from_pretrained(self.model_id)
return image_processor
@pytest.fixture(scope="class")
def data(self, image_processor):
dataset = load_dataset("huggingface/cats-image", trust_remote_code=True)
image = dataset["test"]["image"][0]
return image_processor(image, return_tensors="pt")
@pytest.mark.parametrize("config", CONFIGS.values(), ids=CONFIGS.keys())
def test_model_with_batchnorm_reproducibility(self, config, tmp_path, data):
# see 1732
torch.manual_seed(0)
model = AutoModelForImageClassification.from_pretrained(self.model_id)
model = get_peft_model(model, config)
# record outputs before training
model.eval()
with torch.inference_mode():
output_before = model(**data)
model.train()
# train the model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
batch_size = 4
max_steps = 5 * batch_size
labels = torch.zeros(1, 3)
labels[0, 1] = 1
for i in range(0, max_steps, batch_size):
optimizer.zero_grad()
outputs = model(**data, labels=labels)
loss = outputs.loss
loss.backward()
optimizer.step()
# record outputs after training
model.eval()
with torch.inference_mode():
output_after = model(**data)
assert torch.isfinite(output_after.logits).all()
atol, rtol = 1e-4, 1e-4
# sanity check: model was updated
assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol)
# check saving the model and loading it
model.save_pretrained(tmp_path)
del model
torch.manual_seed(0)
model = AutoModelForImageClassification.from_pretrained(self.model_id)
model = PeftModel.from_pretrained(model, tmp_path).eval()
with torch.inference_mode():
output_loaded = model(**data)
assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol)
# ensure that the checkpoint file contains the buffers
model_running_mean = len([k for k in model.state_dict().keys() if "running_mean" in k])
state_dict = load_file(tmp_path / "adapter_model.safetensors")
checkpoint_running_mean = len([k for k in state_dict.keys() if "running_mean" in k])
# note that the model has twice as many "running_mean", as there is one copy per ModulesToSaveWrapper, we need
# to multiply by 2 to get the same number
assert model_running_mean == checkpoint_running_mean * 2
| peft/tests/test_vision_models.py/0 | {
"file_path": "peft/tests/test_vision_models.py",
"repo_id": "peft",
"token_count": 2462
} |
# Adversarial Inception v3
**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
This particular model was trained for the study of adversarial examples (adversarial training).
The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
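For context on the label-smoothing technique mentioned above, here is a minimal sketch of a smoothed cross-entropy loss; the smoothing value is illustrative and not the exact recipe used to train this checkpoint.

```py
>>> import torch
>>> import torch.nn.functional as F
>>> def label_smoothing_ce(logits, target, smoothing=0.1):
...     n_classes = logits.size(-1)
...     log_probs = F.log_softmax(logits, dim=-1)
...     # mix the one-hot target with a uniform distribution over the other classes
...     smooth = torch.full_like(log_probs, smoothing / (n_classes - 1))
...     smooth.scatter_(-1, target.unsqueeze(-1), 1.0 - smoothing)
...     return -(smooth * log_probs).sum(dim=-1).mean()
>>> loss = label_smoothing_ce(torch.randn(4, 1000), torch.randint(0, 1000, (4,)))
>>> print(loss.shape)
>>> # prints: torch.Size([]) - a scalar loss
```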
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('adv_inception_v3', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `adv_inception_v3`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('adv_inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1804-00097,
author = {Alexey Kurakin and
Ian J. Goodfellow and
Samy Bengio and
Yinpeng Dong and
Fangzhou Liao and
Ming Liang and
Tianyu Pang and
Jun Zhu and
Xiaolin Hu and
Cihang Xie and
Jianyu Wang and
Zhishuai Zhang and
Zhou Ren and
Alan L. Yuille and
Sangxia Huang and
Yao Zhao and
Yuzhe Zhao and
Zhonglin Han and
Junjiajia Long and
Yerkebulan Berdibekov and
Takuya Akiba and
Seiya Tokui and
Motoki Abe},
title = {Adversarial Attacks and Defences Competition},
journal = {CoRR},
volume = {abs/1804.00097},
year = {2018},
url = {http://arxiv.org/abs/1804.00097},
archivePrefix = {arXiv},
eprint = {1804.00097},
timestamp = {Thu, 31 Oct 2019 16:31:22 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: Adversarial Inception v3
Paper:
Title: Adversarial Attacks and Defences Competition
URL: https://paperswithcode.com/paper/adversarial-attacks-and-defences-competition
Models:
- Name: adv_inception_v3
In Collection: Adversarial Inception v3
Metadata:
FLOPs: 7352418880
Parameters: 23830000
File Size: 95549439
Architecture:
- 1x1 Convolution
- Auxiliary Classifier
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inception-v3 Module
- Max Pooling
- ReLU
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: adv_inception_v3
Crop Pct: '0.875'
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_v3.py#L456
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/adv_inception_v3-9e27bd63.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.58%
Top 5 Accuracy: 93.74%
--> | pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2249
} |
# (Gluon) ResNet
**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form a network: e.g. a ResNet-50 has fifty layers using these blocks.
The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html).
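As a concrete illustration of the residual mapping described above, here is a simplified basic block (a sketch, not the exact Gluon/timm implementation):

```py
>>> import torch
>>> import torch.nn as nn
>>> class BasicResidualBlock(nn.Module):
...     def __init__(self, channels):
...         super().__init__()
...         self.conv1 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
...         self.bn1 = nn.BatchNorm2d(channels)
...         self.conv2 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
...         self.bn2 = nn.BatchNorm2d(channels)
...         self.relu = nn.ReLU(inplace=True)
...     def forward(self, x):
...         out = self.relu(self.bn1(self.conv1(x)))
...         out = self.bn2(self.conv2(out))
...         return self.relu(out + x)  # F(x) + x: the residual connection
>>> block = BasicResidualBlock(64)
>>> print(block(torch.randn(1, 64, 56, 56)).shape)
>>> # prints: torch.Size([1, 64, 56, 56])
```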
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('gluon_resnet101_v1b', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `gluon_resnet101_v1b`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('gluon_resnet101_v1b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/HeZRS15,
author = {Kaiming He and
Xiangyu Zhang and
Shaoqing Ren and
Jian Sun},
title = {Deep Residual Learning for Image Recognition},
journal = {CoRR},
volume = {abs/1512.03385},
year = {2015},
url = {http://arxiv.org/abs/1512.03385},
archivePrefix = {arXiv},
eprint = {1512.03385},
timestamp = {Wed, 17 Apr 2019 17:23:45 +0200},
biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: Gloun ResNet
Paper:
Title: Deep Residual Learning for Image Recognition
URL: https://paperswithcode.com/paper/deep-residual-learning-for-image-recognition
Models:
- Name: gluon_resnet101_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 10068547584
Parameters: 44550000
File Size: 178723172
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L89
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.3%
Top 5 Accuracy: 94.53%
- Name: gluon_resnet101_v1c
In Collection: Gloun ResNet
Metadata:
FLOPs: 10376567296
Parameters: 44570000
File Size: 178802575
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1c
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L113
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.53%
Top 5 Accuracy: 94.59%
- Name: gluon_resnet101_v1d
In Collection: Gloun ResNet
Metadata:
FLOPs: 10377018880
Parameters: 44570000
File Size: 178802755
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L138
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.4%
Top 5 Accuracy: 95.02%
- Name: gluon_resnet101_v1s
In Collection: Gloun ResNet
Metadata:
FLOPs: 11805511680
Parameters: 44670000
File Size: 179221777
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1s
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L166
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.29%
Top 5 Accuracy: 95.16%
- Name: gluon_resnet152_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 14857660416
Parameters: 60190000
File Size: 241534001
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L97
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.69%
Top 5 Accuracy: 94.73%
- Name: gluon_resnet152_v1c
In Collection: Gloun ResNet
Metadata:
FLOPs: 15165680128
Parameters: 60210000
File Size: 241613404
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1c
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L121
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.91%
Top 5 Accuracy: 94.85%
- Name: gluon_resnet152_v1d
In Collection: Gloun ResNet
Metadata:
FLOPs: 15166131712
Parameters: 60210000
File Size: 241613584
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L147
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.48%
Top 5 Accuracy: 95.2%
- Name: gluon_resnet152_v1s
In Collection: Gloun ResNet
Metadata:
FLOPs: 16594624512
Parameters: 60320000
File Size: 242032606
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1s
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L175
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.02%
Top 5 Accuracy: 95.42%
- Name: gluon_resnet18_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 2337073152
Parameters: 11690000
File Size: 46816736
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet18_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L65
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 70.84%
Top 5 Accuracy: 89.76%
- Name: gluon_resnet34_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 4718469120
Parameters: 21800000
File Size: 87295112
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet34_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L73
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.59%
Top 5 Accuracy: 92.0%
- Name: gluon_resnet50_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 5282531328
Parameters: 25560000
File Size: 102493763
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L81
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.58%
Top 5 Accuracy: 93.72%
- Name: gluon_resnet50_v1c
In Collection: Gloun ResNet
Metadata:
FLOPs: 5590551040
Parameters: 25580000
File Size: 102573166
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1c
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L105
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.01%
Top 5 Accuracy: 93.99%
- Name: gluon_resnet50_v1d
In Collection: Gloun ResNet
Metadata:
FLOPs: 5591002624
Parameters: 25580000
File Size: 102573346
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L129
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.06%
Top 5 Accuracy: 94.46%
- Name: gluon_resnet50_v1s
In Collection: Gloun ResNet
Metadata:
FLOPs: 7019495424
Parameters: 25680000
File Size: 102992368
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1s
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L156
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.7%
Top 5 Accuracy: 94.25%
--> | pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 7212
} |
# MobileNet v3
**MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block).
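For reference, the two building blocks called out above - the hard swish activation and the squeeze-and-excitation gate - can be sketched in a few lines (simplified relative to the actual timm modules):

```py
>>> import torch
>>> import torch.nn as nn
>>> import torch.nn.functional as F
>>> def hard_swish(x):
...     return x * F.relu6(x + 3.0) / 6.0
>>> class SqueezeExcite(nn.Module):
...     def __init__(self, channels, reduction=4):
...         super().__init__()
...         self.fc1 = nn.Conv2d(channels, channels // reduction, 1)
...         self.fc2 = nn.Conv2d(channels // reduction, channels, 1)
...     def forward(self, x):
...         scale = F.adaptive_avg_pool2d(x, 1)        # squeeze: global context per channel
...         scale = self.fc2(F.relu(self.fc1(scale)))  # excite: per-channel gates
...         return x * torch.sigmoid(scale)            # MobileNetV3 itself uses a hard sigmoid here
>>> x = torch.randn(1, 16, 32, 32)
>>> print(SqueezeExcite(16)(hard_swish(x)).shape)
>>> # prints: torch.Size([1, 16, 32, 32])
```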
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('mobilenetv3_large_100', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `mobilenetv3_large_100`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('mobilenetv3_large_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
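As a rough sketch (assuming a `train_loader` that yields `(images, labels)` batches and the finetuning `model` created above), a minimal training loop looks like:

```py
>>> import torch
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
>>> criterion = torch.nn.CrossEntropyLoss()
>>> model.train()
>>> for images, labels in train_loader:
...     optimizer.zero_grad()
...     loss = criterion(model(images), labels)
...     loss.backward()
...     optimizer.step()
```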
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) to train a new model from scratch.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1905-02244,
author = {Andrew Howard and
Mark Sandler and
Grace Chu and
Liang{-}Chieh Chen and
Bo Chen and
Mingxing Tan and
Weijun Wang and
Yukun Zhu and
Ruoming Pang and
Vijay Vasudevan and
Quoc V. Le and
Hartwig Adam},
title = {Searching for MobileNetV3},
journal = {CoRR},
volume = {abs/1905.02244},
year = {2019},
url = {http://arxiv.org/abs/1905.02244},
archivePrefix = {arXiv},
eprint = {1905.02244},
timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: MobileNet V3
Paper:
Title: Searching for MobileNetV3
URL: https://paperswithcode.com/paper/searching-for-mobilenetv3
Models:
- Name: mobilenetv3_large_100
In Collection: MobileNet V3
Metadata:
FLOPs: 287193752
Parameters: 5480000
File Size: 22076443
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: mobilenetv3_large_100
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L363
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.77%
Top 5 Accuracy: 92.54%
- Name: mobilenetv3_rw
In Collection: MobileNet V3
Metadata:
FLOPs: 287190638
Parameters: 5480000
File Size: 22064048
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: mobilenetv3_rw
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L384
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.62%
Top 5 Accuracy: 92.71%
--> | pytorch-image-models/hfdocs/source/models/mobilenet-v3.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/mobilenet-v3.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2581
} |
# SK-ResNet
**SK ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNet are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner.
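For intuition, the selective-kernel idea can be sketched as two convolution branches with different kernel sizes whose outputs are fused by a learned softmax attention (a simplified sketch only, not timm's exact implementation):

```py
>>> import torch
>>> import torch.nn as nn
>>> class TinySelectiveKernel(nn.Module):
...     def __init__(self, chs, rd=8):
...         super().__init__()
...         # two branches with different receptive fields (3x3 and 5x5)
...         self.paths = nn.ModuleList([nn.Conv2d(chs, chs, k, padding=k // 2) for k in (3, 5)])
...         self.fc = nn.Sequential(nn.Conv2d(chs, chs // rd, 1), nn.ReLU(), nn.Conv2d(chs // rd, chs * 2, 1))
...     def forward(self, x):
...         feats = torch.stack([p(x) for p in self.paths], dim=1)       # B, 2, C, H, W
...         attn = self.fc(feats.sum(1).mean((2, 3), keepdim=True))      # B, 2*C, 1, 1
...         attn = attn.reshape(x.shape[0], 2, -1, 1, 1).softmax(dim=1)  # attention over the two branches
...         return (feats * attn).sum(1)
>>> sk = TinySelectiveKernel(32)
>>> sk(torch.randn(1, 32, 56, 56)).shape
torch.Size([1, 32, 56, 56])
```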
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('skresnet18', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `skresnet18`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('skresnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) to train a new model from scratch.
## Citation
```BibTeX
@misc{li2019selective,
title={Selective Kernel Networks},
author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang},
year={2019},
eprint={1903.06586},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: SKResNet
Paper:
Title: Selective Kernel Networks
URL: https://paperswithcode.com/paper/selective-kernel-networks
Models:
- Name: skresnet18
In Collection: SKResNet
Metadata:
FLOPs: 2333467136
Parameters: 11960000
File Size: 47923238
Architecture:
- Convolution
- Dense Connections
- Global Average Pooling
- Max Pooling
- Residual Connection
- Selective Kernel
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: skresnet18
LR: 0.1
Epochs: 100
Layers: 18
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L148
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 73.03%
Top 5 Accuracy: 91.17%
- Name: skresnet34
In Collection: SKResNet
Metadata:
FLOPs: 4711849952
Parameters: 22280000
File Size: 89299314
Architecture:
- Convolution
- Dense Connections
- Global Average Pooling
- Max Pooling
- Residual Connection
- Selective Kernel
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: skresnet34
LR: 0.1
Epochs: 100
Layers: 34
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L165
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 76.93%
Top 5 Accuracy: 93.32%
--> | pytorch-image-models/hfdocs/source/models/skresnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/skresnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2084
} |
from copy import deepcopy
__all__ = ['get_img_extensions', 'is_img_extension', 'set_img_extensions', 'add_img_extensions', 'del_img_extensions']
IMG_EXTENSIONS = ('.png', '.jpg', '.jpeg') # singleton, kept public for bwd compat use
_IMG_EXTENSIONS_SET = set(IMG_EXTENSIONS) # set version, private, kept in sync
def _set_extensions(extensions):
global IMG_EXTENSIONS
global _IMG_EXTENSIONS_SET
dedupe = set() # NOTE de-duping tuple while keeping original order
IMG_EXTENSIONS = tuple(x for x in extensions if x not in dedupe and not dedupe.add(x))
_IMG_EXTENSIONS_SET = set(extensions)
def _valid_extension(x: str):
return x and isinstance(x, str) and len(x) >= 2 and x.startswith('.')
def is_img_extension(ext):
return ext in _IMG_EXTENSIONS_SET
def get_img_extensions(as_set=False):
return deepcopy(_IMG_EXTENSIONS_SET if as_set else IMG_EXTENSIONS)
def set_img_extensions(extensions):
assert len(extensions)
for x in extensions:
assert _valid_extension(x)
_set_extensions(extensions)
def add_img_extensions(ext):
if not isinstance(ext, (list, tuple, set)):
ext = (ext,)
for x in ext:
assert _valid_extension(x)
extensions = IMG_EXTENSIONS + tuple(ext)
_set_extensions(extensions)
def del_img_extensions(ext):
if not isinstance(ext, (list, tuple, set)):
ext = (ext,)
extensions = tuple(x for x in IMG_EXTENSIONS if x not in ext)
_set_extensions(extensions)
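# Illustrative usage sketch (added for clarity, not part of the original timm file): the helpers
# above keep the tuple (IMG_EXTENSIONS) and set views of the registered extensions in sync.
if __name__ == '__main__':
    add_img_extensions('.webp')
    assert is_img_extension('.webp')
    del_img_extensions('.webp')
    assert '.webp' not in get_img_extensions()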
| pytorch-image-models/timm/data/readers/img_extensions.py/0 | {
"file_path": "pytorch-image-models/timm/data/readers/img_extensions.py",
"repo_id": "pytorch-image-models",
"token_count": 582
} |
""" Activations
A collection of activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
def swish(x, inplace: bool = False):
"""Swish - Described in: https://arxiv.org/abs/1710.05941
"""
return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
class Swish(nn.Module):
def __init__(self, inplace: bool = False):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
return swish(x, self.inplace)
def mish(x, inplace: bool = False):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
NOTE: I don't have a working inplace variant
"""
return x.mul(F.softplus(x).tanh())
class Mish(nn.Module):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
"""
def __init__(self, inplace: bool = False):
super(Mish, self).__init__()
def forward(self, x):
return mish(x)
def sigmoid(x, inplace: bool = False):
return x.sigmoid_() if inplace else x.sigmoid()
# PyTorch has this, but not with a consistent inplace argument interface
class Sigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(Sigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.sigmoid_() if self.inplace else x.sigmoid()
def tanh(x, inplace: bool = False):
return x.tanh_() if inplace else x.tanh()
# PyTorch has this, but not with a consistent inplace argument interface
class Tanh(nn.Module):
def __init__(self, inplace: bool = False):
super(Tanh, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.tanh_() if self.inplace else x.tanh()
def hard_swish(x, inplace: bool = False):
inner = F.relu6(x + 3.).div_(6.)
return x.mul_(inner) if inplace else x.mul(inner)
class HardSwish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_swish(x, self.inplace)
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return F.relu6(x + 3.) / 6.
class HardSigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_sigmoid(x, self.inplace)
def hard_mish(x, inplace: bool = False):
""" Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
if inplace:
return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
else:
return 0.5 * x * (x + 2).clamp(min=0, max=2)
class HardMish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardMish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_mish(x, self.inplace)
class PReLU(nn.PReLU):
"""Applies PReLU (w/ dummy inplace arg)
"""
def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None:
super(PReLU, self).__init__(num_parameters=num_parameters, init=init)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.prelu(input, self.weight)
def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
return F.gelu(x)
class GELU(nn.Module):
"""Applies the Gaussian Error Linear Units function (w/ dummy inplace arg)
"""
def __init__(self, inplace: bool = False):
super(GELU, self).__init__()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.gelu(input)
def gelu_tanh(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
return F.gelu(x, approximate='tanh')
class GELUTanh(nn.Module):
"""Applies the Gaussian Error Linear Units function (w/ dummy inplace arg)
"""
def __init__(self, inplace: bool = False):
super(GELUTanh, self).__init__()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.gelu(input, approximate='tanh')
def quick_gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
return x * torch.sigmoid(1.702 * x)
class QuickGELU(nn.Module):
"""Applies the Gaussian Error Linear Units function (w/ dummy inplace arg)
"""
def __init__(self, inplace: bool = False):
super(QuickGELU, self).__init__()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return quick_gelu(input)
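# Illustrative usage sketch (added for clarity, not part of the original timm file): every module
# here shares the same constructor signature (incl. the dummy `inplace` arg), so activations can
# be swapped without changing calling code.
if __name__ == '__main__':
    x = torch.randn(2, 8)
    for act in (Swish(), Mish(), HardSwish(), HardSigmoid(), QuickGELU()):
        assert act(x).shape == x.shape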
| pytorch-image-models/timm/layers/activations.py/0 | {
"file_path": "pytorch-image-models/timm/layers/activations.py",
"repo_id": "pytorch-image-models",
"token_count": 2008
} |
""" Create Conv2d Factory Method
Hacked together by / Copyright 2020 Ross Wightman
"""
from .mixed_conv2d import MixedConv2d
from .cond_conv2d import CondConv2d
from .conv2d_same import create_conv2d_pad
def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):
""" Select a 2d convolution implementation based on arguments
Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
Used extensively by EfficientNet, MobileNetv3 and related networks.
"""
if isinstance(kernel_size, list):
assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently
if 'groups' in kwargs:
groups = kwargs.pop('groups')
if groups == in_channels:
kwargs['depthwise'] = True
else:
assert groups == 1
# We're going to use only lists for defining the MixedConv2d kernel groups,
# ints, tuples, other iterables will continue to pass to normal conv and specify h, w.
m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs)
else:
depthwise = kwargs.pop('depthwise', False)
# for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0
groups = in_channels if depthwise else kwargs.pop('groups', 1)
if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
else:
m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
return m
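# Illustrative dispatch sketch (added for clarity, not part of the original timm file): the factory
# returns a different conv implementation depending on the kernel_size / num_experts arguments.
if __name__ == '__main__':
    assert isinstance(create_conv2d(32, 32, [3, 5, 7], groups=32), MixedConv2d)  # mixed depthwise conv
    assert isinstance(create_conv2d(32, 64, 3, num_experts=4), CondConv2d)       # conditional conv
    print(type(create_conv2d(32, 64, 3)).__name__)                               # plain / same-pad conv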
| pytorch-image-models/timm/layers/create_conv2d.py/0 | {
"file_path": "pytorch-image-models/timm/layers/create_conv2d.py",
"repo_id": "pytorch-image-models",
"token_count": 652
} |
import torch
from torch import nn as nn
try:
from inplace_abn.functions import inplace_abn, inplace_abn_sync
has_iabn = True
except ImportError:
has_iabn = False
def inplace_abn(x, weight, bias, running_mean, running_var,
training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01):
raise ImportError(
"Please install InplaceABN:'pip install git+https://github.com/mapillary/[email protected]'")
def inplace_abn_sync(**kwargs):
inplace_abn(**kwargs)
class InplaceAbn(nn.Module):
"""Activated Batch Normalization
This gathers a BatchNorm and an activation function in a single module
Parameters
----------
num_features : int
Number of feature channels in the input and output.
eps : float
Small constant to prevent numerical issues.
momentum : float
Momentum factor applied to compute running statistics.
affine : bool
If `True` apply learned scale and shift transformation after normalization.
act_layer : str or nn.Module type
Name or type of the activation functions, one of: `leaky_relu`, `elu`
act_param : float
Negative slope for the `leaky_relu` activation.
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True,
act_layer="leaky_relu", act_param=0.01, drop_layer=None):
super(InplaceAbn, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
self.momentum = momentum
if apply_act:
if isinstance(act_layer, str):
assert act_layer in ('leaky_relu', 'elu', 'identity', '')
self.act_name = act_layer if act_layer else 'identity'
else:
# convert act layer passed as type to string
if act_layer == nn.ELU:
self.act_name = 'elu'
elif act_layer == nn.LeakyReLU:
self.act_name = 'leaky_relu'
elif act_layer is None or act_layer == nn.Identity:
self.act_name = 'identity'
else:
assert False, f'Invalid act layer {act_layer.__name__} for IABN'
else:
self.act_name = 'identity'
self.act_param = act_param
if self.affine:
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.running_mean, 0)
nn.init.constant_(self.running_var, 1)
if self.affine:
nn.init.constant_(self.weight, 1)
nn.init.constant_(self.bias, 0)
def forward(self, x):
output = inplace_abn(
x, self.weight, self.bias, self.running_mean, self.running_var,
self.training, self.momentum, self.eps, self.act_name, self.act_param)
if isinstance(output, tuple):
output = output[0]
return output
| pytorch-image-models/timm/layers/inplace_abn.py/0 | {
"file_path": "pytorch-image-models/timm/layers/inplace_abn.py",
"repo_id": "pytorch-image-models",
"token_count": 1556
} |
""" Position Embedding Utilities
Hacked together by / Copyright 2022 Ross Wightman
"""
import logging
import math
from typing import List, Tuple, Optional, Union
import torch
import torch.nn.functional as F
from .helpers import to_2tuple
_logger = logging.getLogger(__name__)
def resample_abs_pos_embed(
posemb: torch.Tensor,
new_size: List[int],
old_size: Optional[List[int]] = None,
num_prefix_tokens: int = 1,
interpolation: str = 'bicubic',
antialias: bool = True,
verbose: bool = False,
):
# sort out sizes, assume square if old size not provided
num_pos_tokens = posemb.shape[1]
num_new_tokens = new_size[0] * new_size[1] + num_prefix_tokens
if num_new_tokens == num_pos_tokens and new_size[0] == new_size[1]:
return posemb
if old_size is None:
hw = int(math.sqrt(num_pos_tokens - num_prefix_tokens))
old_size = hw, hw
if num_prefix_tokens:
posemb_prefix, posemb = posemb[:, :num_prefix_tokens], posemb[:, num_prefix_tokens:]
else:
posemb_prefix, posemb = None, posemb
# do the interpolation
embed_dim = posemb.shape[-1]
orig_dtype = posemb.dtype
posemb = posemb.float() # interpolate needs float32
posemb = posemb.reshape(1, old_size[0], old_size[1], -1).permute(0, 3, 1, 2)
posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias)
posemb = posemb.permute(0, 2, 3, 1).reshape(1, -1, embed_dim)
posemb = posemb.to(orig_dtype)
# add back extra (class, etc) prefix tokens
if posemb_prefix is not None:
posemb = torch.cat([posemb_prefix, posemb], dim=1)
if not torch.jit.is_scripting() and verbose:
_logger.info(f'Resized position embedding: {old_size} to {new_size}.')
return posemb
def resample_abs_pos_embed_nhwc(
posemb: torch.Tensor,
new_size: List[int],
interpolation: str = 'bicubic',
antialias: bool = True,
verbose: bool = False,
):
if new_size[0] == posemb.shape[-3] and new_size[1] == posemb.shape[-2]:
return posemb
orig_dtype = posemb.dtype
posemb = posemb.float()
posemb = posemb.reshape(1, posemb.shape[-3], posemb.shape[-2], posemb.shape[-1]).permute(0, 3, 1, 2)
posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias)
posemb = posemb.permute(0, 2, 3, 1).to(orig_dtype)
if not torch.jit.is_scripting() and verbose:
_logger.info(f'Resized position embedding: {posemb.shape[-3:-1]} to {new_size}.')
return posemb
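# Illustrative usage sketch (added for clarity, not part of the original timm file): grow a ViT
# position embedding from a 14x14 patch grid (+1 class token) to a 16x16 grid.
if __name__ == '__main__':
    pe = torch.zeros(1, 14 * 14 + 1, 768)
    assert resample_abs_pos_embed(pe, new_size=[16, 16]).shape == (1, 16 * 16 + 1, 768)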
| pytorch-image-models/timm/layers/pos_embed.py/0 | {
"file_path": "pytorch-image-models/timm/layers/pos_embed.py",
"repo_id": "pytorch-image-models",
"token_count": 1128
} |
""" Binary Cross Entropy w/ a few extras
Hacked together by / Copyright 2021 Ross Wightman
"""
from typing import Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
class BinaryCrossEntropy(nn.Module):
""" BCE with optional one-hot from dense targets, label smoothing, thresholding
NOTE for experiments comparing CE to BCE /w label smoothing, may remove
"""
def __init__(
self,
smoothing=0.1,
target_threshold: Optional[float] = None,
weight: Optional[torch.Tensor] = None,
reduction: str = 'mean',
sum_classes: bool = False,
pos_weight: Optional[Union[torch.Tensor, float]] = None,
):
super(BinaryCrossEntropy, self).__init__()
assert 0. <= smoothing < 1.0
if pos_weight is not None:
if not isinstance(pos_weight, torch.Tensor):
pos_weight = torch.tensor(pos_weight)
self.smoothing = smoothing
self.target_threshold = target_threshold
self.reduction = 'none' if sum_classes else reduction
self.sum_classes = sum_classes
self.register_buffer('weight', weight)
self.register_buffer('pos_weight', pos_weight)
def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
batch_size = x.shape[0]
assert batch_size == target.shape[0]
if target.shape != x.shape:
# NOTE currently assume smoothing or other label softening is applied upstream if targets are already sparse
num_classes = x.shape[-1]
# FIXME should off/on be different for smoothing w/ BCE? Other impl out there differ
off_value = self.smoothing / num_classes
on_value = 1. - self.smoothing + off_value
target = target.long().view(-1, 1)
target = torch.full(
(batch_size, num_classes),
off_value,
device=x.device, dtype=x.dtype).scatter_(1, target, on_value)
if self.target_threshold is not None:
# Make target 0, or 1 if threshold set
target = target.gt(self.target_threshold).to(dtype=target.dtype)
loss = F.binary_cross_entropy_with_logits(
x, target,
self.weight,
pos_weight=self.pos_weight,
reduction=self.reduction,
)
if self.sum_classes:
loss = loss.sum(-1).mean()
return loss
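# Illustrative usage sketch (added for clarity, not part of the original timm file): dense
# class-index targets are expanded to smoothed one-hot targets inside forward().
if __name__ == '__main__':
    loss_fn = BinaryCrossEntropy(smoothing=0.1)
    logits = torch.randn(8, 1000)
    targets = torch.randint(0, 1000, (8,))
    print(loss_fn(logits, targets).item())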
| pytorch-image-models/timm/loss/binary_cross_entropy.py/0 | {
"file_path": "pytorch-image-models/timm/loss/binary_cross_entropy.py",
"repo_id": "pytorch-image-models",
"token_count": 1082
} |
""" DeiT - Data-efficient Image Transformers
DeiT model defs and weights from https://github.com/facebookresearch/deit, original copyright below
paper: `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877
paper: `DeiT III: Revenge of the ViT` - https://arxiv.org/abs/2204.07118
Modifications copyright 2021, Ross Wightman
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
from functools import partial
from typing import Optional
import torch
from torch import nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import resample_abs_pos_embed
from timm.models.vision_transformer import VisionTransformer, trunc_normal_, checkpoint_filter_fn
from ._builder import build_model_with_cfg
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['VisionTransformerDistilled'] # model_registry will add each entrypoint fn to this
class VisionTransformerDistilled(VisionTransformer):
""" Vision Transformer w/ Distillation Token and Head
Distillation token & head support for `DeiT: Data-efficient Image Transformers`
- https://arxiv.org/abs/2012.12877
"""
def __init__(self, *args, **kwargs):
weight_init = kwargs.pop('weight_init', '')
super().__init__(*args, **kwargs, weight_init='skip')
assert self.global_pool in ('token',)
self.num_prefix_tokens = 2
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
self.pos_embed = nn.Parameter(
torch.zeros(1, self.patch_embed.num_patches + self.num_prefix_tokens, self.embed_dim))
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()
self.distilled_training = False # must set this True to train w/ distillation token
self.init_weights(weight_init)
def init_weights(self, mode=''):
trunc_normal_(self.dist_token, std=.02)
super().init_weights(mode=mode)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed|dist_token',
blocks=[
(r'^blocks\.(\d+)', None),
(r'^norm', (99999,))] # final norm w/ last block
)
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head, self.head_dist
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.distilled_training = enable
def _pos_embed(self, x):
if self.dynamic_img_size:
B, H, W, C = x.shape
prev_grid_size = self.patch_embed.grid_size
pos_embed = resample_abs_pos_embed(
self.pos_embed,
new_size=(H, W),
old_size=prev_grid_size,
num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens,
)
x = x.view(B, -1, C)
else:
pos_embed = self.pos_embed
if self.no_embed_class:
# deit-3, updated JAX (big vision)
# position embedding does not overlap with class token, add then concat
x = x + pos_embed
x = torch.cat((
self.cls_token.expand(x.shape[0], -1, -1),
self.dist_token.expand(x.shape[0], -1, -1),
x),
dim=1)
else:
# original timm, JAX, and deit vit impl
# pos_embed has entry for class token, concat then add
x = torch.cat((
self.cls_token.expand(x.shape[0], -1, -1),
self.dist_token.expand(x.shape[0], -1, -1),
x),
dim=1)
x = x + pos_embed
return self.pos_drop(x)
def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
x, x_dist = x[:, 0], x[:, 1]
if pre_logits:
return (x + x_dist) / 2
x = self.head(x)
x_dist = self.head_dist(x_dist)
if self.distilled_training and self.training and not torch.jit.is_scripting():
# only return separate classification predictions when training in distilled mode
return x, x_dist
else:
# during standard train / finetune, inference average the classifier predictions
return (x + x_dist) / 2
def _create_deit(variant, pretrained=False, distilled=False, **kwargs):
out_indices = kwargs.pop('out_indices', 3)
model_cls = VisionTransformerDistilled if distilled else VisionTransformer
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
pretrained_filter_fn=partial(checkpoint_filter_fn, adapt_layer_scale=True),
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
# deit models (FB weights)
'deit_tiny_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'),
'deit_small_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'),
'deit_base_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'),
'deit_base_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit_tiny_distilled_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth',
classifier=('head', 'head_dist')),
'deit_small_distilled_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth',
classifier=('head', 'head_dist')),
'deit_base_distilled_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth',
classifier=('head', 'head_dist')),
'deit_base_distilled_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth',
input_size=(3, 384, 384), crop_pct=1.0,
classifier=('head', 'head_dist')),
'deit3_small_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_1k.pth'),
'deit3_small_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_medium_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_1k.pth'),
'deit3_base_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_1k.pth'),
'deit3_base_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_large_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_1k.pth'),
'deit3_large_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_huge_patch14_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_1k.pth'),
'deit3_small_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_21k.pth',
crop_pct=1.0),
'deit3_small_patch16_384.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_21k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_medium_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_21k.pth',
crop_pct=1.0),
'deit3_base_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_21k.pth',
crop_pct=1.0),
'deit3_base_patch16_384.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_21k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_large_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_21k.pth',
crop_pct=1.0),
'deit3_large_patch16_384.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_21k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_huge_patch14_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_21k_v1.pth',
crop_pct=1.0),
})
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
model = _create_deit('deit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6)
model = _create_deit('deit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit('deit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit('deit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
model = _create_deit(
'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6)
model = _create_deit(
'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit(
'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit(
'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit3_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 small model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_small_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 small model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 medium model @ 224x224 (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 base model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_large_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 large model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_large_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 large model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_huge_patch14_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
register_model_deprecations(__name__, {
'deit3_small_patch16_224_in21ft1k': 'deit3_small_patch16_224.fb_in22k_ft_in1k',
'deit3_small_patch16_384_in21ft1k': 'deit3_small_patch16_384.fb_in22k_ft_in1k',
'deit3_medium_patch16_224_in21ft1k': 'deit3_medium_patch16_224.fb_in22k_ft_in1k',
'deit3_base_patch16_224_in21ft1k': 'deit3_base_patch16_224.fb_in22k_ft_in1k',
'deit3_base_patch16_384_in21ft1k': 'deit3_base_patch16_384.fb_in22k_ft_in1k',
'deit3_large_patch16_224_in21ft1k': 'deit3_large_patch16_224.fb_in22k_ft_in1k',
'deit3_large_patch16_384_in21ft1k': 'deit3_large_patch16_384.fb_in22k_ft_in1k',
'deit3_huge_patch14_224_in21ft1k': 'deit3_huge_patch14_224.fb_in22k_ft_in1k'
})
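# Illustrative usage sketch (added for clarity, not part of the original timm file): distilled
# variants average the two heads by default; enabling distilled training returns both sets of logits.
if __name__ == '__main__':
    model = deit_tiny_distilled_patch16_224(pretrained=False)
    model.set_distilled_training(True)
    model.train()
    cls_logits, dist_logits = model(torch.randn(2, 3, 224, 224))
    print(cls_logits.shape, dist_logits.shape)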
| pytorch-image-models/timm/models/deit.py/0 | {
"file_path": "pytorch-image-models/timm/models/deit.py",
"repo_id": "pytorch-image-models",
"token_count": 8370
} |
""" Global Context ViT
From scratch implementation of GCViT in the style of timm swin_transformer_v2_cr.py
Global Context Vision Transformers - https://arxiv.org/abs/2206.09959
@article{hatamizadeh2022global,
title={Global Context Vision Transformers},
author={Hatamizadeh, Ali and Yin, Hongxu and Kautz, Jan and Molchanov, Pavlo},
journal={arXiv preprint arXiv:2206.09959},
year={2022}
}
Free of any code related to NVIDIA GCVit impl at https://github.com/NVlabs/GCVit.
The license for this code release is Apache 2.0 with no commercial restrictions.
However, weight files adapted from NVIDIA GCVit impl ARE under a non-commercial share-alike license
(https://creativecommons.org/licenses/by-nc-sa/4.0/) until I have a chance to train new ones...
Hacked together by / Copyright 2022, Ross Wightman
"""
import math
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, to_2tuple, to_ntuple, Mlp, ClassifierHead, LayerNorm2d, \
get_attn, get_act_layer, get_norm_layer, RelPosBias, _assert
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import named_apply, checkpoint
from ._registry import register_model, generate_default_cfgs
__all__ = ['GlobalContextVit']
class MbConvBlock(nn.Module):
""" A depthwise separable / fused mbconv style residual block with SE, `no norm.
"""
def __init__(
self,
in_chs,
out_chs=None,
expand_ratio=1.0,
attn_layer='se',
bias=False,
act_layer=nn.GELU,
):
super().__init__()
attn_kwargs = dict(act_layer=act_layer)
if isinstance(attn_layer, str) and attn_layer == 'se' or attn_layer == 'eca':
attn_kwargs['rd_ratio'] = 0.25
attn_kwargs['bias'] = False
attn_layer = get_attn(attn_layer)
out_chs = out_chs or in_chs
mid_chs = int(expand_ratio * in_chs)
self.conv_dw = nn.Conv2d(in_chs, mid_chs, 3, 1, 1, groups=in_chs, bias=bias)
self.act = act_layer()
self.se = attn_layer(mid_chs, **attn_kwargs)
self.conv_pw = nn.Conv2d(mid_chs, out_chs, 1, 1, 0, bias=bias)
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
x = self.act(x)
x = self.se(x)
x = self.conv_pw(x)
x = x + shortcut
return x
class Downsample2d(nn.Module):
def __init__(
self,
dim,
dim_out=None,
reduction='conv',
act_layer=nn.GELU,
norm_layer=LayerNorm2d, # NOTE in NCHW
):
super().__init__()
dim_out = dim_out or dim
self.norm1 = norm_layer(dim) if norm_layer is not None else nn.Identity()
self.conv_block = MbConvBlock(dim, act_layer=act_layer)
assert reduction in ('conv', 'max', 'avg')
if reduction == 'conv':
self.reduction = nn.Conv2d(dim, dim_out, 3, 2, 1, bias=False)
elif reduction == 'max':
assert dim == dim_out
self.reduction = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
else:
assert dim == dim_out
self.reduction = nn.AvgPool2d(kernel_size=2)
self.norm2 = norm_layer(dim_out) if norm_layer is not None else nn.Identity()
def forward(self, x):
x = self.norm1(x)
x = self.conv_block(x)
x = self.reduction(x)
x = self.norm2(x)
return x
class FeatureBlock(nn.Module):
def __init__(
self,
dim,
levels=0,
reduction='max',
act_layer=nn.GELU,
):
super().__init__()
reductions = levels
levels = max(1, levels)
if reduction == 'avg':
pool_fn = partial(nn.AvgPool2d, kernel_size=2)
else:
pool_fn = partial(nn.MaxPool2d, kernel_size=3, stride=2, padding=1)
self.blocks = nn.Sequential()
for i in range(levels):
self.blocks.add_module(f'conv{i+1}', MbConvBlock(dim, act_layer=act_layer))
if reductions:
self.blocks.add_module(f'pool{i+1}', pool_fn())
reductions -= 1
def forward(self, x):
return self.blocks(x)
class Stem(nn.Module):
def __init__(
self,
in_chs: int = 3,
out_chs: int = 96,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm2d, # NOTE stem in NCHW
):
super().__init__()
self.conv1 = nn.Conv2d(in_chs, out_chs, kernel_size=3, stride=2, padding=1)
self.down = Downsample2d(out_chs, act_layer=act_layer, norm_layer=norm_layer)
def forward(self, x):
x = self.conv1(x)
x = self.down(x)
return x
class WindowAttentionGlobal(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
window_size: Tuple[int, int],
use_global: bool = True,
qkv_bias: bool = True,
attn_drop: float = 0.,
proj_drop: float = 0.,
):
super().__init__()
window_size = to_2tuple(window_size)
self.window_size = window_size
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.use_global = use_global
self.rel_pos = RelPosBias(window_size=window_size, num_heads=num_heads)
if self.use_global:
self.qkv = nn.Linear(dim, dim * 2, bias=qkv_bias)
else:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, q_global: Optional[torch.Tensor] = None):
B, N, C = x.shape
if self.use_global and q_global is not None:
_assert(x.shape[-1] == q_global.shape[-1], 'x and q_global seq lengths should be equal')
kv = self.qkv(x)
kv = kv.reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
k, v = kv.unbind(0)
q = q_global.repeat(B // q_global.shape[0], 1, 1, 1)
q = q.reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
else:
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
q = q * self.scale
attn = q @ k.transpose(-2, -1).contiguous() # NOTE contiguous() fixes an odd jit bug in PyTorch 2.0
attn = self.rel_pos(attn)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def window_partition(x, window_size: Tuple[int, int]):
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]):
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class GlobalContextVitBlock(nn.Module):
def __init__(
self,
dim: int,
feat_size: Tuple[int, int],
num_heads: int,
window_size: int = 7,
mlp_ratio: float = 4.,
use_global: bool = True,
qkv_bias: bool = True,
layer_scale: Optional[float] = None,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
attn_layer: Callable = WindowAttentionGlobal,
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
):
super().__init__()
feat_size = to_2tuple(feat_size)
window_size = to_2tuple(window_size)
self.window_size = window_size
self.num_windows = int((feat_size[0] // window_size[0]) * (feat_size[1] // window_size[1]))
self.norm1 = norm_layer(dim)
self.attn = attn_layer(
dim,
num_heads=num_heads,
window_size=window_size,
use_global=use_global,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.ls1 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop)
self.ls2 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _window_attn(self, x, q_global: Optional[torch.Tensor] = None):
B, H, W, C = x.shape
x_win = window_partition(x, self.window_size)
x_win = x_win.view(-1, self.window_size[0] * self.window_size[1], C)
attn_win = self.attn(x_win, q_global)
x = window_reverse(attn_win, self.window_size, (H, W))
return x
def forward(self, x, q_global: Optional[torch.Tensor] = None):
x = x + self.drop_path1(self.ls1(self._window_attn(self.norm1(x), q_global)))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class GlobalContextVitStage(nn.Module):
def __init__(
self,
dim,
depth: int,
num_heads: int,
feat_size: Tuple[int, int],
window_size: Tuple[int, int],
downsample: bool = True,
global_norm: bool = False,
stage_norm: bool = False,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
layer_scale: Optional[float] = None,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: Union[List[float], float] = 0.0,
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
norm_layer_cl: Callable = LayerNorm2d,
):
super().__init__()
if downsample:
self.downsample = Downsample2d(
dim=dim,
dim_out=dim * 2,
norm_layer=norm_layer,
)
dim = dim * 2
feat_size = (feat_size[0] // 2, feat_size[1] // 2)
else:
self.downsample = nn.Identity()
self.feat_size = feat_size
window_size = to_2tuple(window_size)
feat_levels = int(math.log2(min(feat_size) / min(window_size)))
self.global_block = FeatureBlock(dim, feat_levels)
self.global_norm = norm_layer_cl(dim) if global_norm else nn.Identity()
self.blocks = nn.ModuleList([
GlobalContextVitBlock(
dim=dim,
num_heads=num_heads,
feat_size=feat_size,
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
use_global=(i % 2 != 0),
layer_scale=layer_scale,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
act_layer=act_layer,
norm_layer=norm_layer_cl,
)
for i in range(depth)
])
self.norm = norm_layer_cl(dim) if stage_norm else nn.Identity()
self.dim = dim
self.feat_size = feat_size
self.grad_checkpointing = False
def forward(self, x):
# input NCHW, downsample & global block are 2d conv + pooling
x = self.downsample(x)
global_query = self.global_block(x)
# reshape NCHW --> NHWC for transformer blocks
x = x.permute(0, 2, 3, 1)
global_query = self.global_norm(global_query.permute(0, 2, 3, 1))
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x, global_query)
x = self.norm(x)
x = x.permute(0, 3, 1, 2).contiguous() # back to NCHW
return x
class GlobalContextVit(nn.Module):
def __init__(
self,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
img_size: Tuple[int, int] = 224,
window_ratio: Tuple[int, ...] = (32, 32, 16, 32),
window_size: Tuple[int, ...] = None,
embed_dim: int = 64,
depths: Tuple[int, ...] = (3, 4, 19, 5),
num_heads: Tuple[int, ...] = (2, 4, 8, 16),
mlp_ratio: float = 3.0,
qkv_bias: bool = True,
layer_scale: Optional[float] = None,
drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
weight_init='',
act_layer: str = 'gelu',
norm_layer: str = 'layernorm2d',
norm_layer_cl: str = 'layernorm',
norm_eps: float = 1e-5,
):
super().__init__()
act_layer = get_act_layer(act_layer)
norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps)
norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps)
img_size = to_2tuple(img_size)
feat_size = tuple(d // 4 for d in img_size) # stem reduction by 4
self.global_pool = global_pool
self.num_classes = num_classes
self.drop_rate = drop_rate
num_stages = len(depths)
self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (num_stages - 1))
if window_size is not None:
window_size = to_ntuple(num_stages)(window_size)
else:
assert window_ratio is not None
window_size = tuple([(img_size[0] // r, img_size[1] // r) for r in to_ntuple(num_stages)(window_ratio)])
self.stem = Stem(
in_chs=in_chans,
out_chs=embed_dim,
act_layer=act_layer,
norm_layer=norm_layer
)
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
for i in range(num_stages):
last_stage = i == num_stages - 1
stage_scale = 2 ** max(i - 1, 0)
stages.append(GlobalContextVitStage(
dim=embed_dim * stage_scale,
depth=depths[i],
num_heads=num_heads[i],
feat_size=(feat_size[0] // stage_scale, feat_size[1] // stage_scale),
window_size=window_size[i],
downsample=i != 0,
stage_norm=last_stage,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
layer_scale=layer_scale,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
act_layer=act_layer,
norm_layer=norm_layer,
norm_layer_cl=norm_layer_cl,
))
self.stages = nn.Sequential(*stages)
# Classifier head
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
if weight_init:
named_apply(partial(self._init_weights, scheme=weight_init), self)
def _init_weights(self, module, name, scheme='vit'):
# note Conv2d left as default init
if scheme == 'vit':
if isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
else:
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
@torch.jit.ignore
def no_weight_decay(self):
return {
k for k, _ in self.named_parameters()
if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=r'^stages\.(\d+)'
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is None:
global_pool = self.head.global_pool.pool_type
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.stem(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_gcvit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(GlobalContextVit, variant, pretrained, **kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1', 'classifier': 'head.fc',
'fixed_input_size': True,
**kwargs
}
default_cfgs = generate_default_cfgs({
'gcvit_xxtiny.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xxtiny_224_nvidia-d1d86009.pth'),
'gcvit_xtiny.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xtiny_224_nvidia-274b92b7.pth'),
'gcvit_tiny.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_tiny_224_nvidia-ac783954.pth'),
'gcvit_small.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_small_224_nvidia-4e98afa2.pth'),
'gcvit_base.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_base_224_nvidia-f009139b.pth'),
})
@register_model
def gcvit_xxtiny(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(2, 2, 6, 2),
num_heads=(2, 4, 8, 16),
**kwargs)
return _create_gcvit('gcvit_xxtiny', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_xtiny(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 6, 5),
num_heads=(2, 4, 8, 16),
**kwargs)
return _create_gcvit('gcvit_xtiny', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_tiny(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 19, 5),
num_heads=(2, 4, 8, 16),
**kwargs)
return _create_gcvit('gcvit_tiny', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_small(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 19, 5),
num_heads=(3, 6, 12, 24),
embed_dim=96,
mlp_ratio=2,
layer_scale=1e-5,
**kwargs)
return _create_gcvit('gcvit_small', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_base(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 19, 5),
num_heads=(4, 8, 16, 32),
embed_dim=128,
mlp_ratio=2,
layer_scale=1e-5,
**kwargs)
return _create_gcvit('gcvit_base', pretrained=pretrained, **model_kwargs)
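def _example_usage():
    # Hypothetical usage sketch (not part of the original file): the registered variants
    # above can be built directly as shown here, or via timm.create_model('gcvit_tiny', ...).
    model = gcvit_tiny(pretrained=False).eval()
    logits = model(torch.randn(1, 3, 224, 224))  # fixed 224x224 input -> (1, 1000) logits
    return logits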
# (end of pytorch-image-models/timm/models/gcvit.py)
""" MaxVit and CoAtNet Vision Transformer - CNN Hybrids in PyTorch
This is a from-scratch implementation of both CoAtNet and MaxVit in PyTorch.
99% of the implementation was done from the papers; some last-minute adjustments were made
based on the (as yet unfinished?) public code release https://github.com/google-research/maxvit
There are multiple sets of models defined for both architectures. Typically, names with a
`_rw` suffix are my own original configs, created prior to referencing https://github.com/google-research/maxvit.
These configs work well and appear to be a bit faster / lower resource than the paper variants.
The models without an extra prefix / suffix (coatnet_0_224, maxvit_tiny_224, etc) are intended to
match the paper but, without any official pretrained weights, it's difficult to confirm a 100% match.
Papers:
MaxViT: Multi-Axis Vision Transformer - https://arxiv.org/abs/2204.01697
@article{tu2022maxvit,
title={MaxViT: Multi-Axis Vision Transformer},
author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao},
journal={ECCV},
year={2022},
}
CoAtNet: Marrying Convolution and Attention for All Data Sizes - https://arxiv.org/abs/2106.04803
@article{DBLP:journals/corr/abs-2106-04803,
author = {Zihang Dai and Hanxiao Liu and Quoc V. Le and Mingxing Tan},
title = {CoAtNet: Marrying Convolution and Attention for All Data Sizes},
journal = {CoRR},
volume = {abs/2106.04803},
year = {2021}
}
Hacked together by / Copyright 2022, Ross Wightman
"""
import math
from collections import OrderedDict
from dataclasses import dataclass, replace, field
from functools import partial
from typing import Callable, Optional, Union, Tuple, List
import torch
from torch import nn
from torch.jit import Final
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import Mlp, ConvMlp, DropPath, LayerNorm, ClassifierHead, NormMlpClassifierHead
from timm.layers import create_attn, get_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d, create_pool2d
from timm.layers import trunc_normal_tf_, to_2tuple, extend_tuple, make_divisible, _assert
from timm.layers import RelPosMlp, RelPosBias, RelPosBiasTf, use_fused_attn, resize_rel_pos_bias_table
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_function
from ._manipulate import named_apply, checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['MaxxVitCfg', 'MaxxVitConvCfg', 'MaxxVitTransformerCfg', 'MaxxVit']
@dataclass
class MaxxVitTransformerCfg:
dim_head: int = 32
head_first: bool = True # head ordering in qkv channel dim
expand_ratio: float = 4.0
expand_first: bool = True
shortcut_bias: bool = True
attn_bias: bool = True
attn_drop: float = 0.
proj_drop: float = 0.
pool_type: str = 'avg2'
rel_pos_type: str = 'bias'
rel_pos_dim: int = 512 # for relative position types w/ MLP
partition_ratio: int = 32
window_size: Optional[Tuple[int, int]] = None
grid_size: Optional[Tuple[int, int]] = None
no_block_attn: bool = False # disable window block attention for maxvit (ie only grid)
use_nchw_attn: bool = False # for MaxViT variants (not used for CoAt), keep tensors in NCHW order
init_values: Optional[float] = None
act_layer: str = 'gelu'
norm_layer: str = 'layernorm2d'
norm_layer_cl: str = 'layernorm'
norm_eps: float = 1e-6
def __post_init__(self):
if self.grid_size is not None:
self.grid_size = to_2tuple(self.grid_size)
if self.window_size is not None:
self.window_size = to_2tuple(self.window_size)
if self.grid_size is None:
self.grid_size = self.window_size
@dataclass
class MaxxVitConvCfg:
block_type: str = 'mbconv'
expand_ratio: float = 4.0
expand_output: bool = True # calculate expansion channels from output (vs input chs)
kernel_size: int = 3
group_size: int = 1 # 1 == depthwise
pre_norm_act: bool = False # activation after pre-norm
output_bias: bool = True # bias for shortcut + final 1x1 projection conv
stride_mode: str = 'dw' # stride done via one of 'pool', '1x1', 'dw'
pool_type: str = 'avg2'
downsample_pool_type: str = 'avg2'
padding: str = ''
attn_early: bool = False # apply attn between conv2 and norm2, instead of after norm2
attn_layer: str = 'se'
attn_act_layer: str = 'silu'
attn_ratio: float = 0.25
init_values: Optional[float] = 1e-6 # for ConvNeXt block, ignored by MBConv
act_layer: str = 'gelu'
norm_layer: str = ''
norm_layer_cl: str = ''
norm_eps: Optional[float] = None
def __post_init__(self):
# mbconv vs convnext blocks have different defaults, set in post_init to avoid explicit config args
assert self.block_type in ('mbconv', 'convnext')
use_mbconv = self.block_type == 'mbconv'
if not self.norm_layer:
self.norm_layer = 'batchnorm2d' if use_mbconv else 'layernorm2d'
if not self.norm_layer_cl and not use_mbconv:
self.norm_layer_cl = 'layernorm'
if self.norm_eps is None:
self.norm_eps = 1e-5 if use_mbconv else 1e-6
self.downsample_pool_type = self.downsample_pool_type or self.pool_type
@dataclass
class MaxxVitCfg:
embed_dim: Tuple[int, ...] = (96, 192, 384, 768)
depths: Tuple[int, ...] = (2, 3, 5, 2)
block_type: Tuple[Union[str, Tuple[str, ...]], ...] = ('C', 'C', 'T', 'T')
stem_width: Union[int, Tuple[int, int]] = 64
stem_bias: bool = False
conv_cfg: MaxxVitConvCfg = field(default_factory=MaxxVitConvCfg)
transformer_cfg: MaxxVitTransformerCfg = field(default_factory=MaxxVitTransformerCfg)
    head_hidden_size: Optional[int] = None
weight_init: str = 'vit_eff'
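# NOTE: block_type codes select the per-stage block classes built in MaxxVitStage below:
#   'C' = conv block (MBConv or ConvNeXt), 'T' = TransformerBlock2d,
#   'M' = MaxxVitBlock (window + grid attention), 'PM' = ParallelMaxxVitBlock.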
class Attention2d(nn.Module):
    """ multi-head attention for 2D NCHW tensors"""
    fused_attn: Final[bool]
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
dim_head: int = 32,
bias: bool = True,
expand_first: bool = True,
head_first: bool = True,
            rel_pos_cls: Optional[Callable] = None,
attn_drop: float = 0.,
proj_drop: float = 0.
):
super().__init__()
dim_out = dim_out or dim
dim_attn = dim_out if expand_first else dim
self.num_heads = dim_attn // dim_head
self.dim_head = dim_head
self.head_first = head_first
self.scale = dim_head ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias)
self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
B, C, H, W = x.shape
if self.head_first:
q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2)
else:
q, k, v = self.qkv(x).reshape(B, 3, self.num_heads, self.dim_head, -1).unbind(1)
if self.fused_attn:
attn_bias = None
if self.rel_pos is not None:
attn_bias = self.rel_pos.get_bias()
elif shared_rel_pos is not None:
attn_bias = shared_rel_pos
x = torch.nn.functional.scaled_dot_product_attention(
q.transpose(-1, -2).contiguous(),
k.transpose(-1, -2).contiguous(),
v.transpose(-1, -2).contiguous(),
attn_mask=attn_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
).transpose(-1, -2).reshape(B, -1, H, W)
else:
q = q * self.scale
attn = q.transpose(-2, -1) @ k
if self.rel_pos is not None:
attn = self.rel_pos(attn)
elif shared_rel_pos is not None:
attn = attn + shared_rel_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W)
x = self.proj(x)
x = self.proj_drop(x)
return x
class AttentionCl(nn.Module):
""" Channels-last multi-head attention (B, ..., C) """
fused_attn: Final[bool]
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
dim_head: int = 32,
bias: bool = True,
expand_first: bool = True,
head_first: bool = True,
            rel_pos_cls: Optional[Callable] = None,
attn_drop: float = 0.,
proj_drop: float = 0.
):
super().__init__()
dim_out = dim_out or dim
dim_attn = dim_out if expand_first and dim_out > dim else dim
assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim'
self.num_heads = dim_attn // dim_head
self.dim_head = dim_head
self.head_first = head_first
self.scale = dim_head ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim_attn * 3, bias=bias)
self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim_attn, dim_out, bias=bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
B = x.shape[0]
restore_shape = x.shape[:-1]
if self.head_first:
q, k, v = self.qkv(x).view(B, -1, self.num_heads, self.dim_head * 3).transpose(1, 2).chunk(3, dim=3)
else:
q, k, v = self.qkv(x).reshape(B, -1, 3, self.num_heads, self.dim_head).transpose(1, 3).unbind(2)
if self.fused_attn:
attn_bias = None
if self.rel_pos is not None:
attn_bias = self.rel_pos.get_bias()
elif shared_rel_pos is not None:
attn_bias = shared_rel_pos
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if self.rel_pos is not None:
attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos)
elif shared_rel_pos is not None:
attn = attn + shared_rel_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(restore_shape + (-1,))
x = self.proj(x)
x = self.proj_drop(x)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma
return x.mul_(gamma) if self.inplace else x * gamma
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma.view(1, -1, 1, 1)
return x.mul_(gamma) if self.inplace else x * gamma
class Downsample2d(nn.Module):
""" A downsample pooling module supporting several maxpool and avgpool modes
* 'max' - MaxPool2d w/ kernel_size 3, stride 2, padding 1
* 'max2' - MaxPool2d w/ kernel_size = stride = 2
* 'avg' - AvgPool2d w/ kernel_size 3, stride 2, padding 1
* 'avg2' - AvgPool2d w/ kernel_size = stride = 2
"""
def __init__(
self,
dim: int,
dim_out: int,
pool_type: str = 'avg2',
padding: str = '',
bias: bool = True,
):
super().__init__()
assert pool_type in ('max', 'max2', 'avg', 'avg2')
if pool_type == 'max':
self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=padding or 1)
elif pool_type == 'max2':
self.pool = create_pool2d('max', 2, padding=padding or 0) # kernel_size == stride == 2
elif pool_type == 'avg':
self.pool = create_pool2d(
'avg', kernel_size=3, stride=2, count_include_pad=False, padding=padding or 1)
else:
self.pool = create_pool2d('avg', 2, padding=padding or 0)
if dim != dim_out:
self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias)
else:
self.expand = nn.Identity()
def forward(self, x):
x = self.pool(x) # spatial downsample
x = self.expand(x) # expand chs
return x
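def _downsample2d_example():
    # Illustrative sketch (not part of the original timm source): 'avg2' pooling halves
    # H and W, and the 1x1 'expand' conv adjusts channels when dim != dim_out.
    x = torch.randn(1, 64, 32, 32)
    down = Downsample2d(64, 128, pool_type='avg2')
    assert down(x).shape == (1, 128, 16, 16)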
def _init_transformer(module, name, scheme=''):
if isinstance(module, (nn.Conv2d, nn.Linear)):
if scheme == 'normal':
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'trunc_normal':
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'xavier_normal':
nn.init.xavier_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# vit like
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
class TransformerBlock2d(nn.Module):
""" Transformer block with 2D downsampling
'2D' NCHW tensor layout
    Some gains can be seen on GPU using a 1D / CL block, BUT w/ the need to switch back/forth to NCHW
    for spatial pooling, the benefit is minimal, so I ended up using just this variant for CoAt configs.
This impl was faster on TPU w/ PT XLA than the 1D experiment.
"""
def __init__(
self,
dim: int,
dim_out: int,
stride: int = 1,
            rel_pos_cls: Optional[Callable] = None,
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
act_layer = get_act_layer(cfg.act_layer)
if stride == 2:
self.shortcut = Downsample2d(dim, dim_out, pool_type=cfg.pool_type, bias=cfg.shortcut_bias)
self.norm1 = nn.Sequential(OrderedDict([
('norm', norm_layer(dim)),
('down', Downsample2d(dim, dim, pool_type=cfg.pool_type)),
]))
else:
assert dim == dim_out
self.shortcut = nn.Identity()
self.norm1 = norm_layer(dim)
self.attn = Attention2d(
dim,
dim_out,
dim_head=cfg.dim_head,
expand_first=cfg.expand_first,
bias=cfg.attn_bias,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop
)
self.ls1 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim_out)
self.mlp = ConvMlp(
in_features=dim_out,
hidden_features=int(dim_out * cfg.expand_ratio),
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def init_weights(self, scheme=''):
named_apply(partial(_init_transformer, scheme=scheme), self)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
x = self.shortcut(x) + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos)))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
def _init_conv(module, name, scheme=''):
if isinstance(module, nn.Conv2d):
if scheme == 'normal':
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'trunc_normal':
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'xavier_normal':
nn.init.xavier_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# efficientnet like
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
nn.init.zeros_(module.bias)
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
class MbConvBlock(nn.Module):
""" Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand)
"""
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
drop_path: float = 0.
):
super(MbConvBlock, self).__init__()
norm_act_layer = partial(get_norm_act_layer(cfg.norm_layer, cfg.act_layer), eps=cfg.norm_eps)
mid_chs = make_divisible((out_chs if cfg.expand_output else in_chs) * cfg.expand_ratio)
groups = num_groups(cfg.group_size, mid_chs)
if stride == 2:
self.shortcut = Downsample2d(
in_chs, out_chs, pool_type=cfg.pool_type, bias=cfg.output_bias, padding=cfg.padding)
else:
self.shortcut = nn.Identity()
assert cfg.stride_mode in ('pool', '1x1', 'dw')
stride_pool, stride_1, stride_2 = 1, 1, 1
if cfg.stride_mode == 'pool':
# NOTE this is not described in paper, experiment to find faster option that doesn't stride in 1x1
stride_pool, dilation_2 = stride, dilation[1]
# FIXME handle dilation of avg pool
elif cfg.stride_mode == '1x1':
# NOTE I don't like this option described in paper, 1x1 w/ stride throws info away
stride_1, dilation_2 = stride, dilation[1]
else:
stride_2, dilation_2 = stride, dilation[0]
self.pre_norm = norm_act_layer(in_chs, apply_act=cfg.pre_norm_act)
if stride_pool > 1:
self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type, padding=cfg.padding)
else:
self.down = nn.Identity()
self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=stride_1)
self.norm1 = norm_act_layer(mid_chs)
self.conv2_kxk = create_conv2d(
mid_chs, mid_chs, cfg.kernel_size,
stride=stride_2, dilation=dilation_2, groups=groups, padding=cfg.padding)
attn_kwargs = {}
if isinstance(cfg.attn_layer, str):
if cfg.attn_layer == 'se' or cfg.attn_layer == 'eca':
attn_kwargs['act_layer'] = cfg.attn_act_layer
attn_kwargs['rd_channels'] = int(cfg.attn_ratio * (out_chs if cfg.expand_output else mid_chs))
# two different orderings for SE and norm2 (due to some weights and trials using SE before norm2)
if cfg.attn_early:
self.se_early = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)
self.norm2 = norm_act_layer(mid_chs)
self.se = None
else:
self.se_early = None
self.norm2 = norm_act_layer(mid_chs)
self.se = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)
self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=cfg.output_bias)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def init_weights(self, scheme=''):
named_apply(partial(_init_conv, scheme=scheme), self)
def forward(self, x):
shortcut = self.shortcut(x)
x = self.pre_norm(x)
x = self.down(x)
# 1x1 expansion conv & norm-act
x = self.conv1_1x1(x)
x = self.norm1(x)
# depthwise / grouped 3x3 conv w/ SE (or other) channel attention & norm-act
x = self.conv2_kxk(x)
if self.se_early is not None:
x = self.se_early(x)
x = self.norm2(x)
if self.se is not None:
x = self.se(x)
# 1x1 linear projection to output width
x = self.conv3_1x1(x)
x = self.drop_path(x) + shortcut
return x
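def _mbconv_example():
    # Illustrative sketch (not part of the original timm source): with stride=1 and the
    # default cfg the block expands channels 4x internally (1x1 -> dw kxk + SE -> 1x1)
    # and returns a tensor with the same shape as its input.
    blk = MbConvBlock(64, 64, stride=1, cfg=MaxxVitConvCfg())
    x = torch.randn(2, 64, 32, 32)
    assert blk(x).shape == (2, 64, 32, 32)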
class ConvNeXtBlock(nn.Module):
""" ConvNeXt Block
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
kernel_size: int = 7,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
conv_mlp: bool = True,
drop_path: float = 0.
):
super().__init__()
out_chs = out_chs or in_chs
act_layer = get_act_layer(cfg.act_layer)
if conv_mlp:
norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
mlp_layer = ConvMlp
else:
assert 'layernorm' in cfg.norm_layer
norm_layer = LayerNorm
mlp_layer = Mlp
self.use_conv_mlp = conv_mlp
if stride == 2:
self.shortcut = Downsample2d(in_chs, out_chs)
elif in_chs != out_chs:
self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias)
else:
self.shortcut = nn.Identity()
assert cfg.stride_mode in ('pool', 'dw')
stride_pool, stride_dw = 1, 1
# FIXME handle dilation?
if cfg.stride_mode == 'pool':
stride_pool = stride
else:
stride_dw = stride
if stride_pool == 2:
self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type)
else:
self.down = nn.Identity()
self.conv_dw = create_conv2d(
in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1],
depthwise=True, bias=cfg.output_bias)
self.norm = norm_layer(out_chs)
self.mlp = mlp_layer(out_chs, int(cfg.expand_ratio * out_chs), bias=cfg.output_bias, act_layer=act_layer)
if conv_mlp:
self.ls = LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
else:
self.ls = LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.down(x)
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
x = self.ls(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = self.ls(x)
x = x.permute(0, 3, 1, 2)
x = self.drop_path(x) + shortcut
return x
def window_partition(x, window_size: List[int]):
B, H, W, C = x.shape
_assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
_assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})')
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
def grid_partition(x, grid_size: List[int]):
B, H, W, C = x.shape
_assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
_assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}')
x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C)
windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def grid_reverse(windows, grid_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // grid_size[0], W // grid_size[1], grid_size[0], grid_size[1], C)
x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, H, W, C)
return x
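def _partition_roundtrip_example():
    # Illustrative sketch (not part of the original timm source): window/grid partition
    # and their reverse functions are exact inverses for divisible spatial sizes.
    x = torch.randn(2, 16, 16, 32)  # NHWC, as used by the channels-last blocks
    w = window_partition(x, [4, 4])  # -> (2 * 4 * 4, 4, 4, 32)
    assert torch.equal(window_reverse(w, [4, 4], [16, 16]), x)
    g = grid_partition(x, [4, 4])
    assert torch.equal(grid_reverse(g, [4, 4], [16, 16]), x)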
def get_rel_pos_cls(cfg: MaxxVitTransformerCfg, window_size):
rel_pos_cls = None
if cfg.rel_pos_type == 'mlp':
rel_pos_cls = partial(RelPosMlp, window_size=window_size, hidden_dim=cfg.rel_pos_dim)
elif cfg.rel_pos_type == 'bias':
rel_pos_cls = partial(RelPosBias, window_size=window_size)
elif cfg.rel_pos_type == 'bias_tf':
rel_pos_cls = partial(RelPosBiasTf, window_size=window_size)
return rel_pos_cls
class PartitionAttentionCl(nn.Module):
""" Grid or Block partition + Attn + FFN.
NxC 'channels last' tensor layout.
"""
def __init__(
self,
dim: int,
partition_type: str = 'block',
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last
act_layer = get_act_layer(cfg.act_layer)
self.partition_block = partition_type == 'block'
self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size)
rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)
self.norm1 = norm_layer(dim)
self.attn = AttentionCl(
dim,
dim,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
head_first=cfg.head_first,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * cfg.expand_ratio),
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _partition_attn(self, x):
img_size = x.shape[1:3]
if self.partition_block:
partitioned = window_partition(x, self.partition_size)
else:
partitioned = grid_partition(x, self.partition_size)
partitioned = self.attn(partitioned)
if self.partition_block:
x = window_reverse(partitioned, self.partition_size, img_size)
else:
x = grid_reverse(partitioned, self.partition_size, img_size)
return x
def forward(self, x):
x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class ParallelPartitionAttention(nn.Module):
""" Experimental. Grid and Block partition + single FFN
NxC tensor layout.
"""
def __init__(
self,
dim: int,
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
assert dim % 2 == 0
norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last
act_layer = get_act_layer(cfg.act_layer)
assert cfg.window_size == cfg.grid_size
self.partition_size = to_2tuple(cfg.window_size)
rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)
self.norm1 = norm_layer(dim)
self.attn_block = AttentionCl(
dim,
dim // 2,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
head_first=cfg.head_first,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.attn_grid = AttentionCl(
dim,
dim // 2,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
head_first=cfg.head_first,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * cfg.expand_ratio),
out_features=dim,
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _partition_attn(self, x):
img_size = x.shape[1:3]
partitioned_block = window_partition(x, self.partition_size)
partitioned_block = self.attn_block(partitioned_block)
x_window = window_reverse(partitioned_block, self.partition_size, img_size)
partitioned_grid = grid_partition(x, self.partition_size)
partitioned_grid = self.attn_grid(partitioned_grid)
x_grid = grid_reverse(partitioned_grid, self.partition_size, img_size)
return torch.cat([x_window, x_grid], dim=-1)
def forward(self, x):
x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
def window_partition_nchw(x, window_size: List[int]):
B, C, H, W = x.shape
_assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
_assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})')
x = x.view(B, C, H // window_size[0], window_size[0], W // window_size[1], window_size[1])
windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size[0], window_size[1])
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse_nchw(windows, window_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[1]
x = windows.view(-1, H // window_size[0], W // window_size[1], C, window_size[0], window_size[1])
x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, C, H, W)
return x
def grid_partition_nchw(x, grid_size: List[int]):
B, C, H, W = x.shape
_assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
_assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}')
x = x.view(B, C, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1])
windows = x.permute(0, 3, 5, 1, 2, 4).contiguous().view(-1, C, grid_size[0], grid_size[1])
return windows
@register_notrace_function # reason: int argument is a Proxy
def grid_reverse_nchw(windows, grid_size: List[int], img_size: List[int]):
H, W = img_size
C = windows.shape[1]
x = windows.view(-1, H // grid_size[0], W // grid_size[1], C, grid_size[0], grid_size[1])
x = x.permute(0, 3, 4, 1, 5, 2).contiguous().view(-1, C, H, W)
return x
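# NOTE: the *_nchw helpers above are the NCHW counterparts of window_partition / grid_partition,
# used by PartitionAttention2d when cfg.use_nchw_attn is set; like the channels-last versions,
# each partition function is the exact inverse of its corresponding reverse function.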
class PartitionAttention2d(nn.Module):
""" Grid or Block partition + Attn + FFN
'2D' NCHW tensor layout.
"""
def __init__(
self,
dim: int,
partition_type: str = 'block',
cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
        norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)  # NOTE this block is NCHW (channels-first)
act_layer = get_act_layer(cfg.act_layer)
self.partition_block = partition_type == 'block'
self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size)
rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)
self.norm1 = norm_layer(dim)
self.attn = Attention2d(
dim,
dim,
dim_head=cfg.dim_head,
bias=cfg.attn_bias,
head_first=cfg.head_first,
rel_pos_cls=rel_pos_cls,
attn_drop=cfg.attn_drop,
proj_drop=cfg.proj_drop,
)
self.ls1 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = ConvMlp(
in_features=dim,
hidden_features=int(dim * cfg.expand_ratio),
act_layer=act_layer,
drop=cfg.proj_drop)
self.ls2 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _partition_attn(self, x):
img_size = x.shape[-2:]
if self.partition_block:
partitioned = window_partition_nchw(x, self.partition_size)
else:
partitioned = grid_partition_nchw(x, self.partition_size)
partitioned = self.attn(partitioned)
if self.partition_block:
x = window_reverse_nchw(partitioned, self.partition_size, img_size)
else:
x = grid_reverse_nchw(partitioned, self.partition_size, img_size)
return x
def forward(self, x):
x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class MaxxVitBlock(nn.Module):
""" MaxVit conv, window partition + FFN , grid partition + FFN
"""
def __init__(
self,
dim: int,
dim_out: int,
stride: int = 1,
conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path: float = 0.,
):
super().__init__()
self.nchw_attn = transformer_cfg.use_nchw_attn
conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)
attn_kwargs = dict(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)
partition_layer = PartitionAttention2d if self.nchw_attn else PartitionAttentionCl
self.attn_block = None if transformer_cfg.no_block_attn else partition_layer(**attn_kwargs)
self.attn_grid = partition_layer(partition_type='grid', **attn_kwargs)
def init_weights(self, scheme=''):
if self.attn_block is not None:
named_apply(partial(_init_transformer, scheme=scheme), self.attn_block)
named_apply(partial(_init_transformer, scheme=scheme), self.attn_grid)
named_apply(partial(_init_conv, scheme=scheme), self.conv)
def forward(self, x):
# NCHW format
x = self.conv(x)
if not self.nchw_attn:
x = x.permute(0, 2, 3, 1) # to NHWC (channels-last)
if self.attn_block is not None:
x = self.attn_block(x)
x = self.attn_grid(x)
if not self.nchw_attn:
x = x.permute(0, 3, 1, 2) # back to NCHW
return x
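def _maxxvit_block_example():
    # Illustrative sketch (not part of the original timm source): a MaxxVitBlock runs
    # MBConv -> window (block) attention -> grid attention; NCHW in, NCHW out.
    cfg = MaxxVitTransformerCfg(window_size=(8, 8))  # grid_size defaults to window_size
    blk = MaxxVitBlock(64, 64, stride=1, transformer_cfg=cfg)
    x = torch.randn(2, 64, 32, 32)
    assert blk(x).shape == (2, 64, 32, 32)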
class ParallelMaxxVitBlock(nn.Module):
""" MaxVit block with parallel cat(window + grid), one FF
Experimental timm block.
"""
def __init__(
self,
dim,
dim_out,
stride=1,
num_conv=2,
conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
drop_path=0.,
):
super().__init__()
conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
if num_conv > 1:
convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)]
convs += [conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)] * (num_conv - 1)
self.conv = nn.Sequential(*convs)
else:
self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)
self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)
def init_weights(self, scheme=''):
named_apply(partial(_init_transformer, scheme=scheme), self.attn)
named_apply(partial(_init_conv, scheme=scheme), self.conv)
def forward(self, x):
x = self.conv(x)
x = x.permute(0, 2, 3, 1)
x = self.attn(x)
x = x.permute(0, 3, 1, 2)
return x
class MaxxVitStage(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 2,
depth: int = 4,
feat_size: Tuple[int, int] = (14, 14),
block_types: Union[str, Tuple[str]] = 'C',
transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
drop_path: Union[float, List[float]] = 0.,
):
super().__init__()
self.grad_checkpointing = False
block_types = extend_tuple(block_types, depth)
blocks = []
for i, t in enumerate(block_types):
block_stride = stride if i == 0 else 1
assert t in ('C', 'T', 'M', 'PM')
if t == 'C':
conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
blocks += [conv_cls(
in_chs,
out_chs,
stride=block_stride,
cfg=conv_cfg,
drop_path=drop_path[i],
)]
elif t == 'T':
rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size)
blocks += [TransformerBlock2d(
in_chs,
out_chs,
stride=block_stride,
rel_pos_cls=rel_pos_cls,
cfg=transformer_cfg,
drop_path=drop_path[i],
)]
elif t == 'M':
blocks += [MaxxVitBlock(
in_chs,
out_chs,
stride=block_stride,
conv_cfg=conv_cfg,
transformer_cfg=transformer_cfg,
drop_path=drop_path[i],
)]
elif t == 'PM':
blocks += [ParallelMaxxVitBlock(
in_chs,
out_chs,
stride=block_stride,
conv_cfg=conv_cfg,
transformer_cfg=transformer_cfg,
drop_path=drop_path[i],
)]
in_chs = out_chs
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class Stem(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
padding: str = '',
bias: bool = False,
act_layer: str = 'gelu',
norm_layer: str = 'batchnorm2d',
norm_eps: float = 1e-5,
):
super().__init__()
if not isinstance(out_chs, (list, tuple)):
out_chs = to_2tuple(out_chs)
norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps)
self.out_chs = out_chs[-1]
self.stride = 2
self.conv1 = create_conv2d(in_chs, out_chs[0], kernel_size, stride=2, padding=padding, bias=bias)
self.norm1 = norm_act_layer(out_chs[0])
self.conv2 = create_conv2d(out_chs[0], out_chs[1], kernel_size, stride=1, padding=padding, bias=bias)
def init_weights(self, scheme=''):
named_apply(partial(_init_conv, scheme=scheme), self)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.conv2(x)
return x
def cfg_window_size(cfg: MaxxVitTransformerCfg, img_size: Tuple[int, int]):
if cfg.window_size is not None:
assert cfg.grid_size
return cfg
partition_size = img_size[0] // cfg.partition_ratio, img_size[1] // cfg.partition_ratio
cfg = replace(cfg, window_size=partition_size, grid_size=partition_size)
return cfg
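# e.g. img_size=(224, 224) with the default partition_ratio=32 yields
# window_size = grid_size = (7, 7); img_size=(256, 256) yields (8, 8).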
def _overlay_kwargs(cfg: MaxxVitCfg, **kwargs):
transformer_kwargs = {}
conv_kwargs = {}
base_kwargs = {}
for k, v in kwargs.items():
if k.startswith('transformer_'):
transformer_kwargs[k.replace('transformer_', '')] = v
elif k.startswith('conv_'):
conv_kwargs[k.replace('conv_', '')] = v
else:
base_kwargs[k] = v
cfg = replace(
cfg,
transformer_cfg=replace(cfg.transformer_cfg, **transformer_kwargs),
conv_cfg=replace(cfg.conv_cfg, **conv_kwargs),
**base_kwargs
)
return cfg
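# e.g. _overlay_kwargs(cfg, transformer_init_values=1e-6, conv_padding='same', stem_width=64)
# routes the values to cfg.transformer_cfg.init_values, cfg.conv_cfg.padding and cfg.stem_width.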
class MaxxVit(nn.Module):
""" CoaTNet + MaxVit base model.
Highly configurable for different block compositions, tensor layouts, pooling types.
"""
def __init__(
self,
cfg: MaxxVitCfg,
img_size: Union[int, Tuple[int, int]] = 224,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
drop_rate: float = 0.,
drop_path_rate: float = 0.,
**kwargs,
):
super().__init__()
img_size = to_2tuple(img_size)
if kwargs:
cfg = _overlay_kwargs(cfg, **kwargs)
transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size)
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = cfg.embed_dim[-1]
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.feature_info = []
self.stem = Stem(
in_chs=in_chans,
out_chs=cfg.stem_width,
padding=cfg.conv_cfg.padding,
bias=cfg.stem_bias,
act_layer=cfg.conv_cfg.act_layer,
norm_layer=cfg.conv_cfg.norm_layer,
norm_eps=cfg.conv_cfg.norm_eps,
)
stride = self.stem.stride
self.feature_info += [dict(num_chs=self.stem.out_chs, reduction=2, module='stem')]
feat_size = tuple([i // s for i, s in zip(img_size, to_2tuple(stride))])
num_stages = len(cfg.embed_dim)
assert len(cfg.depths) == num_stages
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
in_chs = self.stem.out_chs
stages = []
for i in range(num_stages):
stage_stride = 2
out_chs = cfg.embed_dim[i]
feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size])
stages += [MaxxVitStage(
in_chs,
out_chs,
depth=cfg.depths[i],
block_types=cfg.block_type[i],
conv_cfg=cfg.conv_cfg,
transformer_cfg=transformer_cfg,
feat_size=feat_size,
drop_path=dpr[i],
)]
stride *= stage_stride
in_chs = out_chs
self.feature_info += [dict(num_chs=out_chs, reduction=stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
final_norm_layer = partial(get_norm_layer(cfg.transformer_cfg.norm_layer), eps=cfg.transformer_cfg.norm_eps)
if cfg.head_hidden_size:
self.norm = nn.Identity()
self.head_hidden_size = cfg.head_hidden_size
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
hidden_size=self.head_hidden_size,
pool_type=global_pool,
drop_rate=drop_rate,
norm_layer=final_norm_layer,
)
else:
# standard classifier head w/ norm, pooling, fc classifier
self.head_hidden_size = self.num_features
self.norm = final_norm_layer(self.num_features)
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
# Weight init (default PyTorch init works well for AdamW if scheme not set)
assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff')
if cfg.weight_init:
named_apply(partial(self._init_weights, scheme=cfg.weight_init), self)
def _init_weights(self, module, name, scheme=''):
if hasattr(module, 'init_weights'):
try:
module.init_weights(scheme=scheme)
except TypeError:
module.init_weights()
@torch.jit.ignore
def no_weight_decay(self):
return {
k for k, _ in self.named_parameters()
if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            A list of intermediate feature maps if intermediates_only is True, otherwise
            a tuple of (final feature map, list of intermediate feature maps).
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices)
# forward pass
feat_idx = 0 # stem is index 0
x = self.stem(x)
if feat_idx in take_indices:
intermediates.append(x)
last_idx = len(self.stages)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index]
for stage in stages:
feat_idx += 1
x = stage(x)
if feat_idx in take_indices:
if norm and feat_idx == last_idx:
x_inter = self.norm(x) # applying final norm to last intermediate
else:
x_inter = x
intermediates.append(x_inter)
if intermediates_only:
return intermediates
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices)
self.stages = self.stages[:max_index] # truncate blocks w/ stem as idx 0
if prune_norm:
self.norm = nn.Identity()
if prune_head:
            self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _rw_coat_cfg(
stride_mode='pool',
pool_type='avg2',
conv_output_bias=False,
conv_attn_early=False,
conv_attn_act_layer='relu',
conv_norm_layer='',
transformer_shortcut_bias=True,
transformer_norm_layer='layernorm2d',
transformer_norm_layer_cl='layernorm',
init_values=None,
rel_pos_type='bias',
rel_pos_dim=512,
):
# 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit
# Common differences for initial timm models:
    # - pre-norm layer in MbConv included an activation after norm
# - mbconv expansion calculated from input instead of output chs
# - mbconv shortcut and final 1x1 conv did not have a bias
# - SE act layer was relu, not silu
# - mbconv uses silu in timm, not gelu
# - expansion in attention block done via output proj, not input proj
# Variable differences (evolved over training initial models):
# - avg pool with kernel_size=2 favoured downsampling (instead of maxpool for coat)
# - SE attention was between conv2 and norm/act
# - default to avg pool for mbconv downsample instead of 1x1 or dw conv
# - transformer block shortcut has no bias
return dict(
conv_cfg=MaxxVitConvCfg(
stride_mode=stride_mode,
pool_type=pool_type,
pre_norm_act=True,
expand_output=False,
output_bias=conv_output_bias,
attn_early=conv_attn_early,
attn_act_layer=conv_attn_act_layer,
act_layer='silu',
norm_layer=conv_norm_layer,
),
transformer_cfg=MaxxVitTransformerCfg(
expand_first=False,
shortcut_bias=transformer_shortcut_bias,
pool_type=pool_type,
init_values=init_values,
norm_layer=transformer_norm_layer,
norm_layer_cl=transformer_norm_layer_cl,
rel_pos_type=rel_pos_type,
rel_pos_dim=rel_pos_dim,
),
)
def _rw_max_cfg(
stride_mode='dw',
pool_type='avg2',
conv_output_bias=False,
conv_attn_ratio=1 / 16,
conv_norm_layer='',
transformer_norm_layer='layernorm2d',
transformer_norm_layer_cl='layernorm',
window_size=None,
dim_head=32,
init_values=None,
rel_pos_type='bias',
rel_pos_dim=512,
):
# 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit
# Differences of initial timm models:
# - mbconv expansion calculated from input instead of output chs
# - mbconv shortcut and final 1x1 conv did not have a bias
# - mbconv uses silu in timm, not gelu
# - expansion in attention block done via output proj, not input proj
return dict(
conv_cfg=MaxxVitConvCfg(
stride_mode=stride_mode,
pool_type=pool_type,
expand_output=False,
output_bias=conv_output_bias,
attn_ratio=conv_attn_ratio,
act_layer='silu',
norm_layer=conv_norm_layer,
),
transformer_cfg=MaxxVitTransformerCfg(
expand_first=False,
pool_type=pool_type,
dim_head=dim_head,
window_size=window_size,
init_values=init_values,
norm_layer=transformer_norm_layer,
norm_layer_cl=transformer_norm_layer_cl,
rel_pos_type=rel_pos_type,
rel_pos_dim=rel_pos_dim,
),
)
def _next_cfg(
stride_mode='dw',
pool_type='avg2',
conv_norm_layer='layernorm2d',
conv_norm_layer_cl='layernorm',
transformer_norm_layer='layernorm2d',
transformer_norm_layer_cl='layernorm',
window_size=None,
no_block_attn=False,
init_values=1e-6,
rel_pos_type='mlp', # MLP by default for maxxvit
rel_pos_dim=512,
):
# For experimental models with convnext instead of mbconv
init_values = to_2tuple(init_values)
return dict(
conv_cfg=MaxxVitConvCfg(
block_type='convnext',
stride_mode=stride_mode,
pool_type=pool_type,
expand_output=False,
init_values=init_values[0],
norm_layer=conv_norm_layer,
norm_layer_cl=conv_norm_layer_cl,
),
transformer_cfg=MaxxVitTransformerCfg(
expand_first=False,
pool_type=pool_type,
window_size=window_size,
no_block_attn=no_block_attn, # enabled for MaxxViT-V2
init_values=init_values[1],
norm_layer=transformer_norm_layer,
norm_layer_cl=transformer_norm_layer_cl,
rel_pos_type=rel_pos_type,
rel_pos_dim=rel_pos_dim,
),
)
def _tf_cfg():
return dict(
conv_cfg=MaxxVitConvCfg(
norm_eps=1e-3,
act_layer='gelu_tanh',
padding='same',
),
transformer_cfg=MaxxVitTransformerCfg(
norm_eps=1e-5,
act_layer='gelu_tanh',
            head_first=False,  # heads are interleaved (q_nh, q_hdim, k_nh, k_hdim, ....)
rel_pos_type='bias_tf',
),
)
model_cfgs = dict(
# timm specific CoAtNet configs
coatnet_pico_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 3, 5, 2),
stem_width=(32, 64),
**_rw_max_cfg( # using newer max defaults here
conv_output_bias=True,
conv_attn_ratio=0.25,
),
),
coatnet_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
**_rw_max_cfg( # using newer max defaults here
stride_mode='pool',
conv_output_bias=True,
conv_attn_ratio=0.25,
),
),
coatnet_0_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 7, 2), # deeper than paper '0' model
stem_width=(32, 64),
**_rw_coat_cfg(
conv_attn_early=True,
transformer_shortcut_bias=False,
),
),
coatnet_1_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
stem_width=(32, 64),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_early=True,
transformer_shortcut_bias=False,
)
),
coatnet_2_rw=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
stem_width=(64, 128),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
#init_values=1e-6,
),
),
coatnet_3_rw=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
stem_width=(96, 192),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
init_values=1e-6,
),
),
# Experimental CoAtNet configs w/ ImageNet-1k train (different norm layers, MLP rel-pos)
coatnet_bn_0_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 7, 2), # deeper than paper '0' model
stem_width=(32, 64),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_early=True,
transformer_shortcut_bias=False,
transformer_norm_layer='batchnorm2d',
)
),
coatnet_rmlp_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
**_rw_max_cfg(
conv_output_bias=True,
conv_attn_ratio=0.25,
rel_pos_type='mlp',
rel_pos_dim=384,
),
),
coatnet_rmlp_0_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 7, 2), # deeper than paper '0' model
stem_width=(32, 64),
**_rw_coat_cfg(
stride_mode='dw',
rel_pos_type='mlp',
),
),
coatnet_rmlp_1_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
stem_width=(32, 64),
**_rw_coat_cfg(
pool_type='max',
conv_attn_early=True,
transformer_shortcut_bias=False,
rel_pos_type='mlp',
rel_pos_dim=384, # was supposed to be 512, woops
),
),
coatnet_rmlp_1_rw2=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
stem_width=(32, 64),
**_rw_coat_cfg(
stride_mode='dw',
rel_pos_type='mlp',
            rel_pos_dim=512,  # 512 as originally intended
),
),
coatnet_rmlp_2_rw=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
stem_width=(64, 128),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
init_values=1e-6,
rel_pos_type='mlp'
),
),
coatnet_rmlp_3_rw=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
stem_width=(96, 192),
**_rw_coat_cfg(
stride_mode='dw',
conv_attn_act_layer='silu',
init_values=1e-6,
rel_pos_type='mlp'
),
),
coatnet_nano_cc=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
block_type=('C', 'C', ('C', 'T'), ('C', 'T')),
**_rw_coat_cfg(),
),
coatnext_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(3, 4, 6, 3),
stem_width=(32, 64),
weight_init='normal',
**_next_cfg(
rel_pos_type='bias',
init_values=(1e-5, None)
),
),
# Trying to be like the CoAtNet paper configs
coatnet_0=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 3, 5, 2),
stem_width=64,
head_hidden_size=768,
),
coatnet_1=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
stem_width=64,
head_hidden_size=768,
),
coatnet_2=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
stem_width=128,
head_hidden_size=1024,
),
coatnet_3=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
stem_width=192,
head_hidden_size=1536,
),
coatnet_4=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 12, 28, 2),
stem_width=192,
head_hidden_size=1536,
),
coatnet_5=MaxxVitCfg(
embed_dim=(256, 512, 1280, 2048),
depths=(2, 12, 28, 2),
stem_width=192,
head_hidden_size=2048,
),
# Experimental MaxVit configs
maxvit_pico_rw=MaxxVitCfg(
embed_dim=(32, 64, 128, 256),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(24, 32),
**_rw_max_cfg(),
),
maxvit_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(1, 2, 3, 1),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(),
),
maxvit_tiny_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(),
),
maxvit_tiny_pm=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('PM',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(),
),
maxvit_rmlp_pico_rw=MaxxVitCfg(
embed_dim=(32, 64, 128, 256),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(24, 32),
**_rw_max_cfg(rel_pos_type='mlp'),
),
maxvit_rmlp_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(1, 2, 3, 1),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(rel_pos_type='mlp'),
),
maxvit_rmlp_tiny_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(rel_pos_type='mlp'),
),
maxvit_rmlp_small_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_rw_max_cfg(
rel_pos_type='mlp',
init_values=1e-6,
),
),
maxvit_rmlp_base_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
head_hidden_size=768,
**_rw_max_cfg(
rel_pos_type='mlp',
),
),
maxxvit_rmlp_nano_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(1, 2, 3, 1),
block_type=('M',) * 4,
stem_width=(32, 64),
weight_init='normal',
**_next_cfg(),
),
maxxvit_rmlp_tiny_rw=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(32, 64),
**_next_cfg(),
),
maxxvit_rmlp_small_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=(48, 96),
**_next_cfg(),
),
maxxvitv2_nano_rw=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(1, 2, 3, 1),
block_type=('M',) * 4,
stem_width=(48, 96),
weight_init='normal',
**_next_cfg(
no_block_attn=True,
rel_pos_type='bias',
),
),
maxxvitv2_rmlp_base_rw=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 12, 2),
block_type=('M',) * 4,
stem_width=(64, 128),
**_next_cfg(
no_block_attn=True,
),
),
maxxvitv2_rmlp_large_rw=MaxxVitCfg(
embed_dim=(160, 320, 640, 1280),
depths=(2, 6, 16, 2),
block_type=('M',) * 4,
stem_width=(80, 160),
head_hidden_size=1280,
**_next_cfg(
no_block_attn=True,
),
),
# Trying to be like the MaxViT paper configs
maxvit_tiny_tf=MaxxVitCfg(
embed_dim=(64, 128, 256, 512),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=64,
stem_bias=True,
head_hidden_size=512,
**_tf_cfg(),
),
maxvit_small_tf=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 2, 5, 2),
block_type=('M',) * 4,
stem_width=64,
stem_bias=True,
head_hidden_size=768,
**_tf_cfg(),
),
maxvit_base_tf=MaxxVitCfg(
embed_dim=(96, 192, 384, 768),
depths=(2, 6, 14, 2),
block_type=('M',) * 4,
stem_width=64,
stem_bias=True,
head_hidden_size=768,
**_tf_cfg(),
),
maxvit_large_tf=MaxxVitCfg(
embed_dim=(128, 256, 512, 1024),
depths=(2, 6, 14, 2),
block_type=('M',) * 4,
stem_width=128,
stem_bias=True,
head_hidden_size=1024,
**_tf_cfg(),
),
maxvit_xlarge_tf=MaxxVitCfg(
embed_dim=(192, 384, 768, 1536),
depths=(2, 6, 14, 2),
block_type=('M',) * 4,
stem_width=192,
stem_bias=True,
head_hidden_size=1536,
**_tf_cfg(),
),
)
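# checkpoint_filter_fn below remaps pretrained state dicts on load: relative position bias
# tables are resized when the target window size differs from the checkpoint, and weights
# are reshaped between 1x1 conv and linear layers where the parameter count matches.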
def checkpoint_filter_fn(state_dict, model: nn.Module):
model_state_dict = model.state_dict()
out_dict = {}
for k, v in state_dict.items():
if k.endswith('relative_position_bias_table'):
m = model.get_submodule(k[:-29])
if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]:
v = resize_rel_pos_bias_table(
v,
new_window_size=m.window_size,
new_bias_shape=m.relative_position_bias_table.shape,
)
if k in model_state_dict and v.ndim != model_state_dict[k].ndim and v.numel() == model_state_dict[k].numel():
# adapt between conv2d / linear layers
assert v.ndim in (2, 4)
v = v.reshape(model_state_dict[k].shape)
out_dict[k] = v
return out_dict
def _create_maxxvit(variant, cfg_variant=None, pretrained=False, **kwargs):
if cfg_variant is None:
if variant in model_cfgs:
cfg_variant = variant
else:
cfg_variant = '_'.join(variant.split('_')[:-1])
return build_model_with_cfg(
MaxxVit, variant, pretrained,
model_cfg=model_cfgs[cfg_variant],
feature_cfg=dict(flatten_sequential=True),
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.95, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'stem.conv1', 'classifier': 'head.fc',
'fixed_input_size': True,
**kwargs
}
default_cfgs = generate_default_cfgs({
# timm specific CoAtNet configs, ImageNet-1k pretrain, fixed rel-pos
'coatnet_pico_rw_224.untrained': _cfg(url=''),
'coatnet_nano_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_nano_rw_224_sw-f53093b4.pth',
crop_pct=0.9),
'coatnet_0_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_0_rw_224_sw-a6439706.pth'),
'coatnet_1_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_1_rw_224_sw-5cae1ea8.pth'
),
# timm specific CoAtNet configs, ImageNet-12k pretrain w/ 1k fine-tune, fixed rel-pos
'coatnet_2_rw_224.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/'),
#'coatnet_3_rw_224.untrained': _cfg(url=''),
# Experimental CoAtNet configs w/ ImageNet-12k pretrain -> 1k fine-tune (different norm layers, MLP rel-pos)
'coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/'),
'coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/'),
'coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
# Experimental CoAtNet configs w/ ImageNet-1k train (different norm layers, MLP rel-pos)
'coatnet_bn_0_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_bn_0_rw_224_sw-c228e218.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD,
crop_pct=0.95),
'coatnet_rmlp_nano_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_nano_rw_224_sw-bd1d51b3.pth',
crop_pct=0.9),
'coatnet_rmlp_0_rw_224.untrained': _cfg(url=''),
'coatnet_rmlp_1_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_1_rw_224_sw-9051e6c3.pth'),
'coatnet_rmlp_2_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_2_rw_224_sw-5ccfac55.pth'),
'coatnet_rmlp_3_rw_224.untrained': _cfg(url=''),
'coatnet_nano_cc_224.untrained': _cfg(url=''),
'coatnext_nano_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnext_nano_rw_224_ad-22cb71c2.pth',
crop_pct=0.9),
    # ImageNet-12k pretrain CoAtNet
'coatnet_2_rw_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
'coatnet_3_rw_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
'coatnet_rmlp_1_rw2_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
'coatnet_rmlp_2_rw_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
# Trying to be like the CoAtNet paper configs (will adapt if 'tf' weights are ever released)
'coatnet_0_224.untrained': _cfg(url=''),
'coatnet_1_224.untrained': _cfg(url=''),
'coatnet_2_224.untrained': _cfg(url=''),
'coatnet_3_224.untrained': _cfg(url=''),
'coatnet_4_224.untrained': _cfg(url=''),
'coatnet_5_224.untrained': _cfg(url=''),
# timm specific MaxVit configs, ImageNet-1k pretrain or untrained
'maxvit_pico_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_nano_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_nano_rw_256_sw-fb127241.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_tiny_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_tiny_rw_224_sw-7d0dffeb.pth'),
'maxvit_tiny_rw_256.untrained': _cfg(
url='',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_tiny_pm_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
# timm specific MaxVit w/ MLP rel-pos, ImageNet-1k pretrain
'maxvit_rmlp_pico_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_pico_rw_256_sw-8d82f2c6.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_rmlp_nano_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_nano_rw_256_sw-c17bb0d6.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_rmlp_tiny_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_tiny_rw_256_sw-bbef0ff5.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxvit_rmlp_small_rw_224.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth',
crop_pct=0.9,
),
'maxvit_rmlp_small_rw_256.untrained': _cfg(
url='',
input_size=(3, 256, 256), pool_size=(8, 8)),
# timm specific MaxVit w/ ImageNet-12k pretrain and 1k fine-tune
'maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
),
'maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
# timm specific MaxVit w/ ImageNet-12k pretrain
'maxvit_rmlp_base_rw_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
),
# timm MaxxViT configs (ConvNeXt conv blocks mixed with MaxVit transformer blocks)
'maxxvit_rmlp_nano_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_nano_rw_256_sw-0325d459.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxxvit_rmlp_tiny_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
'maxxvit_rmlp_small_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
# timm MaxxViT-V2 configs (ConvNeXt conv blocks mixed with MaxVit transformer blocks, more width, no block attn)
'maxxvitv2_nano_rw_256.sw_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8)),
'maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/'),
'maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxxvitv2_rmlp_large_rw_224.untrained': _cfg(url=''),
'maxxvitv2_rmlp_base_rw_224.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
# MaxViT models ported from official Tensorflow impl
'maxvit_tiny_tf_224.in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'maxvit_tiny_tf_384.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_tiny_tf_512.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_small_tf_224.in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'maxvit_small_tf_384.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_small_tf_512.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_base_tf_224.in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'maxvit_base_tf_384.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_base_tf_512.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_large_tf_224.in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'maxvit_large_tf_384.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_large_tf_512.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_base_tf_224.in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843),
'maxvit_base_tf_384.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_base_tf_512.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_large_tf_224.in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843),
'maxvit_large_tf_384.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_large_tf_512.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
        input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
'maxvit_xlarge_tf_224.in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843),
'maxvit_xlarge_tf_384.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'maxvit_xlarge_tf_512.in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'),
})
@register_model
def coatnet_pico_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_pico_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_nano_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_0_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_0_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_1_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_1_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_2_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_2_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_3_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_3_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_bn_0_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_bn_0_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_nano_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_0_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_0_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_1_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_1_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_1_rw2_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_1_rw2_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_2_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_2_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_2_rw_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_2_rw_384', pretrained=pretrained, **kwargs)
@register_model
def coatnet_rmlp_3_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_rmlp_3_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_nano_cc_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_nano_cc_224', pretrained=pretrained, **kwargs)
@register_model
def coatnext_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnext_nano_rw_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_0_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_0_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_1_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_1_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_2_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_2_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_3_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_3_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_4_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_4_224', pretrained=pretrained, **kwargs)
@register_model
def coatnet_5_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('coatnet_5_224', pretrained=pretrained, **kwargs)
@register_model
def maxvit_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_pico_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_nano_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_rw_224', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_pico_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_small_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_small_rw_224', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_base_rw_224', pretrained=pretrained, **kwargs)
@register_model
def maxvit_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_rmlp_base_rw_384', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_pm_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_pm_256', pretrained=pretrained, **kwargs)
@register_model
def maxxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxxvitv2_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvitv2_nano_rw_256', pretrained=pretrained, **kwargs)
@register_model
def maxxvitv2_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvitv2_rmlp_base_rw_224', pretrained=pretrained, **kwargs)
@register_model
def maxxvitv2_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvitv2_rmlp_base_rw_384', pretrained=pretrained, **kwargs)
@register_model
def maxxvitv2_rmlp_large_rw_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxxvitv2_rmlp_large_rw_224', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_tf_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_tf_224', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_tf_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_tf_384', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_tiny_tf_512(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_tiny_tf_512', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_small_tf_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_small_tf_224', 'maxvit_small_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_small_tf_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_small_tf_384', 'maxvit_small_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_small_tf_512(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_small_tf_512', 'maxvit_small_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_base_tf_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_base_tf_224', 'maxvit_base_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_base_tf_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_base_tf_384', 'maxvit_base_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_base_tf_512(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_base_tf_512', 'maxvit_base_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_large_tf_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_large_tf_224', 'maxvit_large_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_large_tf_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_large_tf_384', 'maxvit_large_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_large_tf_512(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_large_tf_512', 'maxvit_large_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_xlarge_tf_224(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_xlarge_tf_224', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_xlarge_tf_384(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_xlarge_tf_384', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)
@register_model
def maxvit_xlarge_tf_512(pretrained=False, **kwargs) -> MaxxVit:
return _create_maxxvit('maxvit_xlarge_tf_512', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)
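def _demo_maxvit_forward():
    # Usage sketch (not part of the original file): the registered entrypoints above
    # are plain constructors; in practice they are usually reached through
    # timm.create_model('maxvit_nano_rw_256', pretrained=...). No weights are
    # downloaded with pretrained=False.
    model = maxvit_nano_rw_256(pretrained=False).eval()
    x = torch.randn(1, 3, 256, 256)  # input_size from this variant's default cfg
    with torch.no_grad():
        logits = model(x)
    return logits.shape  # torch.Size([1, 1000])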
| pytorch-image-models/timm/models/maxxvit.py/0 | {
"file_path": "pytorch-image-models/timm/models/maxxvit.py",
"repo_id": "pytorch-image-models",
"token_count": 43954
} |
"""
An implementation of RepGhostNet Model as defined in:
RepGhost: A Hardware-Efficient Ghost Module via Re-parameterization. https://arxiv.org/abs/2211.06088
Original implementation: https://github.com/ChengpengChen/RepGhost
"""
import copy
from functools import partial
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d, Linear, make_divisible
from ._builder import build_model_with_cfg
from ._efficientnet_blocks import SqueezeExcite, ConvBnAct
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['RepGhostNet']
_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4))
class RepGhostModule(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=1,
dw_size=3,
stride=1,
relu=True,
reparam=True,
):
super(RepGhostModule, self).__init__()
self.out_chs = out_chs
init_chs = out_chs
new_chs = out_chs
self.primary_conv = nn.Sequential(
nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False),
nn.BatchNorm2d(init_chs),
nn.ReLU(inplace=True) if relu else nn.Identity(),
)
fusion_conv = []
fusion_bn = []
if reparam:
fusion_conv.append(nn.Identity())
fusion_bn.append(nn.BatchNorm2d(init_chs))
self.fusion_conv = nn.Sequential(*fusion_conv)
self.fusion_bn = nn.Sequential(*fusion_bn)
self.cheap_operation = nn.Sequential(
nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size//2, groups=init_chs, bias=False),
nn.BatchNorm2d(new_chs),
# nn.ReLU(inplace=True) if relu else nn.Identity(),
)
self.relu = nn.ReLU(inplace=False) if relu else nn.Identity()
def forward(self, x):
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
for conv, bn in zip(self.fusion_conv, self.fusion_bn):
x2 = x2 + bn(conv(x1))
return self.relu(x2)
def get_equivalent_kernel_bias(self):
kernel3x3, bias3x3 = self._fuse_bn_tensor(self.cheap_operation[0], self.cheap_operation[1])
for conv, bn in zip(self.fusion_conv, self.fusion_bn):
kernel, bias = self._fuse_bn_tensor(conv, bn, kernel3x3.shape[0], kernel3x3.device)
kernel3x3 += self._pad_1x1_to_3x3_tensor(kernel)
bias3x3 += bias
return kernel3x3, bias3x3
@staticmethod
def _pad_1x1_to_3x3_tensor(kernel1x1):
if kernel1x1 is None:
return 0
else:
return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])
@staticmethod
def _fuse_bn_tensor(conv, bn, in_channels=None, device=None):
in_channels = in_channels if in_channels else bn.running_mean.shape[0]
device = device if device else bn.weight.device
if isinstance(conv, nn.Conv2d):
kernel = conv.weight
assert conv.bias is None
else:
assert isinstance(conv, nn.Identity)
kernel = torch.ones(in_channels, 1, 1, 1, device=device)
if isinstance(bn, nn.BatchNorm2d):
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
assert isinstance(bn, nn.Identity)
return kernel, torch.zeros(in_channels).to(kernel.device)
def switch_to_deploy(self):
if len(self.fusion_conv) == 0 and len(self.fusion_bn) == 0:
return
kernel, bias = self.get_equivalent_kernel_bias()
self.cheap_operation = nn.Conv2d(
in_channels=self.cheap_operation[0].in_channels,
out_channels=self.cheap_operation[0].out_channels,
kernel_size=self.cheap_operation[0].kernel_size,
padding=self.cheap_operation[0].padding,
dilation=self.cheap_operation[0].dilation,
groups=self.cheap_operation[0].groups,
bias=True)
self.cheap_operation.weight.data = kernel
self.cheap_operation.bias.data = bias
self.__delattr__('fusion_conv')
self.__delattr__('fusion_bn')
self.fusion_conv = []
self.fusion_bn = []
def reparameterize(self):
self.switch_to_deploy()
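def _check_repghost_module_fusion():
    # Sanity-check sketch (not part of the original file): in eval mode, where
    # BatchNorm uses its running statistics, the single conv produced by
    # switch_to_deploy() should reproduce the multi-branch forward above.
    torch.manual_seed(0)
    m = RepGhostModule(8, 16, relu=True, reparam=True).eval()
    x = torch.randn(2, 8, 32, 32)
    with torch.no_grad():
        y_multi_branch = m(x)
        m.switch_to_deploy()
        y_fused = m(x)
    return torch.allclose(y_multi_branch, y_fused, atol=1e-5)  # expected: True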
class RepGhostBottleneck(nn.Module):
""" RepGhost bottleneck w/ optional SE"""
def __init__(
self,
in_chs,
mid_chs,
out_chs,
dw_kernel_size=3,
stride=1,
act_layer=nn.ReLU,
se_ratio=0.,
reparam=True,
):
super(RepGhostBottleneck, self).__init__()
has_se = se_ratio is not None and se_ratio > 0.
self.stride = stride
# Point-wise expansion
self.ghost1 = RepGhostModule(in_chs, mid_chs, relu=True, reparam=reparam)
# Depth-wise convolution
if self.stride > 1:
self.conv_dw = nn.Conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False)
self.bn_dw = nn.BatchNorm2d(mid_chs)
else:
self.conv_dw = None
self.bn_dw = None
# Squeeze-and-excitation
self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None
# Point-wise linear projection
self.ghost2 = RepGhostModule(mid_chs, out_chs, relu=False, reparam=reparam)
# shortcut
if in_chs == out_chs and self.stride == 1:
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_chs, in_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False),
nn.BatchNorm2d(in_chs),
nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_chs),
)
def forward(self, x):
shortcut = x
# 1st ghost bottleneck
x = self.ghost1(x)
# Depth-wise convolution
if self.conv_dw is not None:
x = self.conv_dw(x)
x = self.bn_dw(x)
# Squeeze-and-excitation
if self.se is not None:
x = self.se(x)
# 2nd ghost bottleneck
x = self.ghost2(x)
x += self.shortcut(shortcut)
return x
class RepGhostNet(nn.Module):
def __init__(
self,
cfgs,
num_classes=1000,
width=1.0,
in_chans=3,
output_stride=32,
global_pool='avg',
drop_rate=0.2,
reparam=True,
):
super(RepGhostNet, self).__init__()
# setting of inverted residual blocks
assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported'
self.cfgs = cfgs
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.feature_info = []
# building first layer
stem_chs = make_divisible(16 * width, 4)
self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False)
self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem'))
self.bn1 = nn.BatchNorm2d(stem_chs)
self.act1 = nn.ReLU(inplace=True)
prev_chs = stem_chs
# building inverted residual blocks
stages = nn.ModuleList([])
block = RepGhostBottleneck
stage_idx = 0
net_stride = 2
for cfg in self.cfgs:
layers = []
s = 1
for k, exp_size, c, se_ratio, s in cfg:
out_chs = make_divisible(c * width, 4)
mid_chs = make_divisible(exp_size * width, 4)
layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, reparam=reparam))
prev_chs = out_chs
if s > 1:
net_stride *= 2
self.feature_info.append(dict(
num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}'))
stages.append(nn.Sequential(*layers))
stage_idx += 1
out_chs = make_divisible(exp_size * width * 2, 4)
stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1)))
self.pool_dim = prev_chs = out_chs
self.blocks = nn.Sequential(*stages)
# building last several layers
self.num_features = prev_chs
self.head_hidden_size = out_chs = 1280
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True)
self.act2 = nn.ReLU(inplace=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^conv_stem|bn1',
blocks=[
(r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None),
(r'conv_head', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
# NOTE: cannot meaningfully change pooling of efficient head after creation
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=True)
else:
x = self.blocks(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act2(x)
x = self.flatten(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
return x if pre_logits else self.classifier(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def convert_to_deploy(self):
repghost_model_convert(self, do_copy=False)
def repghost_model_convert(model: torch.nn.Module, save_path=None, do_copy=True):
"""
    taken from https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py
"""
if do_copy:
model = copy.deepcopy(model)
for module in model.modules():
if hasattr(module, 'switch_to_deploy'):
module.switch_to_deploy()
if save_path is not None:
torch.save(model.state_dict(), save_path)
return model
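def _demo_repghost_deploy_convert():
    # Hypothetical sketch (not part of the original file): converting a freshly
    # built RepGhostNet to its re-parameterized deploy form should leave
    # eval-mode outputs unchanged up to floating point tolerance.
    model = repghostnet_050(pretrained=False).eval()  # entrypoint defined below in this file
    deploy_model = repghost_model_convert(model)  # deep copy with fused branches
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        max_diff = (model(x) - deploy_model(x)).abs().max().item()
    return max_diff  # expected to be ~0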
def _create_repghostnet(variant, width=1.0, pretrained=False, **kwargs):
"""
Constructs a RepGhostNet model
"""
cfgs = [
# k, t, c, SE, s
# stage1
[[3, 8, 16, 0, 1]],
# stage2
[[3, 24, 24, 0, 2]],
[[3, 36, 24, 0, 1]],
# stage3
[[5, 36, 40, 0.25, 2]],
[[5, 60, 40, 0.25, 1]],
# stage4
[[3, 120, 80, 0, 2]],
[[3, 100, 80, 0, 1],
[3, 120, 80, 0, 1],
[3, 120, 80, 0, 1],
[3, 240, 112, 0.25, 1],
[3, 336, 112, 0.25, 1]
],
# stage5
[[5, 336, 160, 0.25, 2]],
[[5, 480, 160, 0, 1],
[5, 480, 160, 0.25, 1],
[5, 480, 160, 0, 1],
[5, 480, 160, 0.25, 1]
]
]
model_kwargs = dict(
cfgs=cfgs,
width=width,
**kwargs,
)
return build_model_with_cfg(
RepGhostNet,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True),
**model_kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'repghostnet_050.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_5x_43M_66.95.pth.tar'
),
'repghostnet_058.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_58x_60M_68.94.pth.tar'
),
'repghostnet_080.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_8x_96M_72.24.pth.tar'
),
'repghostnet_100.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_0x_142M_74.22.pth.tar'
),
'repghostnet_111.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_11x_170M_75.07.pth.tar'
),
'repghostnet_130.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_3x_231M_76.37.pth.tar'
),
'repghostnet_150.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_5x_301M_77.45.pth.tar'
),
'repghostnet_200.in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_2_0x_516M_78.81.pth.tar'
),
})
@register_model
def repghostnet_050(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-0.5x """
model = _create_repghostnet('repghostnet_050', width=0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_058(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-0.58x """
model = _create_repghostnet('repghostnet_058', width=0.58, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_080(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-0.8x """
model = _create_repghostnet('repghostnet_080', width=0.8, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_100(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-1.0x """
model = _create_repghostnet('repghostnet_100', width=1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_111(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-1.11x """
model = _create_repghostnet('repghostnet_111', width=1.11, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_130(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-1.3x """
model = _create_repghostnet('repghostnet_130', width=1.3, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_150(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-1.5x """
model = _create_repghostnet('repghostnet_150', width=1.5, pretrained=pretrained, **kwargs)
return model
@register_model
def repghostnet_200(pretrained=False, **kwargs) -> RepGhostNet:
""" RepGhostNet-2.0x """
model = _create_repghostnet('repghostnet_200', width=2.0, pretrained=pretrained, **kwargs)
return model
| pytorch-image-models/timm/models/repghost.py/0 | {
"file_path": "pytorch-image-models/timm/models/repghost.py",
"repo_id": "pytorch-image-models",
"token_count": 8221
} |
"""
TResNet: High Performance GPU-Dedicated Architecture
https://arxiv.org/pdf/2003.13630.pdf
Original model: https://github.com/mrT23/TResNet
"""
from collections import OrderedDict
from functools import partial
from typing import Optional
import torch
import torch.nn as nn
from timm.layers import SpaceToDepth, BlurPool2d, ClassifierHead, SEModule, ConvNormAct, DropPath
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
__all__ = ['TResNet'] # model_registry will add each entrypoint fn to this
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
use_se=True,
aa_layer=None,
drop_path_rate=0.
):
super(BasicBlock, self).__init__()
self.downsample = downsample
self.stride = stride
act_layer = partial(nn.LeakyReLU, negative_slope=1e-3)
self.conv1 = ConvNormAct(inplanes, planes, kernel_size=3, stride=stride, act_layer=act_layer, aa_layer=aa_layer)
self.conv2 = ConvNormAct(planes, planes, kernel_size=3, stride=1, apply_act=False)
self.act = nn.ReLU(inplace=True)
rd_chs = max(planes * self.expansion // 4, 64)
self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
def forward(self, x):
if self.downsample is not None:
shortcut = self.downsample(x)
else:
shortcut = x
out = self.conv1(x)
out = self.conv2(out)
if self.se is not None:
out = self.se(out)
out = self.drop_path(out) + shortcut
out = self.act(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
use_se=True,
act_layer=None,
aa_layer=None,
drop_path_rate=0.,
):
super(Bottleneck, self).__init__()
self.downsample = downsample
self.stride = stride
act_layer = act_layer or partial(nn.LeakyReLU, negative_slope=1e-3)
self.conv1 = ConvNormAct(
inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer)
self.conv2 = ConvNormAct(
planes, planes, kernel_size=3, stride=stride, act_layer=act_layer, aa_layer=aa_layer)
reduction_chs = max(planes * self.expansion // 8, 64)
self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None
self.conv3 = ConvNormAct(
planes, planes * self.expansion, kernel_size=1, stride=1, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.act = nn.ReLU(inplace=True)
def forward(self, x):
if self.downsample is not None:
shortcut = self.downsample(x)
else:
shortcut = x
out = self.conv1(x)
out = self.conv2(out)
if self.se is not None:
out = self.se(out)
out = self.conv3(out)
out = self.drop_path(out) + shortcut
out = self.act(out)
return out
class TResNet(nn.Module):
def __init__(
self,
layers,
in_chans=3,
num_classes=1000,
width_factor=1.0,
v2=False,
global_pool='fast',
drop_rate=0.,
drop_path_rate=0.,
):
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
super(TResNet, self).__init__()
aa_layer = BlurPool2d
act_layer = nn.LeakyReLU
# TResnet stages
self.inplanes = int(64 * width_factor)
self.planes = int(64 * width_factor)
if v2:
self.inplanes = self.inplanes // 8 * 8
self.planes = self.planes // 8 * 8
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
conv1 = ConvNormAct(in_chans * 16, self.planes, stride=1, kernel_size=3, act_layer=act_layer)
layer1 = self._make_layer(
Bottleneck if v2 else BasicBlock,
self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[0])
layer2 = self._make_layer(
Bottleneck if v2 else BasicBlock,
self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[1])
layer3 = self._make_layer(
Bottleneck,
self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[2])
layer4 = self._make_layer(
Bottleneck,
self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer, drop_path_rate=dpr[3])
# body
self.body = nn.Sequential(OrderedDict([
('s2d', SpaceToDepth()),
('conv1', conv1),
('layer1', layer1),
('layer2', layer2),
('layer3', layer3),
('layer4', layer4),
]))
self.feature_info = [
dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D?
dict(num_chs=self.planes * (Bottleneck.expansion if v2 else 1), reduction=4, module='body.layer1'),
dict(num_chs=self.planes * 2 * (Bottleneck.expansion if v2 else 1), reduction=8, module='body.layer2'),
dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'),
dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'),
]
# head
self.num_features = self.head_hidden_size = (self.planes * 8) * Bottleneck.expansion
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
# model initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
# residual connections special initialization
for m in self.modules():
if isinstance(m, BasicBlock):
nn.init.zeros_(m.conv2.bn.weight)
if isinstance(m, Bottleneck):
nn.init.zeros_(m.conv3.bn.weight)
def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None, drop_path_rate=0.):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
layers = []
if stride == 2:
# avg pooling before 1x1 conv
layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
layers += [ConvNormAct(
self.inplanes, planes * block.expansion, kernel_size=1, stride=1, apply_act=False)]
downsample = nn.Sequential(*layers)
layers = []
for i in range(blocks):
layers.append(block(
self.inplanes,
planes,
stride=stride if i == 0 else 1,
downsample=downsample if i == 0 else None,
use_se=use_se,
aa_layer=aa_layer,
drop_path_rate=drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate,
))
self.inplanes = planes * block.expansion
return nn.Sequential(*layers)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(stem=r'^body\.conv1', blocks=r'^body\.layer(\d+)' if coarse else r'^body\.layer(\d+)\.(\d+)')
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = self.body.s2d(x)
x = self.body.conv1(x)
x = checkpoint_seq([
self.body.layer1,
self.body.layer2,
self.body.layer3,
self.body.layer4],
x, flatten=True)
else:
x = self.body(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
if 'body.conv1.conv.weight' in state_dict:
return state_dict
import re
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
out_dict = {}
for k, v in state_dict.items():
k = re.sub(r'conv(\d+)\.0.0', lambda x: f'conv{int(x.group(1))}.conv', k)
k = re.sub(r'conv(\d+)\.0.1', lambda x: f'conv{int(x.group(1))}.bn', k)
k = re.sub(r'conv(\d+)\.0', lambda x: f'conv{int(x.group(1))}.conv', k)
k = re.sub(r'conv(\d+)\.1', lambda x: f'conv{int(x.group(1))}.bn', k)
k = re.sub(r'downsample\.(\d+)\.0', lambda x: f'downsample.{int(x.group(1))}.conv', k)
k = re.sub(r'downsample\.(\d+)\.1', lambda x: f'downsample.{int(x.group(1))}.bn', k)
if k.endswith('bn.weight'):
# convert weight from inplace_abn to batchnorm
v = v.abs().add(1e-5)
out_dict[k] = v
return out_dict
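def _demo_tresnet_key_remap():
    # Hypothetical sketch (not part of the original file): shows how the regex
    # remapping in checkpoint_filter_fn translates original inplace-abn style
    # checkpoint keys into this implementation's ConvNormAct naming.
    fake_sd = {
        'body.layer1.0.conv1.0.0.weight': torch.zeros(1),  # -> conv1.conv.weight
        'body.layer1.0.conv1.0.1.weight': torch.ones(1),  # -> conv1.bn.weight (abs + 1e-5 applied)
        'body.layer1.0.downsample.1.0.weight': torch.zeros(1),  # -> downsample.1.conv.weight
    }
    remapped = checkpoint_filter_fn(fake_sd, model=None)  # model arg unused for the remap itself
    return sorted(remapped.keys())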
def _create_tresnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
TResNet,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': (0., 0., 0.), 'std': (1., 1., 1.),
'first_conv': 'body.conv1.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'tresnet_m.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'),
'tresnet_m.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221),
'tresnet_m.miil_in1k': _cfg(hf_hub_id='timm/'),
'tresnet_l.miil_in1k': _cfg(hf_hub_id='timm/'),
'tresnet_xl.miil_in1k': _cfg(hf_hub_id='timm/'),
'tresnet_m.miil_in1k_448': _cfg(
input_size=(3, 448, 448), pool_size=(14, 14),
hf_hub_id='timm/'),
'tresnet_l.miil_in1k_448': _cfg(
input_size=(3, 448, 448), pool_size=(14, 14),
hf_hub_id='timm/'),
'tresnet_xl.miil_in1k_448': _cfg(
input_size=(3, 448, 448), pool_size=(14, 14),
hf_hub_id='timm/'),
'tresnet_v2_l.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'),
'tresnet_v2_l.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221),
})
@register_model
def tresnet_m(pretrained=False, **kwargs) -> TResNet:
model_args = dict(layers=[3, 4, 11, 3])
return _create_tresnet('tresnet_m', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def tresnet_l(pretrained=False, **kwargs) -> TResNet:
model_args = dict(layers=[4, 5, 18, 3], width_factor=1.2)
return _create_tresnet('tresnet_l', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def tresnet_xl(pretrained=False, **kwargs) -> TResNet:
model_args = dict(layers=[4, 5, 24, 3], width_factor=1.3)
return _create_tresnet('tresnet_xl', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def tresnet_v2_l(pretrained=False, **kwargs) -> TResNet:
model_args = dict(layers=[3, 4, 23, 3], width_factor=1.0, v2=True)
return _create_tresnet('tresnet_v2_l', pretrained=pretrained, **dict(model_args, **kwargs))
register_model_deprecations(__name__, {
'tresnet_m_miil_in21k': 'tresnet_m.miil_in21k',
'tresnet_m_448': 'tresnet_m.miil_in1k_448',
'tresnet_l_448': 'tresnet_l.miil_in1k_448',
'tresnet_xl_448': 'tresnet_xl.miil_in1k_448',
}) | pytorch-image-models/timm/models/tresnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/tresnet.py",
"repo_id": "pytorch-image-models",
"token_count": 6188
} |
import logging
from itertools import islice
from typing import Collection, Optional
from torch import nn as nn
from timm.models import group_parameters
_logger = logging.getLogger(__name__)
def param_groups_weight_decay(
model: nn.Module,
weight_decay: float = 1e-5,
no_weight_decay_list: Collection[str] = (),
):
no_weight_decay_list = set(no_weight_decay_list)
decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue
if param.ndim <= 1 or name.endswith(".bias") or name in no_weight_decay_list:
no_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0.},
{'params': decay, 'weight_decay': weight_decay}]
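def _demo_param_groups_weight_decay():
    # Hypothetical demo (not in the original file): 1-d parameters (biases and
    # norm affine weights) land in the zero weight-decay group, while 2-d+ weights
    # land in the decayed group.
    model = nn.Sequential(nn.Linear(4, 8), nn.LayerNorm(8), nn.Linear(8, 2))
    no_decay_group, decay_group = param_groups_weight_decay(model, weight_decay=1e-4)
    # 2 linear weights decay; 2 biases + LayerNorm weight/bias do not
    return len(decay_group['params']), len(no_decay_group['params'])  # expected: (2, 4)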
def _group(it, size):
it = iter(it)
return iter(lambda: tuple(islice(it, size)), ())
def auto_group_layers(model, layers_per_group=12, num_groups=None):
def _in_head(n, hp):
if not hp:
return True
elif isinstance(hp, (tuple, list)):
return any([n.startswith(hpi) for hpi in hp])
else:
return n.startswith(hp)
head_prefix = getattr(model, 'pretrained_cfg', {}).get('classifier', None)
names_trunk = []
names_head = []
for n, _ in model.named_parameters():
names_head.append(n) if _in_head(n, head_prefix) else names_trunk.append(n)
# group non-head layers
num_trunk_layers = len(names_trunk)
if num_groups is not None:
layers_per_group = -(num_trunk_layers // -num_groups)
names_trunk = list(_group(names_trunk, layers_per_group))
num_trunk_groups = len(names_trunk)
layer_map = {n: i for i, l in enumerate(names_trunk) for n in l}
layer_map.update({n: num_trunk_groups for n in names_head})
return layer_map
_layer_map = auto_group_layers # backward compat
def param_groups_layer_decay(
model: nn.Module,
weight_decay: float = 0.05,
no_weight_decay_list: Collection[str] = (),
weight_decay_exclude_1d: bool = True,
layer_decay: float = .75,
end_layer_decay: Optional[float] = None,
verbose: bool = False,
):
"""
Parameter groups for layer-wise lr decay & weight decay
Based on BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58
"""
no_weight_decay_list = set(no_weight_decay_list)
param_group_names = {} # NOTE for debugging
param_groups = {}
if hasattr(model, 'group_matcher'):
# FIXME interface needs more work
layer_map = group_parameters(model, model.group_matcher(coarse=False), reverse=True)
else:
# fallback
layer_map = auto_group_layers(model)
num_layers = max(layer_map.values()) + 1
layer_max = num_layers - 1
layer_scales = list(layer_decay ** (layer_max - i) for i in range(num_layers))
for name, param in model.named_parameters():
if not param.requires_grad:
continue
# no decay: all 1D parameters and model specific ones
if (weight_decay_exclude_1d and param.ndim <= 1) or name in no_weight_decay_list:
g_decay = "no_decay"
this_decay = 0.
else:
g_decay = "decay"
this_decay = weight_decay
layer_id = layer_map.get(name, layer_max)
group_name = "layer_%d_%s" % (layer_id, g_decay)
if group_name not in param_groups:
this_scale = layer_scales[layer_id]
param_group_names[group_name] = {
"lr_scale": this_scale,
"weight_decay": this_decay,
"param_names": [],
}
param_groups[group_name] = {
"lr_scale": this_scale,
"weight_decay": this_decay,
"params": [],
}
param_group_names[group_name]["param_names"].append(name)
param_groups[group_name]["params"].append(param)
if verbose:
import json
_logger.info("parameter groups: \n%s" % json.dumps(param_group_names, indent=2))
return list(param_groups.values())
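def _demo_param_groups_layer_decay():
    # Hypothetical demo (not in the original file). A bare nn.Sequential has no
    # group_matcher, so the auto_group_layers() fallback is used; a minimal
    # pretrained_cfg marking module '2' as the classifier puts it in the last group.
    model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4), nn.Linear(4, 2))
    model.pretrained_cfg = {'classifier': '2'}
    groups = param_groups_layer_decay(model, weight_decay=0.05, layer_decay=0.5)
    # trunk groups get lr_scale 0.5, classifier groups get lr_scale 1.0
    return sorted({g['lr_scale'] for g in groups})  # expected: [0.5, 1.0]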
| pytorch-image-models/timm/optim/_param_groups.py/0 | {
"file_path": "pytorch-image-models/timm/optim/_param_groups.py",
"repo_id": "pytorch-image-models",
"token_count": 1915
} |
""" PyTorch MADGRAD optimizer
MADGRAD: https://arxiv.org/abs/2101.11075
Code from: https://github.com/facebookresearch/madgrad
"""
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import TYPE_CHECKING, Any, Callable, Optional
import torch
import torch.optim
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class MADGRAD(torch.optim.Optimizer):
"""
MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic
Optimization.
.. _MADGRAD: https://arxiv.org/abs/2101.11075
    MADGRAD is a general purpose optimizer that can be used in place of SGD or
    Adam, and may converge faster and generalize better. Currently GPU-only.
Typically, the same learning rate schedule that is used for SGD or Adam may
be used. The overall learning rate is not comparable to either method and
should be determined by a hyper-parameter sweep.
MADGRAD requires less weight decay than other methods, often as little as
zero. Momentum values used for SGD or Adam's beta1 should work here also.
On sparse problems both weight_decay and momentum should be set to 0.
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate (default: 1e-2).
momentum (float):
Momentum value in the range [0,1) (default: 0.9).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0).
eps (float):
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).
"""
def __init__(
self,
params: _params_t,
lr: float = 1e-2,
momentum: float = 0.9,
weight_decay: float = 0,
eps: float = 1e-6,
decoupled_decay: bool = False,
):
if momentum < 0 or momentum >= 1:
            raise ValueError(f"Momentum {momentum} must be in the range [0,1)")
if lr <= 0:
raise ValueError(f"Learning rate {lr} must be positive")
if weight_decay < 0:
raise ValueError(f"Weight decay {weight_decay} must be non-negative")
if eps < 0:
            raise ValueError(f"Eps {eps} must be non-negative")
defaults = dict(
lr=lr,
eps=eps,
momentum=momentum,
weight_decay=weight_decay,
decoupled_decay=decoupled_decay,
)
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self) -> bool:
return False
@property
def supports_flat_params(self) -> bool:
return True
@torch.no_grad()
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
eps = group['eps']
lr = group['lr'] + eps
weight_decay = group['weight_decay']
momentum = group['momentum']
ck = 1 - momentum
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad
if momentum != 0.0 and grad.is_sparse:
raise RuntimeError("momentum != 0 is not compatible with sparse gradients")
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['grad_sum_sq'] = torch.zeros_like(p)
state['s'] = torch.zeros_like(p)
if momentum != 0:
state['x0'] = torch.clone(p).detach()
state['step'] += 1
grad_sum_sq = state['grad_sum_sq']
s = state['s']
lamb = lr * math.sqrt(state['step'])
# Apply weight decay
if weight_decay != 0:
if group['decoupled_decay']:
p.mul_(1.0 - group['lr'] * weight_decay)
else:
if grad.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
grad.add_(p, alpha=weight_decay)
if grad.is_sparse:
grad = grad.coalesce()
grad_val = grad._values()
p_masked = p.sparse_mask(grad)
grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)
s_masked = s.sparse_mask(grad)
# Compute x_0 from other known quantities
rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)
x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1)
# Dense + sparse op
grad_sq = grad * grad
grad_sum_sq.add_(grad_sq, alpha=lamb)
grad_sum_sq_masked.add_(grad_sq, alpha=lamb)
rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)
s.add_(grad, alpha=lamb)
s_masked._values().add_(grad_val, alpha=lamb)
# update masked copy of p
p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1)
# Copy updated masked p to dense p using an add operation
p_masked._values().add_(p_kp1_masked_vals, alpha=-1)
p.add_(p_masked, alpha=-1)
else:
if momentum == 0:
# Compute x_0 from other known quantities
rms = grad_sum_sq.pow(1 / 3).add_(eps)
x0 = p.addcdiv(s, rms, value=1)
else:
x0 = state['x0']
# Accumulate second moments
grad_sum_sq.addcmul_(grad, grad, value=lamb)
rms = grad_sum_sq.pow(1 / 3).add_(eps)
# Update s
s.add_(grad, alpha=lamb)
# Step
if momentum == 0:
p.copy_(x0.addcdiv(s, rms, value=-1))
else:
z = x0.addcdiv(s, rms, value=-1)
# p is a moving average of z
p.mul_(1 - ck).add_(z, alpha=ck)
return loss
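def _demo_madgrad_step():
    # Minimal usage sketch (not part of the original file): a single optimization
    # step on a toy regression problem; the interface matches other torch optimizers.
    model = torch.nn.Linear(10, 1)
    optimizer = MADGRAD(model.parameters(), lr=1e-2, momentum=0.9, weight_decay=0.0)
    x, y = torch.randn(16, 10), torch.randn(16, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()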
| pytorch-image-models/timm/optim/madgrad.py/0 | {
"file_path": "pytorch-image-models/timm/optim/madgrad.py",
"repo_id": "pytorch-image-models",
"token_count": 3562
} |
import abc
from abc import ABC
from typing import Any, Dict, List, Optional
import torch
class Scheduler(ABC):
""" Parameter Scheduler Base Class
A scheduler base class that can be used to schedule any optimizer parameter groups.
Unlike the builtin PyTorch schedulers, this is intended to be consistently called
* At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value
* At the END of each optimizer update, after incrementing the update count, to calculate next update's value
The schedulers built on this should try to remain as stateless as possible (for simplicity).
This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch'
and -1 values for special behaviour. All epoch and update counts must be tracked in the training
code and explicitly passed in to the schedulers on the corresponding step or step_update call.
Based on ideas from:
* https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler
* https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
param_group_field: str,
t_in_epochs: bool = True,
noise_range_t=None,
noise_type='normal',
noise_pct=0.67,
noise_std=1.0,
noise_seed=None,
initialize: bool = True,
) -> None:
self.optimizer = optimizer
self.param_group_field = param_group_field
self._initial_param_group_field = f"initial_{param_group_field}"
if initialize:
for i, group in enumerate(self.optimizer.param_groups):
if param_group_field not in group:
raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
group.setdefault(self._initial_param_group_field, group[param_group_field])
else:
for i, group in enumerate(self.optimizer.param_groups):
if self._initial_param_group_field not in group:
raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]")
self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
self.metric = None # any point to having this for all?
self.t_in_epochs = t_in_epochs
self.noise_range_t = noise_range_t
self.noise_pct = noise_pct
self.noise_type = noise_type
self.noise_std = noise_std
self.noise_seed = noise_seed if noise_seed is not None else 42
self.update_groups(self.base_values)
def state_dict(self) -> Dict[str, Any]:
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.__dict__.update(state_dict)
@abc.abstractmethod
def _get_lr(self, t: int) -> List[float]:
pass
def _get_values(self, t: int, on_epoch: bool = True) -> Optional[List[float]]:
proceed = (on_epoch and self.t_in_epochs) or (not on_epoch and not self.t_in_epochs)
if not proceed:
return None
return self._get_lr(t)
def step(self, epoch: int, metric: float = None) -> None:
self.metric = metric
values = self._get_values(epoch, on_epoch=True)
if values is not None:
values = self._add_noise(values, epoch)
self.update_groups(values)
def step_update(self, num_updates: int, metric: float = None):
self.metric = metric
values = self._get_values(num_updates, on_epoch=False)
if values is not None:
values = self._add_noise(values, num_updates)
self.update_groups(values)
def update_groups(self, values):
if not isinstance(values, (list, tuple)):
values = [values] * len(self.optimizer.param_groups)
for param_group, value in zip(self.optimizer.param_groups, values):
if 'lr_scale' in param_group:
param_group[self.param_group_field] = value * param_group['lr_scale']
else:
param_group[self.param_group_field] = value
def _add_noise(self, lrs, t):
if self._is_apply_noise(t):
noise = self._calculate_noise(t)
lrs = [v + v * noise for v in lrs]
return lrs
def _is_apply_noise(self, t) -> bool:
"""Return True if scheduler in noise range."""
apply_noise = False
if self.noise_range_t is not None:
if isinstance(self.noise_range_t, (list, tuple)):
apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1]
else:
apply_noise = t >= self.noise_range_t
return apply_noise
def _calculate_noise(self, t) -> float:
g = torch.Generator()
g.manual_seed(self.noise_seed + t)
if self.noise_type == 'normal':
while True:
# resample if noise out of percent limit, brute force but shouldn't spin much
noise = torch.randn(1, generator=g).item()
if abs(noise) < self.noise_pct:
return noise
else:
noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
return noise
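class _StepDecayScheduler(Scheduler):
    # Hypothetical minimal subclass (not part of the original file): multiplies the
    # base learning rate by `decay_rate` every `decay_t` epochs. Real schedulers in
    # this package follow the same pattern, adding warmup, noise and cycle options.
    # Usage sketch: sched = _StepDecayScheduler(optimizer, decay_t=10); call
    # sched.step(epoch + 1) at the end of each epoch to set the next epoch's lr.
    def __init__(self, optimizer: torch.optim.Optimizer, decay_t: int = 30, decay_rate: float = 0.5):
        super().__init__(optimizer, param_group_field='lr', t_in_epochs=True)
        self.decay_t = decay_t
        self.decay_rate = decay_rate
    def _get_lr(self, t: int) -> List[float]:
        return [v * self.decay_rate ** (t // self.decay_t) for v in self.base_values]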
| pytorch-image-models/timm/scheduler/scheduler.py/0 | {
"file_path": "pytorch-image-models/timm/scheduler/scheduler.py",
"repo_id": "pytorch-image-models",
"token_count": 2368
} |
""" Model / state_dict utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import fnmatch
from copy import deepcopy
import torch
from torchvision.ops.misc import FrozenBatchNorm2d
from timm.layers import BatchNormAct2d, SyncBatchNormAct, FrozenBatchNormAct2d,\
freeze_batch_norm_2d, unfreeze_batch_norm_2d
from .model_ema import ModelEma
def unwrap_model(model):
if isinstance(model, ModelEma):
return unwrap_model(model.ema)
else:
if hasattr(model, 'module'):
return unwrap_model(model.module)
elif hasattr(model, '_orig_mod'):
return unwrap_model(model._orig_mod)
else:
return model
def get_state_dict(model, unwrap_fn=unwrap_model):
return unwrap_fn(model).state_dict()
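def _demo_unwrap_model():
    # Hypothetical sketch (not part of the original file): unwrap_model peels common
    # wrappers (ModelEma, DataParallel/DistributedDataParallel via .module,
    # torch.compile via ._orig_mod) so get_state_dict saves checkpoints with clean keys.
    import torch.nn as nn
    base = nn.Linear(4, 2)
    wrapped = nn.DataParallel(base)  # exposes the original model as .module
    assert unwrap_model(wrapped) is base
    return sorted(get_state_dict(wrapped).keys())  # ['bias', 'weight'], no 'module.' prefix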
def avg_sq_ch_mean(model, input, output):
""" calculate average channel square mean of output activations
"""
return torch.mean(output.mean(axis=[0, 2, 3]) ** 2).item()
def avg_ch_var(model, input, output):
""" calculate average channel variance of output activations
"""
return torch.mean(output.var(axis=[0, 2, 3])).item()
def avg_ch_var_residual(model, input, output):
""" calculate average channel variance of output activations
"""
return torch.mean(output.var(axis=[0, 2, 3])).item()
class ActivationStatsHook:
"""Iterates through each of `model`'s modules and matches modules using unix pattern
matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is
a match.
Arguments:
model (nn.Module): model from which we will extract the activation stats
        hook_fn_locs (List[str]): List of `hook_fn` locations, matched against the
            names of the model's modules with Unix shell-style (fnmatch) patterns.
hook_fns (List[Callable]): List of hook functions to be registered at every
module in `layer_names`.
Inspiration from https://docs.fast.ai/callback.hook.html.
Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example
on how to plot Signal Propagation Plots using `ActivationStatsHook`.
"""
def __init__(self, model, hook_fn_locs, hook_fns):
self.model = model
self.hook_fn_locs = hook_fn_locs
self.hook_fns = hook_fns
if len(hook_fn_locs) != len(hook_fns):
            raise ValueError(
                "Please provide `hook_fns` for each `hook_fn_locs`, their lengths are different.")
self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns)
for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns):
self.register_hook(hook_fn_loc, hook_fn)
def _create_hook(self, hook_fn):
def append_activation_stats(module, input, output):
out = hook_fn(module, input, output)
self.stats[hook_fn.__name__].append(out)
return append_activation_stats
def register_hook(self, hook_fn_loc, hook_fn):
for name, module in self.model.named_modules():
if not fnmatch.fnmatch(name, hook_fn_loc):
continue
module.register_forward_hook(self._create_hook(hook_fn))
def extract_spp_stats(
model,
hook_fn_locs,
hook_fns,
input_shape=[8, 3, 224, 224]):
"""Extract average square channel mean and variance of activations during
forward pass to plot Signal Propagation Plots (SPP).
Paper: https://arxiv.org/abs/2101.08692
Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950
"""
x = torch.normal(0., 1., input_shape)
hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns)
_ = model(x)
return hook.stats
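def _demo_extract_spp_stats():
    # Hypothetical demo (not in the original file): collect activation statistics
    # from two conv layers of a small throwaway model; module names are matched
    # with fnmatch patterns, one pattern per hook function.
    import torch.nn as nn
    model = nn.Sequential(
        nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(),
        nn.Conv2d(8, 8, 3, padding=1), nn.ReLU(),
    )
    stats = extract_spp_stats(
        model,
        hook_fn_locs=['0', '2'],
        hook_fns=[avg_sq_ch_mean, avg_ch_var],
        input_shape=[2, 3, 32, 32],
    )
    return stats  # {'avg_sq_ch_mean': [...], 'avg_ch_var': [...]}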
def _freeze_unfreeze(root_module, submodules=[], include_bn_running_stats=True, mode='freeze'):
"""
Freeze or unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is
done in place.
Args:
        root_module (nn.Module): Root module relative to which the `submodules` are referenced.
submodules (list[str]): List of modules for which the parameters will be (un)frozen. They are to be provided as
named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list
means that the whole root module will be (un)frozen. Defaults to []
include_bn_running_stats (bool): Whether to also (un)freeze the running statistics of batch norm 2d layers.
Defaults to `True`.
        mode (str): Whether to freeze ("freeze") or unfreeze ("unfreeze"). Defaults to `"freeze"`.
"""
assert mode in ["freeze", "unfreeze"], '`mode` must be one of "freeze" or "unfreeze"'
if isinstance(root_module, (
torch.nn.modules.batchnorm.BatchNorm2d,
torch.nn.modules.batchnorm.SyncBatchNorm,
BatchNormAct2d,
SyncBatchNormAct,
)):
# Raise assertion here because we can't convert it in place
raise AssertionError(
"You have provided a batch norm layer as the `root module`. Please use "
"`timm.utils.model.freeze_batch_norm_2d` or `timm.utils.model.unfreeze_batch_norm_2d` instead.")
if isinstance(submodules, str):
submodules = [submodules]
named_modules = submodules
submodules = [root_module.get_submodule(m) for m in submodules]
if not len(submodules):
named_modules, submodules = list(zip(*root_module.named_children()))
for n, m in zip(named_modules, submodules):
# (Un)freeze parameters
for p in m.parameters():
p.requires_grad = False if mode == 'freeze' else True
if include_bn_running_stats:
# Helper to add submodule specified as a named_module
def _add_submodule(module, name, submodule):
split = name.rsplit('.', 1)
if len(split) > 1:
module.get_submodule(split[0]).add_module(split[1], submodule)
else:
module.add_module(name, submodule)
# Freeze batch norm
if mode == 'freeze':
res = freeze_batch_norm_2d(m)
                # It's possible that `m` is a type of BatchNorm in itself, in which case `freeze_batch_norm_2d` won't
# convert it in place, but will return the converted result. In this case `res` holds the converted
# result and we may try to re-assign the named module
if isinstance(m, (
torch.nn.modules.batchnorm.BatchNorm2d,
torch.nn.modules.batchnorm.SyncBatchNorm,
BatchNormAct2d,
SyncBatchNormAct,
)):
_add_submodule(root_module, n, res)
# Unfreeze batch norm
else:
res = unfreeze_batch_norm_2d(m)
# Ditto. See note above in mode == 'freeze' branch
if isinstance(m, (FrozenBatchNorm2d, FrozenBatchNormAct2d)):
_add_submodule(root_module, n, res)
def freeze(root_module, submodules=[], include_bn_running_stats=True):
"""
Freeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place.
Args:
root_module (nn.Module): Root module relative to which `submodules` are referenced.
submodules (list[str]): List of modules for which the parameters will be frozen. They are to be provided as
named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list
means that the whole root module will be frozen. Defaults to `[]`.
include_bn_running_stats (bool): Whether to also freeze the running statistics of `BatchNorm2d` and
`SyncBatchNorm` layers. These will be converted to `FrozenBatchNorm2d` in place. Hint: During fine tuning,
it's good practice to freeze batch norm stats. And note that these are different to the affine parameters
which are just normal PyTorch parameters. Defaults to `True`.
Hint: If you want to freeze batch norm ONLY, use `timm.utils.model.freeze_batch_norm_2d`.
Examples::
>>> model = timm.create_model('resnet18')
>>> # Freeze up to and including layer2
>>> submodules = [n for n, _ in model.named_children()]
>>> print(submodules)
['conv1', 'bn1', 'act1', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'global_pool', 'fc']
>>> freeze(model, submodules[:submodules.index('layer2') + 1])
>>> # Check for yourself that it works as expected
>>> print(model.layer2[0].conv1.weight.requires_grad)
False
>>> print(model.layer3[0].conv1.weight.requires_grad)
True
>>> # Unfreeze
>>> unfreeze(model)
"""
_freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="freeze")
def unfreeze(root_module, submodules=[], include_bn_running_stats=True):
"""
Unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place.
Args:
root_module (nn.Module): Root module relative to which `submodules` are referenced.
submodules (list[str]): List of submodules for which the parameters will be (un)frozen. They are to be provided
as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty
list means that the whole root module will be unfrozen. Defaults to `[]`.
include_bn_running_stats (bool): Whether to also unfreeze the running statistics of `FrozenBatchNorm2d` layers.
These will be converted to `BatchNorm2d` in place. Defaults to `True`.
See example in docstring for `freeze`.
"""
_freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="unfreeze")
def reparameterize_model(model: torch.nn.Module, inplace=False) -> torch.nn.Module:
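    """ Fuse / reparameterize eligible sub-modules of `model` for inference.

    Recursively replaces children that expose `fuse()` with their fused version, and calls
    `reparameterize()` or `switch_to_deploy()` on children that expose those methods.
    Works on a deep copy unless `inplace=True`.
    """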
if not inplace:
model = deepcopy(model)
def _fuse(m):
for child_name, child in m.named_children():
if hasattr(child, 'fuse'):
setattr(m, child_name, child.fuse())
elif hasattr(child, "reparameterize"):
child.reparameterize()
elif hasattr(child, "switch_to_deploy"):
child.switch_to_deploy()
_fuse(child)
_fuse(model)
return model
| pytorch-image-models/timm/utils/model.py/0 | {
"file_path": "pytorch-image-models/timm/utils/model.py",
"repo_id": "pytorch-image-models",
"token_count": 4328
} |
# Base Python image
FROM python:3.12-slim
# Set working directory
WORKDIR /app
# Install build dependencies
RUN apt-get update && apt-get install -y \
build-essential \
zlib1g-dev \
libjpeg-dev \
libpng-dev \
&& rm -rf /var/lib/apt/lists/*
# Copy package files
COPY . /app/
# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt
# Install the package
RUN pip install -e .
COPY server.py /app/server.py
# Expose the port your server will run on
EXPOSE 65432
CMD ["python", "/app/server.py"]
| smolagents/Dockerfile/0 | {
"file_path": "smolagents/Dockerfile",
"repo_id": "smolagents",
"token_count": 198
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Models
<Tip warning={true}>
Smolagents is an experimental API which is subject to change at any time. Results returned by the agents
can vary as the APIs or underlying models are prone to change.
</Tip>
To learn more about agents and tools, make sure to read the [introductory guide](../index). This page
contains the API docs for the underlying classes.
## Models
You're free to create and use your own models to power your agent.
You could use any `model` callable for your agent, as long as:
1. It follows the [messages format](./chat_templating) (`List[Dict[str, str]]`) for its input `messages`, and it returns an object with a `.content` attribute containing the generated text.
2. It stops generating outputs *before* the sequences passed in the argument `stop_sequences`
To define your LLM, you can write a `custom_model` callable that accepts a list of [messages](./chat_templating) and returns an object with a `.content` attribute containing the text. This callable also needs to accept a `stop_sequences` argument that indicates when to stop generating.
```python
from huggingface_hub import login, InferenceClient
login("<YOUR_HUGGINGFACEHUB_API_TOKEN>")
model_id = "meta-llama/Llama-3.3-70B-Instruct"
client = InferenceClient(model=model_id)
def custom_model(messages, stop_sequences=["Task"]):
response = client.chat_completion(messages, stop=stop_sequences, max_tokens=1000)
answer = response.choices[0].message
return answer
```
Additionally, `custom_model` can also take a `grammar` argument. In the case where you specify a `grammar` upon agent initialization, this argument will be passed to the calls to model, with the `grammar` that you defined upon initialization, to allow [constrained generation](https://huggingface.co/docs/text-generation-inference/conceptual/guidance) in order to force properly-formatted agent outputs.
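As a rough sketch, such a callable could simply accept and forward the extra argument. Mapping `grammar` to the inference client's `response_format` parameter is an assumption about the backend, not a fixed requirement:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(model="meta-llama/Llama-3.3-70B-Instruct")

def custom_model(messages, stop_sequences=["Task"], grammar=None):
    # Forward the grammar (if any) to the backend's constrained-decoding option;
    # whether and how your backend supports it is up to you.
    response = client.chat_completion(
        messages,
        stop=stop_sequences,
        max_tokens=1000,
        response_format=grammar,
    )
    return response.choices[0].message
```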
### TransformersModel
For convenience, we have added a `TransformersModel` that implements the points above by building a local `transformers` pipeline for the model_id given at initialization.
```python
from smolagents import TransformersModel
model = TransformersModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct")
print(model([{"role": "user", "content": [{"type": "text", "text": "Ok!"}]}], stop_sequences=["great"]))
```
```text
>>> What a
```
> [!TIP]
> You must have `transformers` and `torch` installed on your machine. Please run `pip install smolagents[transformers]` if they are not installed yet.
[[autodoc]] TransformersModel
### HfApiModel
The `HfApiModel` wraps huggingface_hub's [InferenceClient](https://huggingface.co/docs/huggingface_hub/main/en/guides/inference) for the execution of the LLM. It supports both HF's own [Inference API](https://huggingface.co/docs/api-inference/index) as well as all [Inference Providers](https://huggingface.co/blog/inference-providers) available on the Hub.
```python
from smolagents import HfApiModel
messages = [
{"role": "user", "content": [{"type": "text", "text": "Hello, how are you?"}]}
]
model = HfApiModel()
print(model(messages))
```
```text
>>> Of course! If you change your mind, feel free to reach out. Take care!
```
[[autodoc]] HfApiModel
### LiteLLMModel
The `LiteLLMModel` leverages [LiteLLM](https://www.litellm.ai/) to support 100+ LLMs from various providers.
You can pass kwargs upon model initialization that will then be used whenever the model is called; for instance, below we pass `temperature`.
```python
from smolagents import LiteLLMModel
messages = [
{"role": "user", "content": [{"type": "text", "text": "Hello, how are you?"}]}
]
model = LiteLLMModel("anthropic/claude-3-5-sonnet-latest", temperature=0.2, max_tokens=10)
print(model(messages))
```
[[autodoc]] LiteLLMModel
### OpenAIServerModel
This class lets you call any model served behind an OpenAI-compatible API.
Here's how you can set it up (you can customize the `api_base` URL to point to another server):
```py
import os
from smolagents import OpenAIServerModel
model = OpenAIServerModel(
model_id="gpt-4o",
api_base="https://api.openai.com/v1",
api_key=os.environ["OPENAI_API_KEY"],
)
```
[[autodoc]] OpenAIServerModel
### AzureOpenAIServerModel
`AzureOpenAIServerModel` allows you to connect to any Azure OpenAI deployment.
Below is an example of how to set it up; note that you can omit the `azure_endpoint`, `api_key`, and `api_version` arguments, provided you've set the corresponding environment variables -- `AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_API_KEY`, and `OPENAI_API_VERSION`.
Pay attention to the lack of an `AZURE_` prefix for `OPENAI_API_VERSION`; this is due to the way the underlying [openai](https://github.com/openai/openai-python) package is designed.
```py
import os
from smolagents import AzureOpenAIServerModel
model = AzureOpenAIServerModel(
model_id = os.environ.get("AZURE_OPENAI_MODEL"),
azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
api_version=os.environ.get("OPENAI_API_VERSION")
)
```
[[autodoc]] AzureOpenAIServerModel | smolagents/docs/source/en/reference/models.md/0 | {
"file_path": "smolagents/docs/source/en/reference/models.md",
"repo_id": "smolagents",
"token_count": 1797
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Tools
<Tip warning={true}>
Smolagents is an experimental API which is subject to change at any time. The results returned by the agents can vary as the APIs or the underlying models are prone to change.
</Tip>
To learn more about agents and tools, make sure to read the [introductory guide](../index).
This page contains the API docs for the underlying classes.
## Tools
### load_tool
[[autodoc]] load_tool
### tool
[[autodoc]] tool
### Tool
[[autodoc]] Tool
### launch_gradio_demo
[[autodoc]] launch_gradio_demo
## Default Tools
### PythonInterpreterTool
[[autodoc]] PythonInterpreterTool
### DuckDuckGoSearchTool
[[autodoc]] DuckDuckGoSearchTool
### VisitWebpageTool
[[autodoc]] VisitWebpageTool
### UserInputTool
[[autodoc]] UserInputTool
## ToolCollection
[[autodoc]] ToolCollection
## Agent Types
Agents can handle any type of object passed between tools; tools, being fully multimodal, can accept and return text, images, audio, video, and other types.
In order to increase compatibility between tools, and to render these returned values correctly in ipython (jupyter, colab, ipython notebooks, ...), we implement wrapper classes around these types.
The wrapped objects should keep behaving as they did initially: a text object should still behave like a string,
and an image object should still behave like a `PIL.Image`.
These types have three specific purposes (a short sketch illustrating them follows this list):
- Calling `to_raw` on the type should return the underlying object
- Calling `to_string` on the type should return the object as a string: that can be the string itself in the case of `AgentText`, but in other cases it will be the path to a serialized version of the object
- Displaying it in an ipython kernel should display the object correctly
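Here is a minimal sketch of these behaviours with `AgentImage` (the file path is a hypothetical example, and exact constructor options may vary between versions):

```python
from smolagents.agent_types import AgentImage

# Wrap an image (a PIL.Image, or a path to an image file, is accepted)
wrapped = AgentImage("path/to/image.png")

raw = wrapped.to_raw()            # the underlying PIL.Image object
serialized = wrapped.to_string()  # a path to a serialized copy of the image
```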
### AgentText
[[autodoc]] smolagents.agent_types.AgentText
### AgentImage
[[autodoc]] smolagents.agent_types.AgentImage
### AgentAudio
[[autodoc]] smolagents.agent_types.AgentAudio
| smolagents/docs/source/hi/reference/tools.md/0 | {
"file_path": "smolagents/docs/source/hi/reference/tools.md",
"repo_id": "smolagents",
"token_count": 2277
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Building good agents
[[open-in-colab]]
There's a world of difference between an agent that works well and one that doesn't.
How can we build agents that fall into the former category?
In this guide, we'll look at best practices for building agents.
> [!TIP]
> If you're new to building agents, make sure to first read the [intro to agents](../conceptual_guides/intro_agents) and the [guided tour of smolagents](../guided_tour).
### The best agent systems are the simplest: simplify the workflow as much as you can
Giving an LLM some agency in your workflow introduces some risk of errors.
Well-programmed agent systems usually have good error logging and retry mechanisms, so the LLM engine has a chance to self-correct its mistakes. But to minimize the risk of LLM errors, you should simplify your workflow!
Let's revisit the example from the [intro to agents](../conceptual_guides/intro_agents): a bot that answers user queries for a surf trip company.
Instead of having the agent call the "travel distance API" and the "weather API" separately every time it is asked about a new surf spot, you could create a single unified tool, "return_spot_information", a function that calls both APIs at once and returns their concatenated outputs.
This reduces cost, latency, and the risk of errors!
The main guideline is: reduce the number of LLM calls as much as you can.
This leads to a few takeaways:
- Whenever possible, merge two tools into one, as in our example of the two APIs; a minimal sketch of such a merged tool follows this list.
- Whenever possible, base your logic on deterministic functions rather than agent decisions.
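To make the first point concrete, here is a minimal sketch of such a unified tool. The two helper functions are hypothetical placeholders that stand in for the real API calls:

```python
from smolagents import tool

def get_travel_distance(spot: str) -> str:
    # Placeholder for the real travel distance API call
    return "120 km from the office"

def get_weather(spot: str) -> str:
    # Placeholder for the real weather API call
    return "sunny, 1.5 m waves"

@tool
def return_spot_information(spot: str) -> str:
    """
    Returns travel distance and weather information for a surf spot.

    Args:
        spot: the name of the surf spot, e.g. "Anchor Point, Taghazout, Morocco".
    """
    return f"Travel: {get_travel_distance(spot)}. Weather: {get_weather(spot)}."
```

A single tool call now returns both pieces of information, so the agent needs one LLM round-trip instead of two.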
### Improve the information flow to the LLM engine
Remember that your LLM engine is like an ~intelligent~ robot locked in a room, whose only communication with the outside world is notes passed under the door.
It won't know about anything that happened unless you explicitly put that information into its prompt.
So first of all, make your task very clear!
Since agents are powered by an LLM, minor variations in the task formulation can yield completely different results.
Then, improve the information flow towards your agent when tools are used.
Particular guidelines to follow:
- Each tool should log (by simply using `print` statements inside the tool's `forward` method) everything that could be useful for the LLM engine.
- In particular, logging details about tool execution errors helps a lot!
For instance, here is a tool that retrieves weather data based on a location and a date-time.
First, here's a poor version:
```python
import datetime
from smolagents import tool
def get_weather_report_at_coordinates(coordinates, date_time):
    # Dummy function, returns [temperature in °C, risk of rain on a scale 0-1, wave height in m]
return [28.0, 0.35, 0.85]
def get_coordinates_from_location(location):
    # Returns dummy coordinates
return [3.3, -42.0]
@tool
def get_weather_api(location: str, date_time: str) -> str:
"""
Returns the weather report.
Args:
location: the name of the place that you want the weather for.
date_time: the date and time for which you want the report.
"""
lon, lat = convert_location_to_coordinates(location)
date_time = datetime.strptime(date_time)
return str(get_weather_report_at_coordinates((lon, lat), date_time))
```
Why is it bad?
- there's no indication of the format that should be used for `date_time`
- there's no detail on how locations should be specified
- there's no logging mechanism to surface explicit failure cases, such as an incorrectly formatted location or `date_time`
- the output format is hard to understand
If the tool call fails, the error trace logged in memory can help the LLM reverse-engineer the tool to fix its errors. But why leave it with so much heavy lifting to do?
A better way to build this tool would have been the following:
```python
@tool
def get_weather_api(location: str, date_time: str) -> str:
"""
Returns the weather report.
Args:
location: the name of the place that you want the weather for. Should be a place name, followed by possibly a city name, then a country, like "Anchor Point, Taghazout, Morocco".
date_time: the date and time for which you want the report, formatted as '%m/%d/%y %H:%M:%S'.
"""
lon, lat = convert_location_to_coordinates(location)
try:
date_time = datetime.strptime(date_time)
except Exception as e:
raise ValueError("Conversion of `date_time` to datetime format failed, make sure to provide a string in format '%m/%d/%y %H:%M:%S'. Full trace:" + str(e))
temperature_celsius, risk_of_rain, wave_height = get_weather_report_at_coordinates((lon, lat), date_time)
return f"Weather report for {location}, {date_time}: Temperature will be {temperature_celsius}°C, risk of rain is {risk_of_rain*100:.0f}%, wave height is {wave_height}m."
```
In general, to ease the load on your LLM, a good question to ask yourself is: "If I were dumb and using this tool for the first time, how easy would it be for me to program with this tool and correct my own errors?"
### Give more arguments to the agent
Beyond the simple string describing the task, you can use the `additional_args` argument to pass any type of object to your agent:
```py
from smolagents import CodeAgent, HfApiModel
model_id = "meta-llama/Llama-3.3-70B-Instruct"
agent = CodeAgent(tools=[], model=HfApiModel(model_id=model_id), add_base_tools=True)
agent.run(
"Why does Mike not know many people in New York?",
additional_args={"mp3_sound_file_url":'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/recording.mp3'}
)
```
For instance, you can use this `additional_args` argument to pass images or strings that you want your agent to leverage.
## How to debug your agent
### 1. Use a more powerful LLM
In an agentic workflow, some errors are actual errors, while others are the fault of your LLM engine not reasoning properly.
For instance, consider this trace of a run where I asked a `CodeAgent` to make me a car picture:
```text
==================================================================================================== New task ====================================================================================================
Make me a cool car picture
──────────────────────────────────────────────────────────────────────────────────────────────────── New step ─────────────────────────────────────────────────────────────────────────────────────────────────────
Agent is executing the code below: ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
image_generator(prompt="A cool, futuristic sports car with LED headlights, aerodynamic design, and vibrant color, high-res, photorealistic")
──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Last output from code snippet: ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png
Step 1:
- Time taken: 16.35 seconds
- Input tokens: 1,383
- Output tokens: 77
──────────────────────────────────────────────────────────────────────────────────────────────────── New step ─────────────────────────────────────────────────────────────────────────────────────────────────────
Agent is executing the code below: ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
final_answer("/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png")
──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Print outputs:
Last output from code snippet: ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png
Final answer:
/var/folders/6m/9b1tts6d5w960j80wbw9tx3m0000gn/T/tmpx09qfsdd/652f0007-3ee9-44e2-94ac-90dae6bb89a4.png
```
The user sees a path being returned, rather than an image.
It could look like a bug in the system, but actually the agentic system didn't cause the error: the LLM brain simply made the mistake of not saving the image output into a variable.
It therefore cannot access the image again except through the path that was logged while saving it, so it returns the path instead of the image.
The first step in debugging your agent is thus "use a more powerful LLM". Alternatives like `Qwen2.5-72B-Instruct` wouldn't have made that mistake.
### 2. Provide more guidance / more information
You can also use less powerful models, as long as you guide them more effectively.
Put yourself in your model's shoes: if you were the model solving the task, would you struggle with only the information provided in the system prompt + task formulation + tool descriptions?
Would you need some additional clarifications?
To provide extra information, we do not recommend changing the system prompt right away: the default system prompt contains many careful adjustments, and unless you understand the prompt very well it is easy to break things.
Better ways to guide your LLM engine are (a short sketch follows this list):
- If it's about the task to solve: add all those details to the task itself. The task can be hundreds of pages long.
- If it's about how to use the tools: the `description` attribute of your tools.
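As a rough illustration of both options, reusing the `agent` and the `get_weather_api` tool defined earlier in this guide (the task text below is made up, and tweaking the description in code rather than in the tool's docstring is just one possible choice):

```python
# Option 1: pack every relevant detail into the task itself
agent.run(
    "Find a surf spot near Taghazout for next weekend. "
    "Constraints: water temperature above 18°C, wave height between 1 and 2 m. "
    "Return a short bullet list with the spot name and the expected conditions."
)

# Option 2: make the tool description more explicit
get_weather_api.description += (
    " Dates must be formatted as '%m/%d/%y %H:%M:%S'; locations as 'Place, City, Country'."
)
```

Both options keep the default system prompt untouched.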
### 3. Change the system prompt (generally not advised)
If the clarifications above are not sufficient, you can change the system prompt.
Let's see how this works. For example, let's check the default system prompt for the [`CodeAgent`] (the version below is shortened by skipping the zero-shot examples).
```python
print(agent.prompt_templates["system_prompt"])
```
You'll get:
```text
You are an expert assistant who can solve any task using code blobs. You will be given a task to solve as best you can.
To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code.
To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences.
At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use.
Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '<end_code>' sequence.
During each intermediate step, you can use 'print()' to save whatever important information you will then need.
These print outputs will then appear in the 'Observation:' field, which will be available as input for the next step.
In the end you have to return a final answer using the `final_answer` tool.
Here are a few examples using notional tools:
---
{examples}
Above example were using notional tools that might not exist for you. On top of performing computations in the Python code snippets that you create, you only have access to these tools:
{{tool_descriptions}}
{{managed_agents_descriptions}}
Here are the rules you should always follow to solve your task:
1. Always provide a 'Thought:' sequence, and a 'Code:\n```py' sequence ending with '```<end_code>' sequence, else you will fail.
2. Use only variables that you have defined!
3. Always use the right arguments for the tools. DO NOT pass the arguments as a dict as in 'answer = wiki({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = wiki(query="What is the place where James Bond lives?")'.
4. Take care to not chain too many sequential tool calls in the same code block, especially when the output format is unpredictable. For instance, a call to search has an unpredictable return format, so do not have another tool call that depends on its output in the same block: rather output results with print() to use them in the next block.
5. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters.
6. Don't name any new variable with the same name as a tool: for instance don't name a variable 'final_answer'.
7. Never create any notional variables in our code, as having these in your logs might derail you from the true variables.
8. You can use imports in your code, but only from the following list of modules: {{authorized_imports}}
9. The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist.
10. Don't give up! You're in charge of solving the task, not providing directions to solve it.
Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000.
```
As you can see, there are placeholders like `"{{tool_descriptions}}"`: these are used when the agent is initialized to insert automatically generated descriptions of the tools or of the managed agents.
So while you can override this system prompt template by passing your custom prompt to the `system_prompt` argument, your new system prompt must contain the following placeholders:
- `"{{tool_descriptions}}"` to insert the tool descriptions.
- `"{{managed_agents_description}}"` to insert the descriptions of managed agents, if there are any.
- For `CodeAgent` only: `"{{authorized_imports}}"` to insert the list of authorized imports.
Then you can change the system prompt as follows:
```py
from smolagents.prompts import CODE_SYSTEM_PROMPT
modified_system_prompt = CODE_SYSTEM_PROMPT + "\nHere you go!"  # Change the system prompt here
agent = CodeAgent(
tools=[],
model=HfApiModel(),
system_prompt=modified_system_prompt
)
```
This also works with the [`ToolCallingAgent`].
### 4. Extra planning
We provide a model for a supplementary planning step, which an agent can run regularly in between normal action steps. In this step there is no tool call; the LLM is simply asked to update the list of facts it knows and, based on those facts, to reason about which next steps it should take.
```py
from smolagents import load_tool, CodeAgent, HfApiModel, DuckDuckGoSearchTool
from dotenv import load_dotenv
load_dotenv()
# Import tool from Hub
image_generation_tool = load_tool("m-ric/text-to-image", trust_remote_code=True)
search_tool = DuckDuckGoSearchTool()
agent = CodeAgent(
tools=[search_tool],
model=HfApiModel("Qwen/Qwen2.5-72B-Instruct"),
    planning_interval=3  # This is where you activate planning!
)
# Run it!
result = agent.run(
"How long would a cheetah at full speed take to run the length of Pont Alexandre III?",
)
``` | smolagents/docs/source/zh/tutorials/building_good_agents.md/0 | {
"file_path": "smolagents/docs/source/zh/tutorials/building_good_agents.md",
"repo_id": "smolagents",
"token_count": 6802
} |
import re
import string
import warnings
from typing import Any
def normalize_number_str(number_str: str) -> float:
# we replace these common units and commas to allow
# conversion to float
for char in ["$", "%", ","]:
number_str = number_str.replace(char, "")
try:
return float(number_str)
except ValueError:
print(f"String {number_str} cannot be normalized to number str.")
return float("inf")
def split_string(
s: str,
char_list: list[str] = [",", ";"],
) -> list[str]:
pattern = f"[{''.join(char_list)}]"
return re.split(pattern, s)
def is_float(element: Any) -> bool:
try:
float(element)
return True
except ValueError:
return False
def question_scorer(
model_answer: str,
ground_truth: str,
) -> bool:
# if gt is a number
if is_float(ground_truth):
normalized_answer = normalize_number_str(str(model_answer))
return normalized_answer == float(ground_truth)
# if gt is a list
elif any(char in ground_truth for char in [",", ";"]):
# question with the fish: normalization removes punct
gt_elems = split_string(ground_truth)
ma_elems = split_string(model_answer)
# check length is the same
if len(gt_elems) != len(ma_elems):
warnings.warn("Answer lists have different lengths, returning False.", UserWarning)
return False
# compare each element as float or str
comparisons = []
for ma_elem, gt_elem in zip(ma_elems, gt_elems):
if is_float(gt_elem):
normalized_ma_elem = normalize_number_str(ma_elem)
comparisons.append(normalized_ma_elem == float(gt_elem))
else:
# we do not remove punct since comparisons can include punct
comparisons.append(
normalize_str(ma_elem, remove_punct=False) == normalize_str(gt_elem, remove_punct=False)
)
return all(comparisons)
# if gt is a str
else:
return normalize_str(model_answer) == normalize_str(ground_truth)
def check_prediction_contains_answer_letters_in_order(prediction, true_answer):
prediction = prediction.lower()
true_answer = true_answer.lower()
if len(prediction) > len(true_answer) * 3:
return False
i = 0
for letter in true_answer:
if letter in prediction[i:]:
i += prediction[i:].index(letter)
else:
return False
return True
def check_close_call(prediction, true_answer, is_correct):
if is_correct:
return True
else:
if is_float(true_answer):
return is_correct
else:
if (
check_prediction_contains_answer_letters_in_order(str(prediction), str(true_answer))
and len(str(true_answer)) * 0.5 <= len(str(prediction)) <= len(str(true_answer)) * 2
):
print(f"Close call: {prediction} vs {true_answer}")
return True
else:
return False
def normalize_str(input_str, remove_punct=True) -> str:
"""
Normalize a string by:
- Removing all white spaces
- Optionally removing punctuation (if remove_punct is True)
- Converting to lowercase
Parameters:
- input_str: str, the string to normalize
- remove_punct: bool, whether to remove punctuation (default: True)
Returns:
- str, the normalized string
"""
# Remove all white spaces. Required e.g for seagull vs. sea gull
no_spaces = re.sub(r"\s", "", input_str)
# Remove punctuation, if specified.
if remove_punct:
translator = str.maketrans("", "", string.punctuation)
return no_spaces.lower().translate(translator)
else:
return no_spaces.lower()
| smolagents/examples/open_deep_research/scripts/gaia_scorer.py/0 | {
"file_path": "smolagents/examples/open_deep_research/scripts/gaia_scorer.py",
"repo_id": "smolagents",
"token_count": 1643
} |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from dotenv import load_dotenv
from smolagents import CodeAgent, HfApiModel, LiteLLMModel, Model, OpenAIServerModel, Tool, TransformersModel
from smolagents.default_tools import TOOL_MAPPING
leopard_prompt = "How many seconds would it take for a leopard at full speed to run through Pont des Arts?"
def parse_arguments(description):
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"prompt",
type=str,
nargs="?", # Makes it optional
default=leopard_prompt,
help="The prompt to run with the agent",
)
parser.add_argument(
"--model-type",
type=str,
default="HfApiModel",
help="The model type to use (e.g., HfApiModel, OpenAIServerModel, LiteLLMModel, TransformersModel)",
)
parser.add_argument(
"--model-id",
type=str,
default="Qwen/Qwen2.5-Coder-32B-Instruct",
help="The model ID to use for the specified model type",
)
parser.add_argument(
"--imports",
nargs="*", # accepts zero or more arguments
default=[],
help="Space-separated list of imports to authorize (e.g., 'numpy pandas')",
)
parser.add_argument(
"--tools",
nargs="*",
default=["web_search"],
help="Space-separated list of tools that the agent can use (e.g., 'tool1 tool2 tool3')",
)
parser.add_argument(
"--verbosity-level",
type=int,
default=1,
help="The verbosity level, as an int in [0, 1, 2].",
)
return parser.parse_args()
def load_model(model_type: str, model_id: str) -> Model:
if model_type == "OpenAIServerModel":
return OpenAIServerModel(
api_key=os.getenv("FIREWORKS_API_KEY"),
api_base="https://api.fireworks.ai/inference/v1",
model_id=model_id,
)
elif model_type == "LiteLLMModel":
return LiteLLMModel(
model_id=model_id,
api_key=os.getenv("OPENAI_API_KEY"),
)
elif model_type == "TransformersModel":
return TransformersModel(model_id=model_id, device_map="auto", flatten_messages_as_text=False)
elif model_type == "HfApiModel":
return HfApiModel(
token=os.getenv("HF_API_KEY"),
model_id=model_id,
)
else:
raise ValueError(f"Unsupported model type: {model_type}")
def main():
load_dotenv()
args = parse_arguments(description="Run a CodeAgent with all specified parameters")
model = load_model(args.model_type, args.model_id)
available_tools = []
for tool_name in args.tools:
if "/" in tool_name:
available_tools.append(Tool.from_space(tool_name))
else:
if tool_name in TOOL_MAPPING:
available_tools.append(TOOL_MAPPING[tool_name]())
else:
raise ValueError(f"Tool {tool_name} is not recognized either as a default tool or a Space.")
print(f"Running agent with these tools: {args.tools}")
agent = CodeAgent(tools=available_tools, model=model, additional_authorized_imports=args.imports)
agent.run(args.prompt)
if __name__ == "__main__":
main()
| smolagents/src/smolagents/cli.py/0 | {
"file_path": "smolagents/src/smolagents/cli.py",
"repo_id": "smolagents",
"token_count": 1576
} |
# coding=utf-8
# Copyright 2025-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check that all tests are called in CI."""
from pathlib import Path
ROOT = Path(__file__).parent.parent
TESTS_FOLDER = ROOT / "tests"
CI_WORKFLOW_FILE = ROOT / ".github" / "workflows" / "tests.yml"
def check_tests_in_ci():
"""List all test files in `./tests/` and check if they are listed in the CI workflow.
Since each test file is triggered separately in the CI workflow, it is easy to forget a new one when adding new
tests, hence this check.
    NOTE: the current implementation is quite naive but should work for now. It must be updated if one wants to ignore some
    tests or if the file naming is updated (currently only files starting with `test_*` are checked).
"""
test_files = [
path.relative_to(TESTS_FOLDER).as_posix()
for path in TESTS_FOLDER.glob("**/*.py")
if path.name.startswith("test_")
]
ci_workflow_file_content = CI_WORKFLOW_FILE.read_text()
missing_test_files = [test_file for test_file in test_files if test_file not in ci_workflow_file_content]
if missing_test_files:
print(
"❌ Some test files seem to be ignored in the CI:\n"
+ "\n".join(f" - {test_file}" for test_file in missing_test_files)
+ f"\n Please add them manually in {CI_WORKFLOW_FILE}."
)
exit(1)
else:
print("✅ All good!")
exit(0)
if __name__ == "__main__":
check_tests_in_ci()
| smolagents/utils/check_tests_in_ci.py/0 | {
"file_path": "smolagents/utils/check_tests_in_ci.py",
"repo_id": "smolagents",
"token_count": 729
} |
# Text Generation Inference - TensorRT-LLM Backend Implementation
## Description
This folder provides the sources of the TensorRT-LLM backend implementation powered by TensorRT-LLM Executor new API
## Simplified Request Sequence
```mermaid
sequenceDiagram
actor User
participant TextGenerationInference.HttpServer
participant TextGenerationInference.TensorRtLlmBackend
participant TextGenerationInference.TensorRtLlmWorkerThread
participant TensorRtLlm.Executor
participant Nvidia.Gpu
User ->> TextGenerationInference.HttpServer: POST /generate
TextGenerationInference.HttpServer ->> TextGenerationInference.TensorRtLlmBackend: Validate and forward inputs & parameters
TextGenerationInference.TensorRtLlmBackend ->> TextGenerationInference.TensorRtLlmWorkerThread: Allocate a new context and spawn a new thread to handle the request
TextGenerationInference.TensorRtLlmWorkerThread ->> TensorRtLlm.Executor: Submit the request to the In-Flight Batcher
activate Nvidia.Gpu
TensorRtLlm.Executor ->> Nvidia.Gpu: Add the request to the poll for execution
TensorRtLlm.Executor -->> TextGenerationInference.TensorRtLlmWorkerThread: Response with an unique request identifier
rect rgb(10, 92, 54)
loop every 100us
rect rgb(15, 81, 50)
alt Acquire lock to query executor
TextGenerationInference.TensorRtLlmWorkerThread ->> TensorRtLlm.Executor: Poll request number of new token(s) generated
else There are new generated tokens
TextGenerationInference.TensorRtLlmWorkerThread ->> TensorRtLlm.Executor: Retrieve newly generated tokens
TensorRtLlm.Executor -->> TextGenerationInference.TensorRtLlmWorkerThread: Return decoded token information and potential error (omitted)
rect rgb(11, 110, 79)
alt Generated token is final
TensorRtLlm.Executor ->> Nvidia.Gpu: Remove request from the scheduler and from the GPU
TextGenerationInference.TensorRtLlmWorkerThread -->> User: Stream the remaining decoded tokens and flush the connection
else Generated token is not final
TextGenerationInference.TensorRtLlmWorkerThread -->> User: Stream token back to the user as they get decoded
end
end
end
end
deactivate Nvidia.Gpu
end
end
```
| text-generation-inference/backends/trtllm/README.md/0 | {
"file_path": "text-generation-inference/backends/trtllm/README.md",
"repo_id": "text-generation-inference",
"token_count": 1019
} |
///
/// Extract the first line of the provided string reference.
/// If there is no lines in the buffer, it returns a string
/// which content is defined by the content of `fail`
/// # Arguments
///
/// * `s`: The string buffer to extract the first-line from
/// * `fail`: A string content which is returned if no lines are
/// present in `s`
///
/// returns: String
///
/// # Examples
///
/// ```
/// let s = "My name is Morgan.\n I'm working at Hugging Face.";
/// first_line(s, "No line in string");
/// ```
#[inline]
pub(crate) fn first_line(s: &str, fail: &str) -> String {
s.lines().next().unwrap_or(fail).to_string()
}
| text-generation-inference/backends/trtllm/src/utils.rs/0 | {
"file_path": "text-generation-inference/backends/trtllm/src/utils.rs",
"repo_id": "text-generation-inference",
"token_count": 201
} |
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot};
use crate::radix::RadixAllocator;
#[derive(Debug, Clone)]
pub struct BlockAllocation {
pub allocation_id: u64,
pub blocks: Vec<u32>,
pub slots: Vec<u32>,
/// Prefix that was cached and for which the KV does not have to
/// be recomputed.
pub prefix_len: u32,
pub(crate) block_allocator: Option<BlockAllocator>,
}
impl Drop for BlockAllocation {
fn drop(&mut self) {
if let Some(block_allocator) = self.block_allocator.as_mut() {
block_allocator.free(self.blocks.clone(), self.allocation_id)
}
}
}
#[derive(Debug, Clone)]
pub struct BlockAllocator {
/// Channel to communicate with the background task
block_allocator: mpsc::UnboundedSender<BlockAllocatorCommand>,
}
impl BlockAllocator {
pub(crate) fn new(
max_batch_total_tokens: u32,
block_size: u32,
prefix_caching: bool,
window_size: Option<u32>,
) -> Self {
// Create channel
let (sender, receiver) = mpsc::unbounded_channel();
// Launch background queue task
tokio::spawn(block_allocator_task(
max_batch_total_tokens / block_size,
block_size,
prefix_caching,
window_size,
receiver,
));
Self {
block_allocator: sender,
}
}
pub(crate) async fn allocate(
&self,
tokens: u32,
prefill_tokens: Option<Arc<Vec<u32>>>,
) -> Option<BlockAllocation> {
let (response_sender, response_receiver) = oneshot::channel();
self.block_allocator
.send(BlockAllocatorCommand::Allocate {
tokens,
prefill_tokens,
response_sender,
})
.unwrap();
response_receiver.await.unwrap().map(|mut allocation| {
allocation.block_allocator = Some(self.clone());
allocation
})
}
pub(crate) fn free(&self, blocks: Vec<u32>, allocation_id: u64) {
self.block_allocator
.send(BlockAllocatorCommand::Free {
allocation_id,
blocks,
})
.unwrap();
}
}
async fn block_allocator_task(
blocks: u32,
block_size: u32,
prefix_caching: bool,
window_size: Option<u32>,
mut receiver: mpsc::UnboundedReceiver<BlockAllocatorCommand>,
) {
let mut allocator: Box<dyn Allocator + Send> = if prefix_caching {
Box::new(RadixAllocator::new(block_size, blocks, window_size))
} else {
Box::new(SimpleAllocator::new(blocks, block_size, window_size))
};
while let Some(cmd) = receiver.recv().await {
match cmd {
BlockAllocatorCommand::Free {
blocks,
allocation_id,
} => allocator.free(blocks, allocation_id),
BlockAllocatorCommand::Allocate {
tokens,
prefill_tokens,
response_sender,
} => {
response_sender
.send(allocator.allocate(tokens, prefill_tokens))
.unwrap();
}
}
}
}
#[derive(Debug)]
enum BlockAllocatorCommand {
Free {
blocks: Vec<u32>,
allocation_id: u64,
},
Allocate {
tokens: u32,
prefill_tokens: Option<Arc<Vec<u32>>>,
response_sender: oneshot::Sender<Option<BlockAllocation>>,
},
}
pub trait Allocator {
fn allocate(
&mut self,
tokens: u32,
prefill_tokens: Option<Arc<Vec<u32>>>,
) -> Option<BlockAllocation>;
fn free(&mut self, blocks: Vec<u32>, allocation_id: u64);
}
pub struct SimpleAllocator {
free_blocks: Vec<u32>,
block_size: u32,
window_size: Option<u32>,
}
impl SimpleAllocator {
fn new(blocks: u32, block_size: u32, window_size: Option<u32>) -> Self {
SimpleAllocator {
block_size,
// Block 0 is reserved for health checks
free_blocks: (1..blocks).collect(),
window_size,
}
}
}
impl Allocator for SimpleAllocator {
fn allocate(
&mut self,
tokens: u32,
_prefill_tokens: Option<Arc<Vec<u32>>>,
) -> Option<BlockAllocation> {
// Apply window size
let (required_blocks, repeats) = {
let (tokens, repeats) = match self.window_size {
None => (tokens, 1),
Some(window_size) => {
let repeats = tokens.div_ceil(window_size);
let tokens = core::cmp::min(tokens, window_size);
(tokens, repeats as usize)
}
};
// Pad to a multiple of block size
let required_blocks = tokens.div_ceil(self.block_size);
(required_blocks, repeats)
};
let tokens = tokens as usize;
if required_blocks > self.free_blocks.len() as u32 {
None
} else {
let blocks = self
.free_blocks
.split_off(self.free_blocks.len() - required_blocks as usize);
let mut slots =
Vec::with_capacity((required_blocks * self.block_size * repeats as u32) as usize);
'slots: for block_id in blocks.repeat(repeats).iter() {
for s in (block_id * self.block_size)..((block_id + 1) * self.block_size) {
slots.push(s);
if slots.len() == tokens {
break 'slots;
}
}
}
Some(BlockAllocation {
allocation_id: 0,
blocks,
slots,
prefix_len: 0,
block_allocator: None,
})
}
}
fn free(&mut self, blocks: Vec<u32>, _allocation_id: u64) {
self.free_blocks.extend(blocks)
}
}
| text-generation-inference/backends/v3/src/block_allocator.rs/0 | {
"file_path": "text-generation-inference/backends/v3/src/block_allocator.rs",
"repo_id": "text-generation-inference",
"token_count": 3008
} |
/// MIT License
//
// Copyright (c) 2020 hatoo
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
use std::collections::BTreeMap;
pub(crate) fn histogram(values: &[f64], bins: usize) -> Vec<(f64, usize)> {
assert!(bins >= 2);
let mut bucket: Vec<usize> = vec![0; bins];
let min = values.iter().collect::<average::Min>().min();
let max = values.iter().collect::<average::Max>().max();
let step = (max - min) / (bins - 1) as f64;
for &v in values {
let i = std::cmp::min(((v - min) / step).ceil() as usize, bins - 1);
bucket[i] += 1;
}
bucket
.into_iter()
.enumerate()
.map(|(i, v)| (min + step * i as f64, v))
.collect()
}
pub(crate) fn percentiles(values: &[f64], pecents: &[i32]) -> BTreeMap<String, f64> {
pecents
.iter()
.map(|&p| {
let i = (f64::from(p) / 100.0 * values.len() as f64) as usize;
(format!("p{p}"), *values.get(i).unwrap_or(&f64::NAN))
})
.collect()
}
| text-generation-inference/benchmark/src/utils.rs/0 | {
"file_path": "text-generation-inference/benchmark/src/utils.rs",
"repo_id": "text-generation-inference",
"token_count": 598
} |
# Vision Language Model Inference in TGI
Vision Language Models (VLMs) are models that consume both image and text inputs to generate text.
VLMs are trained on a combination of image and text data and can handle a wide range of tasks, such as image captioning, visual question answering, and visual dialog.
> What distinguishes VLMs from other text and image models is their ability to handle long context and generate text that is coherent and relevant to the image, even after multiple turns or, in some cases, multiple images.
Below are a couple of common use cases for vision language models:
- **Image Captioning**: Given an image, generate a caption that describes the image.
- **Visual Question Answering (VQA)**: Given an image and a question about the image, generate an answer to the question.
- **Multimodal Dialog**: Generate responses to multiple turns of images and conversations.
- **Image Information Retrieval**: Given an image, retrieve information from the image.
## How to Use a Vision Language Model?
### Hugging Face Hub Python Library
To infer with vision language models through Python, you can use the [`huggingface_hub`](https://pypi.org/project/huggingface-hub/) library. The `InferenceClient` class provides a simple way to interact with the [Inference API](https://huggingface.co/docs/api-inference/index). Images can be passed as URLs or base64-encoded strings. The `InferenceClient` will automatically detect the image format.
```python
from huggingface_hub import InferenceClient
client = InferenceClient("http://127.0.0.1:3000")
image = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"
prompt = f"What is this a picture of?\n\n"
for token in client.text_generation(prompt, max_new_tokens=16, stream=True):
print(token)
# This is a picture of an anthropomorphic rabbit in a space suit.
```
```python
from huggingface_hub import InferenceClient
import base64
import requests
import io
client = InferenceClient("http://127.0.0.1:3000")
# read image from local file
image_path = "rabbit.png"
with open(image_path, "rb") as f:
image = base64.b64encode(f.read()).decode("utf-8")
image = f"data:image/png;base64,{image}"
prompt = f"What is this a picture of?\n\n"
for token in client.text_generation(prompt, max_new_tokens=10, stream=True):
print(token)
# This is a picture of an anthropomorphic rabbit in a space suit.
```
or via the `chat_completion` endpoint:
```python
from huggingface_hub import InferenceClient
client = InferenceClient("http://127.0.0.1:3000")
chat = client.chat_completion(
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "Whats in this image?"},
{
"type": "image_url",
"image_url": {
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"
},
},
],
},
],
seed=42,
max_tokens=100,
)
print(chat)
# ChatCompletionOutput(choices=[ChatCompletionOutputComplete(finish_reason='length', index=0, message=ChatCompletionOutputMessage(role='assistant', content=" The image you've provided features an anthropomorphic rabbit in spacesuit attire. This rabbit is depicted with human-like posture and movement, standing on a rocky terrain with a vast, reddish-brown landscape in the background. The spacesuit is detailed with mission patches, circuitry, and a helmet that covers the rabbit's face and ear, with an illuminated red light on the chest area.\n\nThe artwork style is that of a", name=None, tool_calls=None), logprobs=None)], created=1714589614, id='', model='llava-hf/llava-v1.6-mistral-7b-hf', object='text_completion', system_fingerprint='2.0.2-native', usage=ChatCompletionOutputUsage(completion_tokens=100, prompt_tokens=2943, total_tokens=3043))
```
or with OpenAI's [client library](https://github.com/openai/openai-python):
```python
from openai import OpenAI
# init the client but point it to TGI
client = OpenAI(base_url="http://localhost:3000/v1", api_key="-")
chat_completion = client.chat.completions.create(
model="tgi",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "Whats in this image?"},
{
"type": "image_url",
"image_url": {
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"
},
},
],
},
],
stream=False,
)
print(chat_completion)
# ChatCompletion(id='', choices=[Choice(finish_reason='eos_token', index=0, logprobs=None, message=ChatCompletionMessage(content=' The image depicts an anthropomorphic rabbit dressed in a space suit with gear that resembles NASA attire. The setting appears to be a solar eclipse with dramatic mountain peaks and a partial celestial body in the sky. The artwork is detailed and vivid, with a warm color palette and a sense of an adventurous bunny exploring or preparing for a journey beyond Earth. ', role='assistant', function_call=None, tool_calls=None))], created=1714589732, model='llava-hf/llava-v1.6-mistral-7b-hf', object='text_completion', system_fingerprint='2.0.2-native', usage=CompletionUsage(completion_tokens=84, prompt_tokens=2943, total_tokens=3027))
```
### Inference Through Sending `cURL` Requests
To use the `generate_stream` endpoint with curl, you can add the `-N` flag. This flag disables curl's default buffering and shows data as it arrives from the server.
```bash
curl -N 127.0.0.1:3000/generate_stream \
-X POST \
-d '{"inputs":"What is this a picture of?\n\n","parameters":{"max_new_tokens":16, "seed": 42}}' \
-H 'Content-Type: application/json'
# ...
# data:{"index":16,"token":{"id":28723,"text":".","logprob":-0.6196289,"special":false},"generated_text":"This is a picture of an anthropomorphic rabbit in a space suit.","details":null}
```
### Inference Through JavaScript
First, we need to install the `@huggingface/inference` library.
```bash
npm install @huggingface/inference
```
If you're using the free Inference API, you can use [Huggingface.js](https://huggingface.co/docs/huggingface.js/inference/README)'s `HfInference`. If you're using inference endpoints, you can use the `HfInferenceEndpoint` class to easily interact with the Inference API.
We can create an `HfInferenceEndpoint` by providing our endpoint URL and a [Hugging Face access token](https://huggingface.co/settings/tokens).
```js
import { HfInferenceEndpoint } from "@huggingface/inference";
const hf = new HfInferenceEndpoint("http://127.0.0.1:3000", "HF_TOKEN");
const prompt =
"What is this a picture of?\n\n";
const stream = hf.textGenerationStream({
inputs: prompt,
parameters: { max_new_tokens: 16, seed: 42 },
});
for await (const r of stream) {
// yield the generated token
process.stdout.write(r.token.text);
}
// This is a picture of an anthropomorphic rabbit in a space suit.
```
## Combining Vision Language Models with Other Features
VLMs in TGI have the advantage that they can be used in tandem with other features for more complex tasks. For example, you can use VLMs with [Guided Generation](/docs/conceptual/guided-generation) to generate specific JSON data from an image.
<div class="flex justify-center">
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"
width="400"
/>
</div>
For example we can extract information from the rabbit image and generate a JSON object with the location, activity, number of animals seen, and the animals seen. That would look like this:
```json
{
"activity": "Standing",
"animals": ["Rabbit"],
"animals_seen": 1,
"location": "Rocky surface with mountains in the background and a red light on the rabbit's chest"
}
```
All we need to do is provide a JSON schema to the VLM model and it will generate the JSON object for us.
```bash
curl localhost:3000/generate \
-X POST \
-H 'Content-Type: application/json' \
-d '{
"inputs":"What is this a picture of?\n\n",
"parameters": {
"max_new_tokens": 100,
"seed": 42,
"grammar": {
"type": "json",
"value": {
"properties": {
"location": {
"type": "string"
},
"activity": {
"type": "string"
},
"animals_seen": {
"type": "integer",
"minimum": 1,
"maximum": 5
},
"animals": {
"type": "array",
"items": {
"type": "string"
}
}
},
"required": ["location", "activity", "animals_seen", "animals"]
}
}
}
}'
# {
# "generated_text": "{ \"activity\": \"Standing\", \"animals\": [ \"Rabbit\" ], \"animals_seen\": 1, \"location\": \"Rocky surface with mountains in the background and a red light on the rabbit's chest\" }"
# }
```
Want to learn more about how Vision Language Models work? Check out the [awesome blog post on the topic](https://huggingface.co/blog/vlms).
| text-generation-inference/docs/source/basic_tutorials/visual_language_models.md/0 | {
"file_path": "text-generation-inference/docs/source/basic_tutorials/visual_language_models.md",
"repo_id": "text-generation-inference",
"token_count": 3724
} |
import os
import json
for root, dirs, files in os.walk("."):
for filename in files:
if filename.endswith(".json"):
with open(os.path.join(root, filename), "r") as f:
data = json.load(f)
print(os.path.join(root, filename))
try:
if filename.endswith("_load.json"):
for i in range(len(data)):
data[i]["details"]["prefill"] = []
else:
data["details"]["prefill"] = []
except Exception:
pass
with open(os.path.join(root, filename), "w") as f:
json.dump(data, f, indent=2, ensure_ascii=False)
| text-generation-inference/integration-tests/models/__snapshots__/test.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test.py",
"repo_id": "text-generation-inference",
"token_count": 388
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 28747,
"logprob": -0.54785156,
"special": false,
"text": ":"
},
{
"id": 3169,
"logprob": -1.4091797,
"special": false,
"text": " Let"
},
{
"id": 307,
"logprob": -3.0273438,
"special": false,
"text": " n"
},
{
"id": 327,
"logprob": -0.94433594,
"special": false,
"text": " ="
},
{
"id": 28705,
"logprob": -0.81347656,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.2958984,
"special": false,
"text": "1"
},
{
"id": 28734,
"logprob": -2.0644531,
"special": false,
"text": "0"
},
{
"id": 387,
"logprob": -1.9580078,
"special": false,
"text": " -"
},
{
"id": 28705,
"logprob": -0.5073242,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.1816406,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": ": Let n = 10 - 1"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral.json",
"repo_id": "text-generation-inference",
"token_count": 865
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 2,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 54901,
"logprob": -0.84765625,
"special": false,
"text": "beach"
},
{
"id": 1,
"logprob": -0.008666992,
"special": true,
"text": "<eos>"
}
],
"top_tokens": null
},
"generated_text": "beach"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma.json",
"repo_id": "text-generation-inference",
"token_count": 266
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 1241,
"logprob": -0.9863281,
"special": false,
"text": "():"
},
{
"id": 258,
"logprob": -0.21447754,
"special": false,
"text": "\n "
},
{
"id": 942,
"logprob": -0.43701172,
"special": false,
"text": " print"
},
{
"id": 372,
"logprob": -0.5361328,
"special": false,
"text": "(\""
},
{
"id": 7371,
"logprob": -0.44555664,
"special": false,
"text": "Hello"
},
{
"id": 9956,
"logprob": -1.2412109,
"special": false,
"text": " World"
},
{
"id": 8657,
"logprob": -0.7583008,
"special": false,
"text": "!\")"
},
{
"id": 185,
"logprob": -0.76171875,
"special": false,
"text": "\n"
},
{
"id": 185,
"logprob": -0.20837402,
"special": false,
"text": "\n"
},
{
"id": 1018,
"logprob": -1.2470703,
"special": false,
"text": "print"
}
]
},
"generated_text": "():\n print(\"Hello World!\")\n\nprint"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json",
"repo_id": "text-generation-inference",
"token_count": 866
} |
{
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": "{ \"unit\": \"fahrenheit\", \"temperature\": [ 72, 79, 88 ] }",
"role": "assistant"
}
}
],
"created": 1732525803,
"id": "",
"model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
"object": "chat.completion",
"system_fingerprint": "2.4.1-dev0-native",
"usage": {
"completion_tokens": 29,
"prompt_tokens": 136,
"total_tokens": 165
}
}
| text-generation-inference/integration-tests/models/__snapshots__/test_grammar_response_format_llama/test_grammar_response_format_llama_json.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_grammar_response_format_llama/test_grammar_response_format_llama_json.json",
"repo_id": "text-generation-inference",
"token_count": 255
} |
import pytest
@pytest.fixture(scope="module")
def flash_llama_chat_handle(launcher):
with launcher(
"TinyLlama/TinyLlama-1.1B-Chat-v1.0", num_shard=2, disable_grammar_support=False
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_chat(flash_llama_chat_handle):
await flash_llama_chat_handle.health(300)
return flash_llama_chat_handle.client
@pytest.mark.private
async def test_flash_llama_simple(flash_llama_chat, response_snapshot):
response = await flash_llama_chat.chat(
max_tokens=100,
seed=1,
messages=[
{
"role": "system",
"content": "Youre a helpful assistant! Answer the users question best you can.",
},
{
"role": "user",
"content": "What is the weather like in Brooklyn, New York?",
},
],
)
print(repr(response.choices[0].message.content))
assert (
response.choices[0].message.content
== "As of your last question, the weather in Brooklyn, New York, is typically hot and humid throughout the year. The suburbs around New York City are jealously sheltered, and at least in the Lower Bronx, there are very few outdoor environments to appreciate nature.\n\nIn terms of temperature, the warmest times of the year are from June to August, when average high temperatures typically range from around 73°F or 23°C"
)
assert response == response_snapshot
| text-generation-inference/integration-tests/models/test_chat_llama.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_chat_llama.py",
"repo_id": "text-generation-inference",
"token_count": 594
} |
import pytest
import json
from text_generation.types import GrammarType
@pytest.fixture(scope="module")
def flash_llama_grammar_handle(launcher):
with launcher(
"TinyLlama/TinyLlama-1.1B-Chat-v1.0", num_shard=2, disable_grammar_support=False
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_grammar(flash_llama_grammar_handle):
await flash_llama_grammar_handle.health(300)
return flash_llama_grammar_handle.client
@pytest.mark.asyncio
async def test_flash_llama_grammar(flash_llama_grammar, response_snapshot):
response = await flash_llama_grammar.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_llama_grammar_regex(flash_llama_grammar, response_snapshot):
response = await flash_llama_grammar.generate(
"Whats Googles DNS",
max_new_tokens=10,
decoder_input_details=True,
seed=0,
grammar={
"type": GrammarType.Regex, # "regex"
"value": "((25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.){3}(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)",
},
)
assert response.details.generated_tokens == 10
assert response.generated_text == "42.1.1.101"
assert response == response_snapshot
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_llama_grammar_json(flash_llama_grammar, response_snapshot):
response = await flash_llama_grammar.generate(
"info: david holtz like trees and has two cats. ",
max_new_tokens=100,
decoder_input_details=True,
seed=0,
grammar={
"type": GrammarType.Json, # "json"
"value": json.dumps(
{
"type": "object",
"$id": "https://example.com/person.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "Person",
"properties": {
"firstName": {
"type": "string",
"description": "The person'''s first name.",
},
"lastName": {
"type": "string",
"description": "The person'''s last name.",
},
"hobby": {
"description": "The person'''s hobby.",
"type": "string",
},
"numCats": {
"description": "The number of cats the person has.",
"type": "integer",
"minimum": 0,
},
},
"required": ["firstName", "lastName", "hobby", "numCats"],
}
),
},
)
assert response.details.generated_tokens == 30
assert (
response.generated_text
== '{"firstName":"David","hobby":"Trees","lastName":"Holtz","numCats":2}'
)
assert response == response_snapshot
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_llama_grammar_load(
flash_llama_grammar, generate_load, response_snapshot
):
responses = await generate_load(
flash_llama_grammar,
"name: david. email: ",
max_new_tokens=10,
n=4,
stop_sequences=[".com"],
seed=0,
grammar={
"type": GrammarType.Regex, # "regex"
"value": "[\\w-]+@([\\w-]+\\.)+[\\w-]+", # email regex
},
)
assert len(responses) == 4
expected = "[email protected]"
for response in responses:
assert response.generated_text == expected
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
# this is the same as the above test, but only fires off a single request
# this is only to ensure that the parallel and single inference produce the same result
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_llama_grammar_single_load_instance(
flash_llama_grammar, generate_load, response_snapshot
):
response = await flash_llama_grammar.generate(
"name: david. email: ",
max_new_tokens=10,
stop_sequences=[".com"],
seed=0,
grammar={
"type": GrammarType.Regex, # "regex"
"value": "[\\w-]+@([\\w-]+\\.)+[\\w-]+", # email regex
},
)
# assert response.details.generated_tokens == 30
assert response.generated_text == "[email protected]"
assert response == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_grammar_llama.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_grammar_llama.py",
"repo_id": "text-generation-inference",
"token_count": 2366
} |
import pytest
@pytest.fixture(scope="module")
def flash_neox_sharded_handle(launcher):
with launcher("OpenAssistant/oasst-sft-1-pythia-12b", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_neox_sharded(flash_neox_sharded_handle):
await flash_neox_sharded_handle.health(300)
return flash_neox_sharded_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_neox(flash_neox_sharded, response_snapshot):
response = await flash_neox_sharded.generate(
"<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_neox_load(flash_neox_sharded, generate_load, response_snapshot):
responses = await generate_load(
flash_neox_sharded,
"<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_neox_sharded.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_neox_sharded.py",
"repo_id": "text-generation-inference",
"token_count": 507
} |
import pytest
@pytest.fixture(scope="module")
def flash_idefics3_next_handle(launcher):
with launcher("HuggingFaceM4/Idefics3-8B-Llama3") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_idefics3_next(flash_idefics3_next_handle):
await flash_idefics3_next_handle.health(300)
return flash_idefics3_next_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_idefics3_next_simple_url(flash_idefics3_next, response_snapshot):
ny_skyline = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
query = "What is in this image?"
response = await flash_idefics3_next.generate(
f"<|begin_of_text|><|begin_of_text|>User:{query}<end_of_utterance>\nAssistant:",
max_new_tokens=10,
seed=1337,
)
print(response)
assert (
response.generated_text == " There is a statue in the image."
), f"{repr(response.generated_text)}"
assert response.details.generated_tokens == 9
assert response == response_snapshot
| text-generation-inference/integration-tests/models/test_idefics3.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_idefics3.py",
"repo_id": "text-generation-inference",
"token_count": 454
} |
{
buildPythonPackage,
poetry-core,
huggingface-hub,
pydantic,
}:
buildPythonPackage {
name = "text-generation";
src = ../clients/python;
pyproject = true;
build-system = [ poetry-core ];
dependencies = [
huggingface-hub
pydantic
];
}
| text-generation-inference/nix/client.nix/0 | {
"file_path": "text-generation-inference/nix/client.nix",
"repo_id": "text-generation-inference",
"token_count": 98
} |
/// Text Generation Inference Webserver
pub mod config;
pub mod infer;
pub mod server;
pub mod validation;
#[cfg(feature = "kserve")]
mod kserve;
pub mod logging;
mod sagemaker;
pub mod usage_stats;
mod vertex;
use crate::infer::tool_grammar::ToolGrammar;
use crate::infer::{Infer, InferError};
use pyo3::prelude::*;
use pyo3::types::IntoPyDict;
use serde::{Deserialize, Serialize};
use tokenizers::Encoding;
use tracing::warn;
use utoipa::ToSchema;
use validation::Validation;
#[allow(clippy::large_enum_variant)]
#[derive(Clone)]
pub enum Tokenizer {
Python {
tokenizer_name: String,
revision: Option<String>,
trust_remote_code: bool,
},
Rust(tokenizers::Tokenizer),
}
pub struct PyTokenizer<'a>(pyo3::Bound<'a, pyo3::PyAny>);
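// Wraps a Python `transformers` tokenizer driven through pyo3: `from_py` below mirrors
// `AutoTokenizer.from_pretrained(tokenizer_name, revision=..., trust_remote_code=...)`,
// and its `encode_trait` impl calls `tokenizer.encode(text, add_special_tokens=...)`,
// keeping only the resulting input ids.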
impl<'a> PyTokenizer<'a> {
fn from_py(
py: Python<'a>,
tokenizer_name: String,
revision: Option<String>,
trust_remote_code: bool,
) -> PyResult<PyTokenizer<'a>> {
let transformers = py.import_bound("transformers")?;
let auto = transformers.getattr("AutoTokenizer")?;
let from_pretrained = auto.getattr("from_pretrained")?;
let args = (tokenizer_name,);
let kwargs = if let Some(rev) = &revision {
[
("revision", rev.to_string().into_py(py)),
("trust_remote_code", trust_remote_code.into_py(py)),
]
.into_py_dict_bound(py)
} else {
[("trust_remote_code", trust_remote_code.into_py(py))].into_py_dict_bound(py)
};
let tokenizer = from_pretrained.call(args, Some(&kwargs))?;
tracing::info!("Loaded a python tokenizer");
Ok(PyTokenizer(tokenizer))
}
}
trait TokenizerTrait {
fn encode_trait(
&self,
query: String,
add_special_tokens: bool,
) -> Result<tokenizers::Encoding, Box<dyn std::error::Error + Send + Sync>>;
}
impl TokenizerTrait for tokenizers::Tokenizer {
fn encode_trait(
&self,
query: String,
add_special_tokens: bool,
) -> Result<tokenizers::Encoding, Box<dyn std::error::Error + Send + Sync>> {
self.encode(query, add_special_tokens)
}
}
impl TokenizerTrait for PyTokenizer<'_> {
fn encode_trait(
&self,
query: String,
add_special_tokens: bool,
) -> Result<tokenizers::Encoding, Box<dyn std::error::Error + Send + Sync>> {
let py = self.0.py();
let kwargs = [
("text", query.into_py(py)),
("add_special_tokens", add_special_tokens.into_py(py)),
]
.into_py_dict_bound(py);
let encode = self.0.getattr("encode")?;
let input_ids: Vec<u32> = encode.call((), Some(&kwargs))?.extract()?;
Ok(Encoding::new(
input_ids,
vec![], // type ids
vec![], // tokens (strings)
vec![], // words
vec![], // offsets
vec![], // special_tokens_mask
vec![], // attention_mask
vec![], // overflowing
std::collections::HashMap::new(), //sequence_ranges
))
}
}
/// Hub type
#[derive(Clone, Debug, Deserialize)]
pub struct HubModelInfo {
#[serde(rename(deserialize = "id"))]
pub model_id: String,
pub sha: Option<String>,
pub pipeline_tag: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ChatTemplate {
name: String,
template: String,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(untagged)]
pub enum ChatTemplateVersions {
Single(String),
Multiple(Vec<ChatTemplate>),
}
use std::path::Path;
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct HubTokenizerConfig {
pub chat_template: Option<ChatTemplateVersions>,
pub completion_template: Option<String>,
pub bos_token: Option<TokenizerConfigToken>,
pub eos_token: Option<TokenizerConfigToken>,
pub tokenizer_class: Option<String>,
pub add_bos_token: Option<bool>,
pub add_eos_token: Option<bool>,
}
impl HubTokenizerConfig {
pub fn from_file<P: AsRef<Path>>(filename: P) -> Option<Self> {
std::fs::read_to_string(filename)
.ok()
.and_then(|content| serde_json::from_str(&content).ok())
}
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
#[serde(untagged)]
pub enum TokenizerConfigToken {
String(String),
Object { content: String },
}
impl TokenizerConfigToken {
pub fn as_str(&self) -> &str {
match self {
TokenizerConfigToken::String(s) => s,
TokenizerConfigToken::Object { content } => content,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "processor_class")]
pub enum HubPreprocessorConfig {
Idefics2Processor(Idefics2Preprocessor),
Idefics3Processor(Idefics2Preprocessor),
}
impl HubPreprocessorConfig {
pub fn from_file<P: AsRef<std::path::Path>>(filename: P) -> Option<Self> {
let content = std::fs::read_to_string(filename).ok()?;
serde_json::from_str(&content).ok()
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Idefics2Preprocessor {
#[serde(default)]
do_image_splitting: bool,
}
#[derive(Debug, Clone, Deserialize, Default)]
pub struct HubProcessorConfig {
pub chat_template: Option<ChatTemplateVersions>,
pub image_seq_len: usize,
pub processor_class: Option<String>,
}
impl HubProcessorConfig {
pub fn from_file<P: AsRef<Path>>(filename: P) -> Option<Self> {
std::fs::read_to_string(filename)
.ok()
.and_then(|content| serde_json::from_str(&content).ok())
}
}
#[derive(Clone, Debug, Deserialize, ToSchema, Serialize)]
#[cfg_attr(test, derive(PartialEq))]
#[serde(tag = "type", content = "value")]
pub(crate) enum GrammarType {
/// A string that represents a [JSON Schema](https://json-schema.org/).
///
    /// JSON Schema is a declarative language that allows you to annotate JSON documents
/// with types and descriptions.
#[serde(rename = "json")]
#[serde(alias = "json_object")]
#[schema(example = json ! ({"properties": {"location":{"type": "string"}}}))]
Json(serde_json::Value),
#[serde(rename = "regex")]
Regex(String),
}
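// On the wire (per the `tag`/`content` serde attributes above) a grammar is a tagged object,
// e.g. `{"type": "json", "value": {"properties": {"location": {"type": "string"}}}}` or
// `{"type": "regex", "value": "[\\w-]+@([\\w-]+\\.)+[\\w-]+"}`; `json_object` is accepted
// as an alias for `json`.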
#[derive(Clone, Debug, Serialize, ToSchema)]
pub struct Info {
/// Model info
#[schema(example = "bigscience/blomm-560m")]
pub model_id: String,
#[schema(nullable = true, example = "e985a63cdc139290c5f700ff1929f0b5942cced2")]
pub model_sha: Option<String>,
// #[schema(example = "torch.float16")]
// pub model_dtype: String,
// #[schema(example = "cuda")]
// pub model_device_type: String,
#[schema(nullable = true, example = "text-generation")]
pub model_pipeline_tag: Option<String>,
/// Router Parameters
#[schema(example = "128")]
pub max_concurrent_requests: usize,
#[schema(example = "2")]
pub max_best_of: usize,
#[schema(example = "4")]
pub max_stop_sequences: usize,
#[schema(example = "1024")]
pub max_input_tokens: usize,
#[schema(example = "2048")]
pub max_total_tokens: usize,
#[schema(example = "2")]
pub validation_workers: usize,
#[schema(example = "32")]
pub max_client_batch_size: usize,
/// Router Info
#[schema(example = "text-generation-router")]
pub router: &'static str,
#[schema(example = "0.5.0")]
pub version: &'static str,
#[schema(nullable = true, example = "null")]
pub sha: Option<&'static str>,
#[schema(nullable = true, example = "null")]
pub docker_label: Option<&'static str>,
}
#[derive(Clone, Debug, Deserialize, ToSchema, Default)]
#[cfg_attr(test, derive(PartialEq))]
pub(crate) struct GenerateParameters {
    /// Generate best_of sequences and return the one with the highest token logprobs.
#[serde(default)]
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 1)]
pub best_of: Option<usize>,
    /// The value used to modulate the logits distribution.
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
nullable = true,
default = "null",
example = 0.5
)]
pub temperature: Option<f32>,
/// The parameter for repetition penalty. 1.0 means no penalty.
/// See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
nullable = true,
default = "null",
example = 1.03
)]
pub repetition_penalty: Option<f32>,
    /// The parameter for frequency penalty. 0.0 means no penalty.
/// Penalize new tokens based on their existing frequency in the text so far,
/// decreasing the model's likelihood to repeat the same line verbatim.
#[serde(default)]
#[schema(
exclusive_minimum = -2.0,
nullable = true,
default = "null",
example = 0.1
)]
pub frequency_penalty: Option<f32>,
/// The number of highest probability vocabulary tokens to keep for top-k-filtering.
#[serde(default)]
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 10)]
pub top_k: Option<i32>,
/// Top-p value for nucleus sampling.
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
maximum = 1.0,
nullable = true,
default = "null",
example = 0.95
)]
pub top_p: Option<f32>,
/// Typical Decoding mass
/// See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
maximum = 1.0,
nullable = true,
default = "null",
example = 0.95
)]
pub typical_p: Option<f32>,
/// Activate logits sampling.
#[serde(default)]
#[schema(default = "false", example = true)]
pub do_sample: bool,
/// Maximum number of tokens to generate.
#[serde(default)]
#[schema(nullable = true, default = "1024", example = "20")]
pub max_new_tokens: Option<u32>,
/// Whether to prepend the prompt to the generated text
#[serde(default)]
#[schema(nullable = true, default = "null", example = false)]
pub return_full_text: Option<bool>,
/// Stop generating tokens if a member of `stop` is generated.
#[serde(default)]
#[schema(inline, max_items = 4, example = json ! (["photographer"]))]
pub stop: Vec<String>,
/// Truncate inputs tokens to the given size.
#[serde(default)]
#[schema(nullable = true, default = "null", example = "null")]
pub truncate: Option<usize>,
/// Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226).
#[serde(default)]
#[schema(default = "false", example = true)]
pub watermark: bool,
/// Whether to return generation details.
#[serde(default)]
#[schema(default = "true")]
pub details: bool,
/// Whether to return decoder input token logprobs and ids.
#[serde(default)]
#[schema(default = "false")]
pub decoder_input_details: bool,
/// Random sampling seed.
#[serde(default)]
#[schema(
exclusive_minimum = 0,
nullable = true,
default = "null",
example = "null"
)]
pub seed: Option<u64>,
/// The number of highest probability vocabulary tokens to keep for top-n-filtering.
#[serde(default)]
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 5)]
pub top_n_tokens: Option<u32>,
/// Grammar constraints for the generation.
#[serde(default)]
#[schema(nullable = true, default = "null", example = "null")]
pub grammar: Option<GrammarType>,
/// Lora adapter id
#[serde(default)]
#[schema(nullable = true, default = "null", example = "null")]
pub adapter_id: Option<String>,
}
fn default_parameters() -> GenerateParameters {
GenerateParameters {
best_of: None,
temperature: None,
repetition_penalty: None,
frequency_penalty: None,
top_k: None,
top_p: None,
typical_p: None,
do_sample: true,
max_new_tokens: None,
return_full_text: None,
stop: Vec::new(),
truncate: None,
watermark: false,
details: false,
decoder_input_details: false,
seed: None,
top_n_tokens: None,
grammar: None,
adapter_id: None,
}
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
#[serde(try_from = "PromptDeserializer")]
pub struct Prompt(pub Vec<String>);
#[derive(Deserialize)]
#[serde(untagged)]
enum PromptDeserializer {
Single(String),
Multiple(Vec<String>),
}
impl TryFrom<PromptDeserializer> for Prompt {
type Error = String;
fn try_from(value: PromptDeserializer) -> Result<Self, Self::Error> {
match value {
PromptDeserializer::Single(s) => Ok(Prompt(vec![s])),
PromptDeserializer::Multiple(v) => {
if v.is_empty() {
Err(
"Empty array detected. Do not use an empty array for the prompt."
.to_string(),
)
} else {
Ok(Prompt(v))
}
}
}
}
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Debug)]
pub struct CompletionRequest {
/// UNUSED
#[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
/// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
pub model: Option<String>,
/// The prompt to generate completions for.
#[schema(example = "What is Deep Learning?")]
pub prompt: Prompt,
/// The maximum number of tokens that can be generated in the chat completion.
#[serde(default)]
#[schema(default = "1024", example = "32")]
pub max_tokens: Option<u32>,
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
/// lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
#[serde(default)]
#[schema(nullable = true, example = 1.0)]
pub temperature: Option<f32>,
/// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
/// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
#[serde(default)]
#[schema(nullable = true, example = 0.95)]
pub top_p: Option<f32>,
#[serde(default = "bool::default")]
pub stream: bool,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
/// The text to append to the prompt. This is useful for completing sentences or generating a paragraph of text.
    /// Please see the `completion_template` field in the model's `tokenizer_config.json` file for the completion template.
#[serde(default)]
pub suffix: Option<String>,
#[serde(default)]
pub repetition_penalty: Option<f32>,
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
/// decreasing the model's likelihood to repeat the same line verbatim.
#[serde(default)]
#[schema(example = "1.0")]
pub frequency_penalty: Option<f32>,
/// Up to 4 sequences where the API will stop generating further tokens.
#[serde(default)]
#[schema(nullable = true, example = "null")]
pub stop: Option<Vec<String>>,
}
#[derive(Clone, Serialize, ToSchema)]
#[serde(tag = "object")]
enum Completion {
#[serde(rename = "text_completion")]
Chunk(Chunk),
#[serde(rename = "text_completion")]
Final(CompletionFinal),
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct CompletionFinal {
pub id: String,
#[schema(example = "1706270835")]
pub created: u64,
#[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
pub model: String,
pub system_fingerprint: String,
pub choices: Vec<CompletionComplete>,
pub usage: Usage,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct CompletionComplete {
pub index: u32,
pub text: String,
pub logprobs: Option<Vec<f32>>,
pub finish_reason: String,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct Chunk {
pub id: String,
pub created: u64,
pub choices: Vec<CompletionComplete>,
pub model: String,
pub system_fingerprint: String,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletion {
pub id: String,
#[schema(example = "1706270835")]
pub created: u64,
#[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
pub model: String,
pub system_fingerprint: String,
pub choices: Vec<ChatCompletionComplete>,
pub usage: Usage,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionComplete {
pub index: u32,
pub message: OutputMessage,
pub logprobs: Option<ChatCompletionLogprobs>,
pub finish_reason: String,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionLogprobs {
content: Vec<ChatCompletionLogprob>,
}
impl From<(Token, Vec<Token>)> for ChatCompletionLogprobs {
fn from(value: (Token, Vec<Token>)) -> Self {
let (token, top_tokens) = value;
Self {
content: vec![ChatCompletionLogprob {
token: token.text,
logprob: token.logprob,
top_logprobs: top_tokens
.into_iter()
.map(|t| ChatCompletionTopLogprob {
token: t.text,
logprob: t.logprob,
})
.collect(),
}],
}
}
}
impl From<(Vec<Token>, Vec<Vec<Token>>)> for ChatCompletionLogprobs {
fn from(value: (Vec<Token>, Vec<Vec<Token>>)) -> Self {
let (tokens, top_tokens) = value;
// Create an iterator that produces None for top_tokens once it's exhausted
let top_tokens_iter = top_tokens
.into_iter()
.map(Some)
.chain(std::iter::repeat(None));
let content = tokens
.into_iter()
.zip(top_tokens_iter)
.map(|(t, top_t_option)| ChatCompletionLogprob {
token: t.text,
logprob: t.logprob,
top_logprobs: match top_t_option {
Some(top_t) => top_t
.into_iter()
.map(|t| ChatCompletionTopLogprob {
token: t.text,
logprob: t.logprob,
})
.collect(),
None => vec![], // Handle the case where there are no top tokens
},
})
.collect();
Self { content }
}
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionLogprob {
token: String,
logprob: f32,
top_logprobs: Vec<ChatCompletionTopLogprob>,
}
#[derive(Clone, Deserialize, Serialize, ToSchema)]
pub(crate) struct ChatCompletionTopLogprob {
token: String,
logprob: f32,
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Default)]
pub(crate) struct Usage {
pub prompt_tokens: u32,
pub completion_tokens: u32,
pub total_tokens: u32,
}
#[derive(Clone, Serialize, ToSchema)]
#[serde(tag = "object")]
enum CompletionType {
#[serde(rename = "chat.completion.chunk")]
ChatCompletionChunk(ChatCompletionChunk),
#[serde(rename = "chat.completion")]
ChatCompletion(ChatCompletion),
}
impl ChatCompletion {
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
model: String,
system_fingerprint: String,
output: Option<String>,
created: u64,
details: Details,
return_logprobs: bool,
tool_calls: Option<Vec<ToolCall>>,
prompt_tokens: u32,
) -> Self {
let message = match (output, tool_calls) {
(Some(content), None) => OutputMessage::ChatMessage(TextMessage {
role: "assistant".into(),
content,
}),
(None, Some(tool_calls)) => OutputMessage::ToolCall(ToolCallMessage {
role: "assistant".to_string(),
tool_calls,
}),
(Some(output), Some(_)) => {
warn!("Received both chat and tool call");
OutputMessage::ChatMessage(TextMessage {
role: "assistant".into(),
content: output,
})
}
(None, None) => {
warn!("Didn't receive an answer");
OutputMessage::ChatMessage(TextMessage {
role: "assistant".into(),
content: "".to_string(),
})
}
};
Self {
id: String::new(),
created,
model,
system_fingerprint,
choices: vec![ChatCompletionComplete {
index: 0,
message,
logprobs: return_logprobs
.then(|| ChatCompletionLogprobs::from((details.tokens, details.top_tokens))),
finish_reason: details.finish_reason.format(true),
}],
usage: Usage {
prompt_tokens,
completion_tokens: details.generated_tokens,
total_tokens: prompt_tokens + details.generated_tokens,
},
}
}
}
#[derive(Clone, Serialize, ToSchema)]
pub(crate) struct ChatCompletionChunk {
pub id: String,
#[schema(example = "1706270978")]
pub created: u64,
#[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
pub model: String,
pub system_fingerprint: String,
pub choices: Vec<ChatCompletionChoice>,
pub usage: Option<Usage>,
}
#[derive(Clone, Serialize, ToSchema)]
pub(crate) struct ChatCompletionChoice {
pub index: u32,
pub delta: ChatCompletionDelta,
pub logprobs: Option<ChatCompletionLogprobs>,
pub finish_reason: Option<String>,
}
#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
pub struct ToolCallDelta {
#[schema(example = "assistant")]
role: String,
tool_calls: DeltaToolCall,
}
#[derive(Clone, Debug, Serialize, ToSchema)]
#[serde(untagged)]
enum ChatCompletionDelta {
Chat(TextMessage),
Tool(ToolCallDelta),
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Debug, PartialEq)]
pub(crate) struct DeltaToolCall {
pub index: u32,
pub id: String,
pub r#type: String,
pub function: Function,
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Debug, PartialEq)]
pub(crate) struct Function {
pub name: Option<String>,
pub arguments: String,
}
#[allow(clippy::too_many_arguments)]
impl ChatCompletionChunk {
pub(crate) fn new(
model: String,
system_fingerprint: String,
delta: Option<String>,
tool_calls: Option<Vec<String>>,
created: u64,
logprobs: Option<ChatCompletionLogprobs>,
finish_reason: Option<String>,
usage: Option<Usage>,
) -> Self {
let delta = match (delta, tool_calls) {
(Some(delta), _) => ChatCompletionDelta::Chat(TextMessage {
role: "assistant".to_string(),
content: delta,
}),
(None, Some(tool_calls)) => ChatCompletionDelta::Tool(ToolCallDelta {
role: "assistant".to_string(),
tool_calls: DeltaToolCall {
index: 0,
id: String::new(),
r#type: "function".to_string(),
function: Function {
name: None,
arguments: tool_calls[0].to_string(),
},
},
}),
(None, None) => ChatCompletionDelta::Chat(TextMessage {
role: "assistant".to_string(),
content: "".to_string(),
}),
};
Self {
id: String::new(),
created,
model,
system_fingerprint,
choices: vec![ChatCompletionChoice {
index: 0,
delta,
logprobs,
finish_reason,
}],
usage,
}
}
}
#[derive(Clone, Deserialize, ToSchema, Serialize)]
#[cfg_attr(test, derive(Debug, PartialEq, Default))]
pub(crate) struct ChatRequest {
#[schema(example = "mistralai/Mistral-7B-Instruct-v0.2")]
/// [UNUSED] ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
pub model: Option<String>,
/// A list of messages comprising the conversation so far.
#[schema(example = "[{\"role\": \"user\", \"content\": \"What is Deep Learning?\"}]")]
pub messages: Vec<Message>,
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
/// decreasing the model's likelihood to repeat the same line verbatim.
#[serde(default)]
#[schema(example = "1.0")]
pub frequency_penalty: Option<f32>,
/// UNUSED
/// Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
/// (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,
/// the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
/// but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should
/// result in a ban or exclusive selection of the relevant token.
#[serde(default)]
pub logit_bias: Option<Vec<f32>>,
/// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each
/// output token returned in the content of message.
#[serde(default)]
#[schema(example = "false")]
pub logprobs: Option<bool>,
/// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with
/// an associated log probability. logprobs must be set to true if this parameter is used.
#[serde(default)]
#[schema(example = "5")]
pub top_logprobs: Option<u32>,
/// The maximum number of tokens that can be generated in the chat completion.
#[serde(default, alias = "max_completion_tokens")]
#[schema(default = "1024", example = "32")]
pub max_tokens: Option<u32>,
/// UNUSED
/// How many chat completion choices to generate for each input message. Note that you will be charged based on the
/// number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
#[serde(default)]
#[schema(nullable = true, example = "2")]
pub n: Option<u32>,
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
/// increasing the model's likelihood to talk about new topics
#[serde(default)]
#[schema(nullable = true, example = 0.1)]
pub presence_penalty: Option<f32>,
/// Up to 4 sequences where the API will stop generating further tokens.
#[serde(default)]
#[schema(nullable = true, example = "null")]
pub stop: Option<Vec<String>>,
#[serde(default = "bool::default")]
pub stream: bool,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
/// lower values like 0.2 will make it more focused and deterministic.
///
/// We generally recommend altering this or `top_p` but not both.
#[serde(default)]
#[schema(nullable = true, example = 1.0)]
pub temperature: Option<f32>,
/// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
/// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
#[serde(default)]
#[schema(nullable = true, example = 0.95)]
pub top_p: Option<f32>,
/// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of
/// functions the model may generate JSON inputs for.
#[serde(default)]
#[schema(nullable = true, example = "null")]
pub tools: Option<Vec<Tool>>,
/// A prompt to be appended before the tools
#[serde(default)]
#[schema(
nullable = true,
example = "Given the functions available, please respond with a JSON for a function call with its proper arguments that best answers the given prompt. Respond in the format {name: function name, parameters: dictionary of argument name and its value}.Do not use variables."
)]
pub tool_prompt: Option<String>,
/// A specific tool to use. If not provided, the model will default to use any of the tools provided in the tools parameter.
#[serde(default)]
#[schema(nullable = true, default = "auto", example = "auto")]
pub tool_choice: ToolChoice,
/// Response format constraints for the generation.
///
/// NOTE: A request can use `response_format` OR `tools` but not both.
#[serde(default)]
#[schema(nullable = true, default = "null", example = "null")]
pub response_format: Option<GrammarType>,
/// Options for streaming response. Only set this when you set stream: true.
#[serde(default)]
#[schema(nullable = true, example = "null")]
pub stream_options: Option<StreamOptions>,
}
impl ChatRequest {
fn try_into_generate(self, infer: &Infer) -> Result<(GenerateRequest, bool), InferError> {
let ChatRequest {
model,
max_tokens,
messages,
seed,
stop,
tools,
tool_choice,
tool_prompt,
temperature,
response_format,
presence_penalty,
frequency_penalty,
top_p,
top_logprobs,
..
} = self;
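        // OpenAI-style presence_penalty is in [-2.0, 2.0]; shifting it by +2.0 yields a strictly
        // positive value for TGI's repetition_penalty (a rough mapping, since the two penalties
        // are not defined identically).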
let repetition_penalty = presence_penalty.map(|x| x + 2.0);
let max_new_tokens = max_tokens;
let tool_prompt = tool_prompt
.filter(|s| !s.is_empty())
.unwrap_or_else(default_tool_prompt);
let stop = stop.unwrap_or_default();
// enable greedy only when temperature is 0
let (do_sample, temperature) = match temperature {
Some(temperature) if temperature == 0.0 => (false, None),
other => (true, other),
};
if response_format.is_some() && tools.is_some() {
return Err(InferError::ToolError(
"Grammar and tools are mutually exclusive".into(),
));
}
let (inputs, grammar, using_tools) = match response_format {
Some(format) => {
let inputs = infer.apply_chat_template(messages, None)?;
(inputs, Some(format), false)
}
None => {
if let Some(tools) = tools {
match ToolGrammar::apply(tools, tool_choice)? {
Some((updated_tools, tool_schema)) => {
let grammar = GrammarType::Json(serde_json::json!(tool_schema));
let inputs: String = infer.apply_chat_template(
messages,
Some((updated_tools, tool_prompt)),
)?;
(inputs, Some(grammar), true)
}
None => {
// same as if no response_format or tools are set
let inputs = infer.apply_chat_template(messages, None)?;
(inputs, None, false)
}
}
} else {
// if no response_format or tools are set simply apply the chat template to generate inputs
let inputs = infer.apply_chat_template(messages, None)?;
(inputs, None, false)
}
}
};
Ok((
GenerateRequest {
inputs: inputs.to_string(),
add_special_tokens: false,
parameters: GenerateParameters {
best_of: None,
temperature,
repetition_penalty,
frequency_penalty,
top_k: None,
top_p,
typical_p: None,
do_sample,
max_new_tokens,
return_full_text: None,
stop,
truncate: None,
watermark: false,
details: true,
decoder_input_details: false,
seed,
top_n_tokens: top_logprobs,
grammar,
adapter_id: model.filter(|m| *m != "tgi").map(String::from),
},
},
using_tools,
))
}
}
#[derive(Clone, Deserialize, ToSchema, Serialize)]
#[cfg_attr(test, derive(Debug, PartialEq))]
struct StreamOptions {
/// If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value.
#[schema(example = "true")]
include_usage: bool,
}
pub fn default_tool_prompt() -> String {
"\nGiven the functions available, please respond with a JSON for a function call with its proper arguments that best answers the given prompt. Respond in the format {name: function name, parameters: dictionary of argument name and its value}.Do not use variables.\n".to_string()
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
#[serde(tag = "type")]
pub enum TypedChoice {
#[serde(rename = "function")]
Function { function: FunctionName },
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct FunctionName {
pub name: String,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ToSchema, Default)]
#[serde(from = "ToolTypeDeserializer")]
#[serde(rename_all = "snake_case")]
/// <https://platform.openai.com/docs/guides/function-calling/configuring-function-calling-behavior-using-the-tool_choice-parameter>
pub enum ToolChoice {
/// Means the model can pick between generating a message or calling one or more tools.
#[default]
Auto,
/// Means the model will not call any tool and instead generates a message.
#[serde(rename = "none")]
NoTool,
/// Means the model must call one or more tools.
Required,
/// Forces the model to call a specific tool. This structure aligns with the `OpenAI` API schema to force a specific tool.
Function(FunctionName),
}
#[derive(Deserialize, ToSchema)]
#[serde(untagged)]
/// Controls which (if any) tool is called by the model.
/// - `none` means the model will not call any tool and instead generates a message.
/// - `auto` means the model can pick between generating a message or calling one or more tools.
/// - `required` means the model must call one or more tools.
/// - Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
///
/// `none` is the default when no tools are present. `auto` is the default if tools are present.
enum ToolTypeDeserializer {
/// None means `null` was passed in the JSON, and the default choice is applied based on the presence of tools.
Null,
/// `auto` means the model can pick between generating a message or calling one or more tools.
#[schema(example = "auto")]
String(String),
/// Specifying a particular tool forces the model to call that tool, with structured function details.
#[schema(example = r#"{"type": "function", "function": {"name": "my_function"}}"#)]
TypedChoice(TypedChoice),
}
impl From<ToolTypeDeserializer> for ToolChoice {
fn from(value: ToolTypeDeserializer) -> Self {
match value {
ToolTypeDeserializer::Null => ToolChoice::Auto,
ToolTypeDeserializer::String(s) => match s.as_str() {
"none" => ToolChoice::NoTool,
"auto" => ToolChoice::Auto,
"required" => ToolChoice::Required,
_ => ToolChoice::Function(FunctionName { name: s }),
},
ToolTypeDeserializer::TypedChoice(TypedChoice::Function { function }) => {
ToolChoice::Function(function)
}
}
}
}
#[derive(Debug, Deserialize, Serialize, ToSchema, PartialEq)]
pub struct JsonSchemaTool {
#[serde(flatten)]
functions_map: FunctionsMap,
properties: Properties,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct FunctionsMap {
#[serde(rename = "$functions")]
functions: std::collections::HashMap<String, serde_json::Value>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct FunctionRef {
#[serde(rename = "$ref")]
ref_path: String,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct Properties {
#[serde(serialize_with = "serialize_function")]
function: Vec<FunctionRef>,
}
fn serialize_function<S>(functions: &Vec<FunctionRef>, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
use serde::ser::SerializeStruct;
let mut state = serializer.serialize_struct("Function", 1)?;
state.serialize_field("anyOf", functions)?;
state.end()
}
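// With the custom serializer above, a `JsonSchemaTool` serializes to roughly
// `{"$functions": {...}, "properties": {"function": {"anyOf": [{"$ref": ...}, ...]}}}`,
// so the resulting JSON schema restricts the `function` property to one of the listed tools.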
#[derive(Clone, Debug, Deserialize, Serialize, ToSchema, Default, PartialEq)]
pub(crate) struct FunctionDefinition {
#[serde(default)]
pub description: Option<String>,
pub name: String,
#[serde(alias = "parameters")]
pub arguments: serde_json::Value,
}
#[derive(Clone, Debug, Deserialize, Serialize, ToSchema)]
#[cfg_attr(test, derive(PartialEq))]
pub(crate) struct Tool {
// The type of the tool. Currently, only 'function' is supported.
#[schema(example = "function")]
pub r#type: String,
// Grab the tool as generic JSON for debugging purposes.
pub function: FunctionDefinition,
}
#[derive(Clone, Serialize, Deserialize, Default)]
pub(crate) struct ChatTemplateInputs<'a> {
messages: Vec<TextMessage>,
bos_token: Option<&'a str>,
eos_token: Option<&'a str>,
add_generation_prompt: bool,
tools: Option<Vec<Tool>>,
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Default, Debug, PartialEq)]
pub(crate) struct ToolCall {
pub id: String,
pub r#type: String,
pub function: FunctionDefinition,
}
#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
pub struct Url {
url: String,
}
#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum MessageChunk {
Text { text: String },
ImageUrl { image_url: Url },
}
#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
pub struct Message {
#[schema(example = "user")]
role: String,
#[schema(example = "My name is David and I")]
pub content: MessageContent,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[schema(example = "\"David\"")]
name: Option<String>,
}
#[derive(Clone, Deserialize, Serialize, ToSchema, Debug, PartialEq)]
#[serde(untagged)]
pub enum MessageContent {
SingleText(String),
MultipleChunks(Vec<MessageChunk>),
}
// Pushing a chunk to a single text message will convert it to a multiple chunks message
impl MessageContent {
pub fn push(&mut self, chunk: MessageChunk) {
match self {
MessageContent::SingleText(text) => {
*self = MessageContent::MultipleChunks(vec![
MessageChunk::Text { text: text.clone() },
chunk,
]);
}
MessageContent::MultipleChunks(chunks) => {
chunks.push(chunk);
}
}
}
}
#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
pub struct TextMessage {
#[schema(example = "user")]
pub role: String,
#[schema(example = "My name is David and I")]
pub content: String,
}
impl From<Message> for TextMessage {
fn from(value: Message) -> Self {
TextMessage {
role: value.role,
content: match value.content {
MessageContent::SingleText(text) => text,
MessageContent::MultipleChunks(chunks) => chunks
.into_iter()
.map(|chunk| match chunk {
MessageChunk::Text { text } => text,
                        MessageChunk::ImageUrl { image_url } => format!("![]({})", image_url.url),
})
.collect::<Vec<_>>()
.join(""),
},
}
}
}
#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
pub struct ToolCallMessage {
#[schema(example = "assistant")]
role: String,
tool_calls: Vec<ToolCall>,
}
#[derive(Clone, Deserialize, ToSchema, Serialize, Debug, PartialEq)]
#[serde(untagged)]
pub(crate) enum OutputMessage {
ChatMessage(TextMessage),
ToolCall(ToolCallMessage),
}
#[derive(Clone, Debug, Deserialize, ToSchema)]
#[cfg_attr(test, derive(PartialEq))]
pub(crate) struct GenerateRequest {
#[schema(example = "My name is Olivier and I")]
pub inputs: String,
#[serde(default = "default_parameters")]
pub parameters: GenerateParameters,
/// This is used internally because some requests
/// already contain the templated input therefore
/// we shouldn't add the special tokens.
#[serde(default = "default_true", skip)]
pub add_special_tokens: bool,
}
fn default_true() -> bool {
true
}
#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct CompatGenerateRequest {
#[schema(example = "My name is Olivier and I")]
pub inputs: String,
#[serde(default = "default_parameters")]
pub parameters: GenerateParameters,
#[serde(default)]
#[schema(default = "false")]
pub stream: bool,
}
impl From<CompatGenerateRequest> for GenerateRequest {
fn from(req: CompatGenerateRequest) -> Self {
Self {
inputs: req.inputs,
add_special_tokens: true,
parameters: req.parameters,
}
}
}
#[derive(Debug, Serialize, ToSchema)]
pub struct PrefillToken {
#[schema(example = 0)]
pub id: u32,
#[schema(example = "test")]
pub text: String,
#[schema(nullable = true, example = - 0.34)]
pub logprob: f32,
}
#[derive(Debug, Serialize, ToSchema, Clone)]
pub struct Token {
#[schema(example = 0)]
pub id: u32,
#[schema(example = "test")]
pub text: String,
#[schema(nullable = true, example = - 0.34)]
pub logprob: f32,
#[schema(example = "false")]
pub special: bool,
}
#[derive(Debug, Serialize, ToSchema)]
pub struct SimpleToken {
#[schema(example = 0)]
id: u32,
#[schema(example = "test")]
text: String,
#[schema(example = 0)]
start: usize,
#[schema(example = 2)]
stop: usize,
}
#[derive(Debug, Serialize, ToSchema, Clone)]
#[serde(rename_all(serialize = "snake_case"))]
#[schema(example = "Length")]
pub enum FinishReason {
#[schema(rename = "length")]
Length,
#[serde(rename = "eos_token")]
#[schema(rename = "eos_token")]
EndOfSequenceToken,
#[schema(rename = "stop_sequence")]
StopSequence,
}
impl std::fmt::Display for FinishReason {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FinishReason::Length => write!(f, "length"),
FinishReason::EndOfSequenceToken => write!(f, "eos_token"),
FinishReason::StopSequence => write!(f, "stop_sequence"),
}
}
}
impl FinishReason {
pub fn format(&self, use_stop: bool) -> String {
match self {
FinishReason::EndOfSequenceToken if use_stop => "stop".to_string(),
_ => self.to_string(),
}
}
}
#[derive(Serialize, ToSchema)]
pub(crate) struct BestOfSequence {
#[schema(example = "test")]
pub generated_text: String,
#[schema(example = "length")]
pub finish_reason: FinishReason,
#[schema(example = 1)]
pub generated_tokens: u32,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
pub prefill: Vec<PrefillToken>,
pub tokens: Vec<Token>,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub top_tokens: Vec<Vec<Token>>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct Details {
#[schema(example = "length")]
pub finish_reason: FinishReason,
#[schema(example = 1)]
pub generated_tokens: u32,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
pub prefill: Vec<PrefillToken>,
pub tokens: Vec<Token>,
#[serde(skip_serializing_if = "Option::is_none")]
pub best_of_sequences: Option<Vec<BestOfSequence>>,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub top_tokens: Vec<Vec<Token>>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct GenerateResponse {
#[schema(example = "test")]
pub generated_text: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub details: Option<Details>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct ChatTokenizeResponse {
pub(crate) tokenize_response: TokenizeResponse,
pub(crate) templated_text: String,
}
#[derive(Serialize, ToSchema)]
#[serde(transparent)]
pub(crate) struct TokenizeResponse(Vec<SimpleToken>);
#[derive(Serialize, ToSchema)]
pub(crate) struct StreamDetails {
#[schema(example = "length")]
pub finish_reason: FinishReason,
#[schema(example = 1)]
pub generated_tokens: u32,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
#[schema(example = 1)]
pub input_length: u32,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct StreamResponse {
pub index: u32,
pub token: Token,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub top_tokens: Vec<Token>,
#[schema(nullable = true, default = "null", example = "test")]
pub generated_text: Option<String>,
#[schema(nullable = true, default = "null")]
pub details: Option<StreamDetails>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct ErrorResponse {
pub error: String,
pub error_type: String,
}
#[derive(Serialize, Deserialize, ToSchema)]
pub(crate) struct ModelInfo {
#[schema(example = "gpt2")]
pub id: String,
#[schema(example = "model")]
pub object: String,
#[schema(example = 1686935002)]
pub created: u64,
#[schema(example = "openai")]
pub owned_by: String,
}
#[derive(Serialize, Deserialize, ToSchema)]
pub(crate) struct ModelsInfo {
#[schema(example = "list")]
pub object: String,
pub data: Vec<ModelInfo>,
}
impl Default for ModelsInfo {
fn default() -> Self {
ModelsInfo {
object: "list".to_string(),
data: Vec::new(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json::json;
pub(crate) fn get_tokenizer() -> Tokenizer {
let api = hf_hub::api::sync::Api::new().unwrap();
let repo = api.model("gpt2".to_string());
let filename = repo.get("tokenizer.json").unwrap();
Tokenizer::Rust(tokenizers::Tokenizer::from_file(filename).unwrap())
}
#[test]
fn test_hub_nested_tokens_tokenizer_config() {
// this is a subset of the tokenizer.json file
// in this case we expect the tokens to be encoded as simple strings
let json_content = r#"{
"chat_template": "test",
"bos_token": "<|begin▁of▁sentence|>",
"eos_token": "<|end▁of▁sentence|>"
}"#;
let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap();
// check that we successfully parsed the tokens
assert_eq!(
config.chat_template,
Some(ChatTemplateVersions::Single("test".to_string()))
);
assert_eq!(
config.bos_token,
Some(TokenizerConfigToken::String(
"<|begin▁of▁sentence|>".to_string()
))
);
assert_eq!(
config.eos_token,
Some(TokenizerConfigToken::String(
"<|end▁of▁sentence|>".to_string()
))
);
// in this case we expect the tokens to be encoded as structured tokens
// we want the content of the structured token
let json_content = r#"{
"chat_template": "test",
"bos_token": {
"__type": "AddedToken",
"content": "<|begin▁of▁sentence|>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"eos_token": {
"__type": "AddedToken",
"content": "<|end▁of▁sentence|>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
}
}"#;
let config: HubTokenizerConfig = serde_json::from_str(json_content).unwrap();
// check that we successfully parsed the tokens
assert_eq!(
config.chat_template,
Some(ChatTemplateVersions::Single("test".to_string()))
);
assert_eq!(
config.bos_token,
Some(TokenizerConfigToken::Object {
content: "<|begin▁of▁sentence|>".to_string()
})
);
assert_eq!(
config.eos_token,
Some(TokenizerConfigToken::Object {
content: "<|end▁of▁sentence|>".to_string()
})
);
}
#[test]
fn test_chat_simple_string() {
let json = json!({
"model": "",
"messages": [{
"role": "user",
"content": "What is Deep Learning?"
}]
});
let request: ChatRequest = serde_json::from_str(json.to_string().as_str()).unwrap();
assert_eq!(
request.messages[0],
Message {
role: "user".to_string(),
content: MessageContent::SingleText("What is Deep Learning?".to_string()),
name: None
}
);
}
#[test]
fn test_message_content_append() {
let mut content = MessageContent::SingleText("Initial text".to_string());
let chunk = MessageChunk::Text {
text: "Additional text".to_string(),
};
content.push(chunk);
match content {
MessageContent::MultipleChunks(chunks) => {
assert_eq!(chunks.len(), 2);
assert_eq!(
chunks[0],
MessageChunk::Text {
text: "Initial text".to_string()
}
);
assert_eq!(
chunks[1],
MessageChunk::Text {
text: "Additional text".to_string()
}
);
}
_ => panic!("Expected MultipleChunks, but got a different variant"),
}
}
#[test]
fn test_chat_request() {
let json = json!({
"model": "",
"messages": [{
"role": "user",
"content": [
{"type": "text", "text": "Whats in this image?"},
{"type": "image_url", "image_url": {"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"}},
]
}]
});
let request: ChatRequest = serde_json::from_str(json.to_string().as_str()).unwrap();
assert_eq!(
request.messages[0],
Message{
role: "user".to_string(),
content: MessageContent::MultipleChunks(vec![
MessageChunk::Text { text: "Whats in this image?".to_string() },
MessageChunk::ImageUrl { image_url: Url { url: "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png".to_string() }},
]),
name: None
}
);
}
#[test]
fn text_message_convert() {
let message = Message{
role: "user".to_string(),
content: MessageContent::MultipleChunks(vec![
MessageChunk::Text { text: "Whats in this image?".to_string() },
MessageChunk::ImageUrl { image_url: Url { url: "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png".to_string() } }
]),
name: None
};
let textmsg: TextMessage = message.into();
        assert_eq!(
            textmsg.content,
            "Whats in this image?![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png)"
        );
}
#[test]
fn test_chat_stream_options() {
let json = json!({
"model": "",
"stream_options": {"include_usage": true},
"messages": [{
"role": "user",
"content": "Hello"
}]
});
let request: ChatRequest = serde_json::from_str(json.to_string().as_str()).unwrap();
assert!(matches!(
request.stream_options,
Some(StreamOptions {
include_usage: true
})
));
}
#[test]
fn openai_output() {
let message = OutputMessage::ChatMessage(TextMessage {
role: "assistant".to_string(),
content: "This is the answer".to_string(),
});
let serialized = serde_json::to_string(&message).unwrap();
assert_eq!(
serialized,
r#"{"role":"assistant","content":"This is the answer"}"#
);
let message = OutputMessage::ToolCall(ToolCallMessage {
role: "assistant".to_string(),
tool_calls: vec![ToolCall {
id: "0".to_string(),
r#type: "function".to_string(),
function: FunctionDefinition {
description: None,
name: "myfn".to_string(),
arguments: json!({
"format": "csv"
}),
},
}],
});
let serialized = serde_json::to_string(&message).unwrap();
assert_eq!(
serialized,
r#"{"role":"assistant","tool_calls":[{"id":"0","type":"function","function":{"description":null,"name":"myfn","arguments":{"format":"csv"}}}]}"#
);
}
#[test]
fn tool_choice_formats() {
#[derive(Deserialize)]
struct TestRequest {
tool_choice: ToolChoice,
}
let de_none: TestRequest = serde_json::from_str(r#"{"tool_choice":"none"}"#).unwrap();
assert_eq!(de_none.tool_choice, ToolChoice::NoTool);
let de_auto: TestRequest = serde_json::from_str(r#"{"tool_choice":"auto"}"#).unwrap();
assert_eq!(de_auto.tool_choice, ToolChoice::Auto);
let de_required: TestRequest =
serde_json::from_str(r#"{"tool_choice":"required"}"#).unwrap();
assert_eq!(de_required.tool_choice, ToolChoice::Required);
let de_named: TestRequest = serde_json::from_str(r#"{"tool_choice":"myfn"}"#).unwrap();
assert_eq!(
de_named.tool_choice,
ToolChoice::Function(FunctionName {
name: "myfn".to_string(),
})
);
let de_openai_named: TestRequest = serde_json::from_str(
r#"{"tool_choice":{"type":"function","function":{"name":"myfn"}}}"#,
)
.unwrap();
assert_eq!(
de_openai_named.tool_choice,
ToolChoice::Function(FunctionName {
name: "myfn".to_string(),
})
);
}
}
| text-generation-inference/router/src/lib.rs/0 | {
"file_path": "text-generation-inference/router/src/lib.rs",
"repo_id": "text-generation-inference",
"token_count": 25379
} |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#include <ATen/cuda/CUDAContext.h>
#include "q4_matrix.cuh"
#include <vector>
#include "../util.cuh"
#include "../matrix.cuh"
using namespace std;
const int UNSHUF_BLOCKSIZE_X = 64;
const int RECONS_THREADS_X = 64; // Block size and thread count along columns in out, each thread converts 1 column
const int RECONS_THREADS_Y = 1; // Block size and thread count along rows in x and out, each thread converts 8 rows
vector<Q4Matrix*> g_q4_matrices;
void g_q4_keep_matrix(Q4Matrix* m)
{
g_q4_matrices.push_back(m);
}
void g_q4_free_matrices()
{
for (const auto& m : g_q4_matrices) delete m;
g_q4_matrices.clear();
}
Q4Matrix::Q4Matrix
(
const int _height,
const int _width,
const int _groups,
uint32_t* _qweight,
uint32_t* _qzeros,
half* _scales,
uint32_t* _g_idx,
const int _device
) :
height(_height),
width(_width),
groups(_groups),
device(_device)
{
cudaSetDevice(device);
cuda_qweight = _qweight;
cuda_qzeros = _qzeros;
cuda_scales = _scales;
groupsize = height / groups;
if (_g_idx) make_sequential(_g_idx);
}
Q4Matrix::~Q4Matrix()
{
}
// Make sequential
__global__ void make_sequential_kernel
(
const uint32_t* __restrict__ w,
uint32_t* __restrict__ w_new,
const uint32_t* __restrict__ x_map,
const int w_height,
const int w_width
)
{
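    // The source matrix is reinterpreted as uint64_t, so each thread handles two packed
    // 32-bit columns at once (w2_stride = w_width / 2). One destination row gathers a
    // 4-bit nibble from each of eight source rows (chosen via x_map) and repacks them.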
const uint64_t* w2 = (uint64_t*) w;
uint64_t* w_new2 = (uint64_t*) w_new;
int w2_stride = w_width >> 1;
int w2_column = UNSHUF_BLOCKSIZE_X * blockIdx.x + threadIdx.x;
int w_new2_row = blockIdx.y;
int x_map_idx = w_new2_row << 3;
uint64_t dst = 0;
#pragma unroll
for (int i = 0; i < 8; i++)
{
int source_row = x_map[x_map_idx++];
int w2_row = source_row >> 3;
int w2_subrow = source_row & 0x07;
int w2_row_shift = w2_subrow << 2;
int wnew2_row_shift = i << 2;
uint64_t src = w2[w2_row * w2_stride + w2_column];
src >>= w2_row_shift;
src &= 0x0000000f0000000f;
src <<= wnew2_row_shift;
dst |= src;
}
w_new2[w_new2_row * w2_stride + w2_column] = dst;
}
void Q4Matrix::make_sequential(const uint32_t* cpu_g_idx)
{
uint32_t* cuda_new_qweight = NULL;
cudaMalloc(&cuda_new_qweight, height / 8 * width * sizeof(uint32_t));
cudaMalloc(&cuda_x_map, height * sizeof(uint32_t)); // TODO: Should probably be allocated in PyTorch
uint32_t* cpu_g_idx_map = (uint32_t*) calloc(groups, sizeof(uint32_t));
uint32_t* cpu_x_map = (uint32_t*) malloc(height * sizeof(uint32_t));
uint32_t* cpu_x_map_inv = (uint32_t*) malloc(height * sizeof(uint32_t));
// Group histogram
for (int i = 0; i < height; i++) cpu_g_idx_map[cpu_g_idx[i]]++;
// Group map
for (int i = 0, acc = 0; i < groups; i++)
{
short tmp = cpu_g_idx_map[i];
cpu_g_idx_map[i] = acc;
acc += tmp;
}
// X map (inverse)
for (int row = 0; row < height; row++)
{
uint32_t target_group = cpu_g_idx[row];
uint32_t target_row = cpu_g_idx_map[target_group];
cpu_g_idx_map[target_group]++;
cpu_x_map_inv[row] = target_row;
}
// X map
for (int row = 0; row < height; row++) cpu_x_map[cpu_x_map_inv[row]] = row;
// Move to CUDA
cudaMemcpyAsync(cuda_x_map, cpu_x_map, height * sizeof(uint32_t), cudaMemcpyHostToDevice);
// Rearrange rows in w
dim3 threads(UNSHUF_BLOCKSIZE_X, 1, 1);
dim3 blocks(width / UNSHUF_BLOCKSIZE_X / 2, height / 8, 1);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
make_sequential_kernel<<<blocks, threads, 0, stream>>>(cuda_qweight, cuda_new_qweight, cuda_x_map, height / 8, width);
// Replace qweights
cudaMemcpyAsync(cuda_qweight, cuda_new_qweight, height / 8 * width * sizeof(uint32_t), cudaMemcpyDeviceToDevice);
// Cleanup
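    // The synchronize below must complete the async copies and the kernel before the
    // temporary device buffer is freed and the host staging buffers are released.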
cudaDeviceSynchronize();
cudaFree(cuda_new_qweight);
free(cpu_g_idx_map);
free(cpu_x_map);
free(cpu_x_map_inv);
}
__global__ void reconstruct_kernel
(
const uint32_t* __restrict__ w,
half* __restrict__ out, // (y)
const half* __restrict__ w_scales,
const uint32_t* __restrict__ w_zeros,
const int height,
const int width,
const int groupsize
)
{
// Start of block
int column = RECONS_THREADS_X * blockIdx.x + threadIdx.x;
int row = (RECONS_THREADS_Y * blockIdx.y + threadIdx.y) * 8;
// Views
MatrixView_q4_column w_(w, height, width);
MatrixView_half_rw out_(out, height, width);
MatrixView_half w_scales_(w_scales, height / groupsize, width);
MatrixView_q4_row w_zeros_(w_zeros, height / groupsize, width);
// Groupsize version
int group = row / groupsize;
half w_scale = w_scales_.item(group, column);
uint32_t w_zero = (w_zeros_.item(group, column) + 1) & 0x0F;
uint32_t w_read = w_.item_uint32_t(row, column);
half* out_ptr = out_.item_ptr(row, column);
#pragma unroll
for (int s = 0; s < 32; s += 4)
{
half w_item = __hmul(__int2half_rn((int)((w_read >> s) & 0x0f) - w_zero), w_scale);
*out_ptr = w_item; out_ptr += out_.width;
}
}
void Q4Matrix::reconstruct(half* out)
{
dim3 threads(RECONS_THREADS_X, RECONS_THREADS_Y, 1);
dim3 blocks
(
(width + threads.x - 1) / threads.x,
(height / 8 + threads.y - 1) / threads.y,
1
);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
reconstruct_kernel<<<blocks, threads, 0, stream>>>(cuda_qweight, out, cuda_scales, cuda_qzeros, height / 8, width, groupsize);
}
| text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matrix.cu",
"repo_id": "text-generation-inference",
"token_count": 2592
} |
#include "q_matrix.cuh"
#include "matrix_view.cuh"
#include "util.cuh"
#include "quant/qdq_2.cuh"
#include "quant/qdq_3.cuh"
#include "quant/qdq_4.cuh"
#include "quant/qdq_5.cuh"
#include "quant/qdq_6.cuh"
#include "quant/qdq_8.cuh"
#define BLOCK_KN_SIZE 128
#define THREADS_X 32
#define THREADS_Y 32
// Shuffle quantized data on load
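// rows_8 .. rows_2 are cumulative row boundaries per bit width (computed in the QMatrix
// constructor below), so each loop walks its own bit-width region and advances k past it.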
__global__ void shuffle_kernel
(
uint32_t* __restrict__ b_q_weight,
const int size_k,
const int size_n,
const int rows_8,
const int rows_6,
const int rows_5,
const int rows_4,
const int rows_3,
const int rows_2
)
{
int n = blockIdx.x * THREADS_X + threadIdx.x;
if (n >= size_n) return;
int k = 0;
uint32_t* b_ptr = b_q_weight + n;
while (k < rows_8) { shuffle_8bit_4 (b_ptr, size_n); b_ptr += 1 * size_n; k += 4; }
while (k < rows_6) { shuffle_6bit_16(b_ptr, size_n); b_ptr += 3 * size_n; k += 16; }
while (k < rows_5) { shuffle_5bit_32(b_ptr, size_n); b_ptr += 5 * size_n; k += 32; }
while (k < rows_4) { shuffle_4bit_8 (b_ptr, size_n); b_ptr += 1 * size_n; k += 8; }
while (k < rows_3) { shuffle_3bit_32(b_ptr, size_n); b_ptr += 3 * size_n; k += 32; }
while (k < rows_2) { shuffle_2bit_16(b_ptr, size_n); b_ptr += 1 * size_n; k += 16; }
}
// QMatrix constructor
QMatrix::QMatrix
(
const int _device,
const int _height,
const int _width,
const int _groups,
uint32_t* _q_weight,
uint16_t* _q_perm,
uint16_t* _q_invperm,
uint32_t* _q_scale,
half* _q_scale_max,
uint16_t* _q_groups,
uint16_t* _q_group_map,
uint32_t* _gptq_qzeros,
half* _gptq_scales,
uint32_t* _gptq_g_idx,
half* _temp_dq
) :
device(_device),
height(_height),
width(_width),
groups(_groups),
temp_dq(_temp_dq)
{
cudaSetDevice(device);
failed = false;
cuda_q_weight = _q_weight;
cuda_q_perm = _q_perm;
cuda_q_invperm = _q_invperm;
cuda_q_scale = _q_scale;
cuda_q_scale_max = _q_scale_max;
cuda_q_groups = _q_groups;
cuda_q_group_map = _q_group_map;
cuda_gptq_qzeros = _gptq_qzeros;
cuda_gptq_scales = _gptq_scales;
is_gptq = (_gptq_qzeros != NULL);
if (is_gptq)
{
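        // Infer the GPTQ group size: the smallest power of two such that
        // groups * gptq_groupsize >= height.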
gptq_groupsize = 1;
while (gptq_groupsize * groups < height) gptq_groupsize *= 2;
}
// Create group map
rows_8 = 0;
rows_6 = 0;
rows_5 = 0;
rows_4 = 0;
rows_3 = 0;
rows_2 = 0;
if (!is_gptq)
{
uint16_t* cpu_q_groups = (uint16_t*)calloc(groups * 2, sizeof(uint16_t));
cudaMemcpy(cpu_q_groups, cuda_q_groups, groups * 2 * sizeof(uint16_t), cudaMemcpyDeviceToHost);
int row = 0;
for (int i = 0; i < groups; i++)
{
int bits = cpu_q_groups[i * 2];
int rows;
if (i < groups - 1)
{
int qrows = cpu_q_groups[i * 2 + 3] - cpu_q_groups[i * 2 + 1];
rows = qrows * 32 / bits;
}
else rows = height - row;
if (bits == 8) rows_8 += rows;
if (bits == 6) rows_6 += rows;
if (bits == 5) rows_5 += rows;
if (bits == 4) rows_4 += rows;
if (bits == 3) rows_3 += rows;
if (bits == 2) rows_2 += rows;
row += rows;
}
free(cpu_q_groups);
rows_6 += rows_8;
rows_5 += rows_6;
rows_4 += rows_5;
rows_3 += rows_4;
rows_2 += rows_3;
}
else
{
rows_4 = height;
rows_3 = height;
rows_2 = height;
if (_gptq_g_idx)
{
if (!make_sequential(_gptq_g_idx))
{
failed = true;
//printf("FAIL\n");
return;
}
}
}
// DBGI(rows_8);
// DBGI(rows_6);
// DBGI(rows_5);
// DBGI(rows_4);
// DBGI(rows_3);
// DBGI(rows_2);
// Shuffle quantized data
dim3 blockDim, gridDim;
blockDim.x = THREADS_X;
blockDim.y = 1;
gridDim.x = DIVIDE(width, THREADS_X);
gridDim.y = 1;
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
shuffle_kernel<<<gridDim, blockDim, 0, stream>>>(cuda_q_weight, height, width, rows_8, rows_6, rows_5, rows_4, rows_3, rows_2);
}
QMatrix::~QMatrix()
{
}
// Reconstruct b[k,n] (GPTQ)
__global__ void reconstruct_gptq_kernel
(
const uint32_t* __restrict__ b_q_weight,
const uint16_t* __restrict__ b_q_perm,
const uint32_t* __restrict__ b_gptq_qzeros,
const half* __restrict__ b_gptq_scales,
//const uint16_t* __restrict__ b_q_groups,
const int size_k,
const int size_n,
const int groupsize,
const int groups,
half* __restrict__ b,
const int rows_4
)
{
MatrixView_half_rw b_(b, size_k, size_n);
MatrixView_q4_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n);
MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n);
int offset_k = BLOCK_KN_SIZE * blockIdx.y;
int offset_n = BLOCK_KN_SIZE * blockIdx.x * 4;
int end_k = min(offset_k + BLOCK_KN_SIZE, size_k);
// Preload remapping table
__shared__ uint16_t perm[BLOCK_KN_SIZE];
int t = threadIdx.x;
if (b_q_perm)
{
if (offset_k + t < size_k)
perm[t] = b_q_perm[offset_k + t];
}
// Column
int n = offset_n + t * 4;
if (n >= size_n) return;
// Find initial group
int group = offset_k / groupsize;
int nextgroup = offset_k + groupsize;
// b offset
int qk = offset_k / (32 / 4);
const uint32_t* b_ptr = b_q_weight + qk * size_n + n;
// Initial zeros/scale
int zeros[4];
half2 scales[4];
half2 z1z16[4][2];
half2 y1y16[4][2];
b_gptq_qzeros_.item4(zeros, group, n);
b_gptq_scales_.item4_h2(scales, group, n);
dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]);
dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]);
dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]);
dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]);
__syncthreads();
int k = offset_k;
int lk = 0;
while (k < end_k)
{
if (k == nextgroup)
{
group++;
nextgroup += groupsize;
b_gptq_qzeros_.item4(zeros, group, n);
b_gptq_scales_.item4_h2(scales, group, n);
dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]);
dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]);
dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]);
dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]);
}
for (int p = 0; p < 4; p++)
{
half2 dq[4][4];
const int4* b_ptr4 = (int4*) b_ptr;
int4 load_int4 = *b_ptr4;
dequant_4bit_8_gptq(load_int4.x, dq[0], z1z16[0], y1y16[0], size_n, false);
dequant_4bit_8_gptq(load_int4.y, dq[1], z1z16[1], y1y16[1], size_n, false);
dequant_4bit_8_gptq(load_int4.z, dq[2], z1z16[2], y1y16[2], size_n, false);
dequant_4bit_8_gptq(load_int4.w, dq[3], z1z16[3], y1y16[3], size_n, false);
b_ptr += size_n;
//half* dqh = (half*)dq;
if (b_q_perm)
{
for (int j = 0; j < 4; j++)
{
for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]);
b_.set4(perm[lk++], n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j]));
b_.set4(perm[lk++], n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j]));
}
}
else
{
for (int j = 0; j < 4; j++)
{
for (int v = 0; v < 4; v++) dq[v][j] = __hmul2(scales[v], dq[v][j]);
b_.set4(offset_k + lk++, n, __low2half(dq[0][j]), __low2half(dq[1][j]), __low2half(dq[2][j]), __low2half(dq[3][j]));
b_.set4(offset_k + lk++, n, __high2half(dq[0][j]), __high2half(dq[1][j]), __high2half(dq[2][j]), __high2half(dq[3][j]));
}
}
}
k += 32;
}
}
// Reconstruct b[k,n]
__global__ void reconstruct_kernel
(
const uint32_t* __restrict__ b_q_weight,
const uint16_t* __restrict__ b_q_perm,
const uint32_t* __restrict__ b_q_scale,
const half* __restrict__ b_q_scale_max,
const uint16_t* __restrict__ b_q_group_map,
const int size_k,
const int size_n,
//const int groupsize,
const int groups,
half* __restrict__ b,
const int rows_8,
const int rows_6,
const int rows_5,
const int rows_4,
const int rows_3,
const int rows_2
)
{
MatrixView_half_rw b_(b, size_k, size_n);
MatrixView_q4_row b_q_scale_(b_q_scale, groups, size_n);
int offset_k = BLOCK_KN_SIZE * blockIdx.y;
int offset_n = BLOCK_KN_SIZE * blockIdx.x;
// Preload remapping table
int t = threadIdx.x;
__shared__ uint16_t perm[BLOCK_KN_SIZE];
if (offset_k + t < size_k)
perm[t] = b_q_perm[offset_k + t];
// Column
int n = offset_n + t;
if (n >= size_n) return;
// Find initial group
// int group = offset_k / groupsize;
int group = b_q_group_map[offset_k * 2];
int pre_rows_8 = min(rows_8, offset_k);
int pre_rows_6 = offset_k > rows_8 ? min(rows_6, offset_k) - rows_8 : 0;
int pre_rows_5 = offset_k > rows_6 ? min(rows_5, offset_k) - rows_6 : 0;
int pre_rows_4 = offset_k > rows_5 ? min(rows_4, offset_k) - rows_5 : 0;
int pre_rows_3 = offset_k > rows_4 ? min(rows_3, offset_k) - rows_4 : 0;
int pre_rows_2 = offset_k > rows_3 ? min(rows_2, offset_k) - rows_3 : 0;
int qk = 0;
qk += pre_rows_8 / 32 * 8;
qk += pre_rows_6 / 32 * 6;
qk += pre_rows_5 / 32 * 5;
qk += pre_rows_4 / 32 * 4;
qk += pre_rows_3 / 32 * 3;
qk += pre_rows_2 / 32 * 2;
const uint32_t* b_ptr = b_q_weight + qk * size_n + n;
half qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]);
half2 qs_h2 = __halves2half2(qs_h, qs_h);
int nextgroup = offset_k + b_q_group_map[offset_k * 2 + 1];
int end_k = min(offset_k + BLOCK_KN_SIZE, size_k);
int k = offset_k;
int lk = 0;
__syncthreads();
while (k < rows_8 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 4; p++)
{
half2 dq[4];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
uint32_t q_1 = *b_ptr; b_ptr += size_n;
dequant_8bit_8(q_0, q_1, dq, size_n);
for (int j = 0; j < 4; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 8; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 32;
}
while (k < rows_6 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 2; p++)
{
half2 dq[8];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
uint32_t q_1 = *b_ptr; b_ptr += size_n;
uint32_t q_2 = *b_ptr; b_ptr += size_n;
dequant_6bit_16(q_0, q_1, q_2, dq, size_n);
for (int j = 0; j < 8; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 16; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 32;
}
while (k < rows_5 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 1; p++)
{
half2 dq[16];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
uint32_t q_1 = *b_ptr; b_ptr += size_n;
uint32_t q_2 = *b_ptr; b_ptr += size_n;
uint32_t q_3 = *b_ptr; b_ptr += size_n;
uint32_t q_4 = *b_ptr; b_ptr += size_n;
dequant_5bit_32(q_0, q_1, q_2, q_3, q_4, dq, size_n);
for (int j = 0; j < 16; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 32; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 32;
}
while (k < rows_4 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 4; p++)
{
half2 dq[4];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
dequant_4bit_8(q_0, dq, size_n);
for (int j = 0; j < 4; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 8; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 32;
}
while (k < rows_3 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 1; p++)
{
half2 dq[16];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
uint32_t q_1 = *b_ptr; b_ptr += size_n;
uint32_t q_2 = *b_ptr; b_ptr += size_n;
dequant_3bit_32(q_0, q_1, q_2, dq, size_n);
for (int j = 0; j < 16; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 32; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 32;
}
while (k < rows_2 && k < end_k)
{
if (k == nextgroup) { group++; qs_h = dq_scale(b_q_scale_.item(group, n), b_q_scale_max[group]); nextgroup += b_q_group_map[k * 2 + 1]; qs_h2 = __halves2half2(qs_h, qs_h); }
for (int p = 0; p < 1; p++)
{
half2 dq[8];
uint32_t q_0 = *b_ptr; b_ptr += size_n;
dequant_2bit_16(q_0, dq, size_n);
for (int j = 0; j < 8; j++) dq[j] = __hmul2(dq[j], qs_h2);
half* dqh = (half*) dq;
for (int j = 0; j < 16; j++) b_.set(perm[lk++], n, dqh[j]);
}
k += 16;
}
}
void QMatrix::reconstruct(half* out)
{
dim3 blockDim, gridDim;
blockDim.x = BLOCK_KN_SIZE;
blockDim.y = 1;
gridDim.y = DIVIDE(height, BLOCK_KN_SIZE);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (!is_gptq)
{
gridDim.x = DIVIDE(width, BLOCK_KN_SIZE);
reconstruct_kernel<<<gridDim, blockDim, 0, stream>>>
(
cuda_q_weight,
cuda_q_perm,
cuda_q_scale,
cuda_q_scale_max,
cuda_q_group_map,
height,
width,
//groupsize,
groups,
out,
rows_8,
rows_6,
rows_5,
rows_4,
rows_3,
rows_2
);
}
else
{
gridDim.x = DIVIDE(width, BLOCK_KN_SIZE * 4);
reconstruct_gptq_kernel<<<gridDim, blockDim, 0, stream>>>
(
cuda_q_weight,
cuda_q_perm,
cuda_gptq_qzeros,
cuda_gptq_scales,
//const uint16_t* __restrict__ b_q_groups,
height,
width,
gptq_groupsize,
groups,
out,
rows_4
);
}
}
__global__ void make_sequential_kernel
(
const uint32_t* __restrict__ w,
uint32_t* __restrict__ w_new,
const uint16_t* __restrict__ q_perm,
const int w_height,
const int w_width
)
{
const uint64_t* w2 = (uint64_t*) w;
uint64_t* w_new2 = (uint64_t*) w_new;
int w2_stride = w_width >> 1;
int w2_column = THREADS_X * blockIdx.x + threadIdx.x;
if (w2_column >= w2_stride) return;
int w_new2_row = blockIdx.y;
int q_perm_idx = w_new2_row << 3;
uint64_t dst = 0;
#pragma unroll
for (int i = 0; i < 8; i++)
{
int source_row = q_perm[q_perm_idx++];
int w2_row = source_row >> 3;
int w2_subrow = source_row & 0x07;
int w2_row_shift = w2_subrow << 2;
int wnew2_row_shift = i << 2;
uint64_t src = w2[w2_row * w2_stride + w2_column];
src >>= w2_row_shift;
src &= 0x0000000f0000000f;
src <<= wnew2_row_shift;
dst |= src;
}
w_new2[w_new2_row * w2_stride + w2_column] = dst;
}
bool QMatrix::make_sequential(const uint32_t* cpu_g_idx)
{
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
uint32_t* cuda_new_qweight = NULL;
cudaError_t err = cudaMalloc(&cuda_new_qweight, height / 8 * width * sizeof(uint32_t));
if (err != cudaSuccess) {
cudaError_t cuda_status = cudaGetLastError(); // Clear error
return false;
}
uint32_t* cpu_g_idx_map = (uint32_t*) calloc(groups, sizeof(uint32_t));
uint32_t* cpu_x_map = (uint32_t*) malloc(height * sizeof(uint32_t));
uint32_t* cpu_x_map_inv = (uint32_t*) malloc(height * sizeof(uint32_t));
// Group histogram
for (int i = 0; i < height; i++) cpu_g_idx_map[cpu_g_idx[i]]++;
// Group map
for (int i = 0, acc = 0; i < groups; i++)
{
short tmp = cpu_g_idx_map[i];
cpu_g_idx_map[i] = acc;
acc += tmp;
}
// X map (inverse)
for (int row = 0; row < height; row++)
{
uint32_t target_group = cpu_g_idx[row];
uint32_t target_row = cpu_g_idx_map[target_group];
cpu_g_idx_map[target_group]++;
cpu_x_map_inv[row] = target_row;
}
// X map
for (int row = 0; row < height; row++) cpu_x_map[cpu_x_map_inv[row]] = row;
// Reduce to uint16_t
uint16_t* cpu_x_map16 = (uint16_t*)cpu_x_map;
uint16_t* cpu_x_map_inv16 = (uint16_t*)cpu_x_map_inv;
for (int row = 0; row < height; row++) cpu_x_map16[row] = (uint16_t) cpu_x_map[row];
for (int row = 0; row < height; row++) cpu_x_map_inv16[row] = (uint16_t) cpu_x_map_inv[row];
// Move to CUDA
cudaMemcpyAsync(cuda_q_perm, cpu_x_map16, height * sizeof(uint16_t), cudaMemcpyHostToDevice);
cudaMemcpyAsync(cuda_q_invperm, cpu_x_map_inv16, height * sizeof(uint16_t), cudaMemcpyHostToDevice);
// Rearrange rows in w
dim3 blockDim, gridDim;
blockDim.x = THREADS_X;
blockDim.y = 1;
gridDim.x = DIVIDE(width, THREADS_X);
gridDim.y = height / 8;
make_sequential_kernel<<<gridDim, blockDim, 0, stream>>>
(
cuda_q_weight,
cuda_new_qweight,
cuda_q_perm,
height / 8,
width
);
// Replace qweights
cudaMemcpyAsync(cuda_q_weight, cuda_new_qweight, height / 8 * width * sizeof(uint32_t), cudaMemcpyDeviceToDevice);
// Cleanup
cudaDeviceSynchronize();
cudaFree(cuda_new_qweight);
free(cpu_g_idx_map);
free(cpu_x_map);
free(cpu_x_map_inv);
return true;
}
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_matrix.cu",
"repo_id": "text-generation-inference",
"token_count": 10524
} |
# Origin: https://github.com/predibase/lorax
# Path: lorax/server/lorax_server/adapters/__init__.py
# License: Apache License Version 2.0, January 2004
from text_generation_server.adapters.weights import (
AdapterBatchData,
AdapterBatchMetadata,
)
__all__ = [
"AdapterBatchData",
"AdapterBatchMetadata",
]
| text-generation-inference/server/text_generation_server/adapters/__init__.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/adapters/__init__.py",
"repo_id": "text-generation-inference",
"token_count": 125
} |
import torch
from typing import List
AWQ_PACK_ORDER = [0, 2, 4, 6, 1, 3, 5, 7]
REVERSE_AWQ_PACK_ORDER = [0, 4, 1, 5, 2, 6, 3, 7]
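# AWQ interleaves the eight 4-bit values inside each 32-bit word in AWQ_PACK_ORDER;
# REVERSE_AWQ_PACK_ORDER maps them back to sequential (GPTQ-style) positions.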
def pack(imatrix: torch.Tensor, direction: str = "column"):
"""
Packs a 4-bit integer matrix into a packed 32-bit integer matrix.
Args:
imatrix (torch.Tensor): matrix of integers
direction (str): direction of packing, either "column" or "row"
Returns:
qmatrix (torch.Tensor): packed matrix of integers
"""
shifts = torch.arange(0, 32, 4, dtype=torch.int32, device=imatrix.device)
    imatrix = imatrix.to(torch.int8) & 0x0F  # mask to the low 4 bits to drop any overflow
if direction == "column":
imatrix = imatrix.view(-1, imatrix.shape[1] // (32 // 4), (32 // 4))
qmatrix = torch.bitwise_left_shift(imatrix, shifts[None, None, :]).sum(dim=-1)
elif direction == "row":
imatrix = imatrix.view(imatrix.shape[0] // (32 // 4), (32 // 4), -1)
qmatrix = torch.bitwise_left_shift(imatrix, shifts[None, :, None]).sum(dim=1)
qmatrix = qmatrix.to(torch.int32)
return qmatrix
def unpack(qmatrix: torch.Tensor, direction: str = "column"):
"""
Unpacks a 32-bit packed integer matrix into a 4-bit integer matrix.
Args:
qmatrix (torch.Tensor): matrix of packed integers
direction (str): direction of unpacking, either "column" or "row"
Returns:
imatrix (torch.Tensor): matrix of integers
"""
shifts = torch.arange(0, 32, 4, device=qmatrix.device)
if direction == "column":
imatrix = torch.bitwise_right_shift(
qmatrix[:, :, None], shifts[None, None, :]
).view(qmatrix.shape[0], -1)
elif direction == "row":
imatrix = torch.bitwise_right_shift(
qmatrix[:, None, :], shifts[None, :, None]
).view(-1, qmatrix.shape[-1])
    imatrix = imatrix.to(torch.int8) & 0x0F  # mask to the low 4 bits to drop any overflow
return imatrix
def apply_order(
imatrix: torch.Tensor,
direction: str = "column",
order: List[int] = AWQ_PACK_ORDER,
):
"""
Applies the order to a 4-bit integer matrix.
Args:
imatrix (torch.Tensor): matrix of integers
direction (str): direction of applying order, either "column" or "row"
order (List[int]): order to apply, default is AWQ_PACK_ORDER
Returns:
imatrix (torch.Tensor): matrix of integers
"""
if direction == "column":
imatrix = imatrix.view(-1, (32 // 4))[:, order].view(imatrix.shape)
elif direction == "row":
imatrix = imatrix.view((32 // 4), -1)[order, :].view(imatrix.shape)
return imatrix
def fast_awq_to_gptq(qweight, qzeros):
# awq uses column packing for both weights and zeros
izeros = unpack(qzeros, direction="column")
iweights = unpack(qweight, direction="column")
# Reverse the order of the iweight and izeros tensors
izeros = apply_order(izeros, direction="column", order=REVERSE_AWQ_PACK_ORDER)
iweights = apply_order(iweights, direction="column", order=REVERSE_AWQ_PACK_ORDER)
# Subtract 1 from the izeros tensor (gptq adds 1 to the zeros)
izeros = izeros - 1
# exllama uses row packing for weights and column packing for zeros
qzeros = pack(izeros, direction="column")
qweight = pack(iweights, direction="row")
return qweight, qzeros
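# Minimal round-trip sketch (illustrative only, not used by the server): it builds a small
# random CPU tensor and checks that `pack` and `unpack` are inverses for 4-bit values,
# which is the invariant `fast_awq_to_gptq` relies on when it unpacks and repacks weights.
def _pack_unpack_roundtrip_example() -> None:
    imatrix = torch.randint(0, 16, (8, 16), dtype=torch.int32)
    qmatrix = pack(imatrix, direction="column")  # shape (8, 2): eight 4-bit values per int32
    recovered = unpack(qmatrix, direction="column")  # back to shape (8, 16)
    assert torch.equal(recovered.to(torch.int32), imatrix)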
| text-generation-inference/server/text_generation_server/layers/awq/conversion_utils.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/awq/conversion_utils.py",
"repo_id": "text-generation-inference",
"token_count": 1384
} |
# https://github.com/fpgaminer/GPTQ-triton
"""
Mostly the same as the autotuner in Triton, but with a few changes like using 40 runs instead of 100.
"""
import builtins
import math
import time
from typing import Dict
import triton
class Autotuner(triton.KernelInterface):
def __init__(
self,
fn,
arg_names,
configs,
key,
reset_to_zero,
prune_configs_by: Dict = None,
nearest_power_of_two: bool = False,
):
"""
:param prune_configs_by: a dict of functions that are used to prune configs, fields:
            'perf_model': performance model used to predict running time with different configs, returns running time
'top_k': number of configs to bench
            'early_config_prune'(optional): a function used to do early pruning (e.g. of num_stages). It takes configs: List[Config] as its input and returns pruned configs.
'nearest_power_of_two'(optional): whether to round key arguments to the nearest power of two when caching tuning results
"""
if not configs:
self.configs = [triton.Config({}, num_warps=4, num_stages=2)]
else:
self.configs = configs
self.key_idx = [arg_names.index(k) for k in key]
self.nearest_power_of_two = nearest_power_of_two
self.cache = {}
# hook to reset all required tensor to zeros before relaunching a kernel
self.hook = lambda args: 0
if reset_to_zero is not None:
self.reset_idx = [arg_names.index(k) for k in reset_to_zero]
def _hook(args):
for i in self.reset_idx:
args[i].zero_()
self.hook = _hook
self.arg_names = arg_names
# prune configs
if prune_configs_by:
perf_model, top_k = (
prune_configs_by["perf_model"],
prune_configs_by["top_k"],
)
if "early_config_prune" in prune_configs_by:
early_config_prune = prune_configs_by["early_config_prune"]
else:
perf_model, top_k, early_config_prune = None, None, None
self.perf_model, self.configs_top_k = perf_model, top_k
self.early_config_prune = early_config_prune
self.fn = fn
def _bench(self, *args, config, **meta):
# check for conflicts, i.e. meta-parameters both provided
# as kwargs and by the autotuner
conflicts = meta.keys() & config.kwargs.keys()
if conflicts:
raise ValueError(
f"Conflicting meta-parameters: {', '.join(conflicts)}."
" Make sure that you don't re-define auto-tuned symbols."
)
# augment meta-parameters with tunable ones
current = dict(meta, **config.kwargs)
def kernel_call():
if config.pre_hook:
config.pre_hook(self.nargs)
self.hook(args)
self.fn.run(
*args,
num_warps=config.num_warps,
num_stages=config.num_stages,
**current,
)
try:
            # In testing, using only 40 reps seems to be close enough and it appears to be what PyTorch uses
# PyTorch also sets fast_flush to True, but I didn't see any speedup so I'll leave the default
return triton.testing.do_bench(
kernel_call, quantiles=(0.5, 0.2, 0.8), rep=40
)
except triton.OutOfResources:
return [float("inf"), float("inf"), float("inf")]
def run(self, *args, **kwargs):
self.nargs = dict(zip(self.arg_names, args))
if len(self.configs) > 1:
key = tuple(args[i] for i in self.key_idx)
# This reduces the amount of autotuning by rounding the keys to the nearest power of two
# In my testing this gives decent results, and greatly reduces the amount of tuning required
if self.nearest_power_of_two:
key = tuple([2 ** int(math.log2(x) + 0.5) for x in key])
if key not in self.cache:
# prune configs
pruned_configs = self.prune_configs(kwargs)
bench_start = time.time()
timings = {
config: self._bench(*args, config=config, **kwargs)
for config in pruned_configs
}
bench_end = time.time()
self.bench_time = bench_end - bench_start
self.cache[key] = builtins.min(timings, key=timings.get)
self.hook(args)
self.configs_timings = timings
config = self.cache[key]
else:
config = self.configs[0]
self.best_config = config
if config.pre_hook is not None:
config.pre_hook(self.nargs)
return self.fn.run(
*args,
num_warps=config.num_warps,
num_stages=config.num_stages,
**kwargs,
**config.kwargs,
)
def prune_configs(self, kwargs):
pruned_configs = self.configs
if self.early_config_prune:
pruned_configs = self.early_config_prune(self.configs, self.nargs)
if self.perf_model:
top_k = self.configs_top_k
if isinstance(top_k, float) and top_k <= 1.0:
top_k = int(len(self.configs) * top_k)
if len(pruned_configs) > top_k:
est_timing = {
config: self.perf_model(
**self.nargs,
**kwargs,
**config.kwargs,
num_stages=config.num_stages,
num_warps=config.num_warps,
)
for config in pruned_configs
}
pruned_configs = sorted(est_timing.keys(), key=lambda x: est_timing[x])[
:top_k
]
return pruned_configs
def warmup(self, *args, **kwargs):
self.nargs = dict(zip(self.arg_names, args))
for config in self.prune_configs(kwargs):
self.fn.warmup(
*args,
num_warps=config.num_warps,
num_stages=config.num_stages,
**kwargs,
**config.kwargs,
)
self.nargs = None
def autotune(
configs, key, prune_configs_by=None, reset_to_zero=None, nearest_power_of_two=False
):
"""
Decorator for auto-tuning a :code:`triton.jit`'d function.
.. highlight:: python
.. code-block:: python
@triton.autotune(configs=[
triton.Config(meta={'BLOCK_SIZE': 128}, num_warps=4),
triton.Config(meta={'BLOCK_SIZE': 1024}, num_warps=8),
],
key=['x_size'] # the two above configs will be evaluated anytime
# the value of x_size changes
)
@triton.jit
def kernel(x_ptr, x_size, **META):
BLOCK_SIZE = META['BLOCK_SIZE']
    :note: When all the configurations are evaluated, the kernel will run multiple times.
        This means that whatever value the kernel updates will be updated multiple times.
        To avoid this undesired behavior, you can use the `reset_to_zero` argument, which
        resets the value of the provided tensor to `zero` before running any configuration.
:param configs: a list of :code:`triton.Config` objects
:type configs: list[triton.Config]
:param key: a list of argument names whose change in value will trigger the evaluation of all provided configs.
:type key: list[str]
:param prune_configs_by: a dict of functions that are used to prune configs, fields:
        'perf_model': performance model used to predict running time with different configs, returns running time
        'top_k': number of configs to bench
        'early_config_prune'(optional): a function used to do early pruning (e.g. of num_stages). It takes configs: List[Config] as its input and returns pruned configs.
:param reset_to_zero: a list of argument names whose value will be reset to zero before evaluating any configs.
:type reset_to_zero: list[str]
"""
def decorator(fn):
return Autotuner(
fn,
fn.arg_names,
configs,
key,
reset_to_zero,
prune_configs_by,
nearest_power_of_two,
)
return decorator
def matmul248_kernel_config_pruner(configs, nargs):
"""
    The main purpose of this function is to shrink BLOCK_SIZE_* when the corresponding problem dimension (M, N or K) is smaller than the block size, and to drop duplicate configs that result.
"""
m = max(2 ** int(math.ceil(math.log2(nargs["M"]))), 16)
n = max(2 ** int(math.ceil(math.log2(nargs["N"]))), 16)
k = max(2 ** int(math.ceil(math.log2(nargs["K"]))), 16)
used = set()
for config in configs:
block_size_m = min(m, config.kwargs["BLOCK_SIZE_M"])
block_size_n = min(n, config.kwargs["BLOCK_SIZE_N"])
block_size_k = min(k, config.kwargs["BLOCK_SIZE_K"])
group_size_m = config.kwargs["GROUP_SIZE_M"]
if (
block_size_m,
block_size_n,
block_size_k,
group_size_m,
config.num_stages,
config.num_warps,
) in used:
continue
used.add(
(
block_size_m,
block_size_n,
block_size_k,
group_size_m,
config.num_stages,
config.num_warps,
)
)
yield triton.Config(
{
"BLOCK_SIZE_M": block_size_m,
"BLOCK_SIZE_N": block_size_n,
"BLOCK_SIZE_K": block_size_k,
"GROUP_SIZE_M": group_size_m,
},
num_stages=config.num_stages,
num_warps=config.num_warps,
)
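# Typical wiring (sketch; the decorator arguments below are illustrative, not tuned values):
# the pruner is passed through `prune_configs_by` so oversized block sizes are dropped
# before any config is benchmarked.
#
#     @autotune(
#         configs=[...],  # list of triton.Config candidates
#         key=["M", "N", "K"],
#         nearest_power_of_two=True,
#         prune_configs_by={
#             "early_config_prune": matmul248_kernel_config_pruner,
#             "perf_model": None,
#             "top_k": None,
#         },
#     )
#     @triton.jit
#     def matmul_248_kernel(...):
#         ...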
| text-generation-inference/server/text_generation_server/layers/gptq/custom_autotune.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/gptq/custom_autotune.py",
"repo_id": "text-generation-inference",
"token_count": 5117
} |
import torch
import math
from torch import nn
from torch.nn import functional as F
from typing import Optional, Tuple
from text_generation_server.layers import TensorParallelEmbedding, FastLinear
from text_generation_server.layers.tensor_parallel import TensorParallelHead
from text_generation_server.utils.speculate import get_speculate
class MLPSpeculatorLayerNorm(nn.Module):
"""
    An L2 normalization implementation
...
Args
----
normalized_shape : int
Dimensionality of input data (size of final tensor axis)
    elementwise_scale_weight : torch.Tensor
        learned scaling term applied after normalization
    elementwise_shift_bias : torch.Tensor
        learned bias term added after normalization
eps : float
        Safety term to prevent division by zero. Make sure the chosen value fits in the range of your encoding scheme (e.g. fp16 requires eps >= 6e-8).
"""
def __init__(
self,
prefix,
config,
weights,
eps=1e-06,
):
super(MLPSpeculatorLayerNorm, self).__init__()
self.weight = weights.get_tensor(f"{prefix}.weight")
self.bias = weights.get_tensor(f"{prefix}.bias")
self.eps = eps
def forward(self, x):
xf = x
xf = xf * torch.rsqrt(xf.pow(2).mean(-1, keepdim=True) + self.eps)
x = xf.type_as(x)
x = self.weight * x
x = x + self.bias
return x
INV_SQRT2 = 2**-0.5
def simple_norm(x: torch.Tensor, eps=1e-06):
xf = x
xf = xf * torch.rsqrt(xf.pow(2).mean(-1, keepdim=True) + eps)
x = xf.type_as(x)
return x * INV_SQRT2
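# simple_norm is MLPSpeculatorLayerNorm without the learned weight/bias, plus a 1/sqrt(2)
# scaling; MLPSpeculatorHead applies it to the incoming hidden states when `scale_input` is set.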
class MLPSpeculatorModelTied(torch.nn.Module):
def __init__(self, config, prefix, weights):
super().__init__()
self.config = config
self.n_predict = get_speculate()
self.hidden_size = config.hidden_size
self.emb = TensorParallelEmbedding(f"{prefix}.emb.0", weights)
self.proj0 = FastLinear.load(
config,
prefix=f"{prefix}.proj.0",
weights=weights,
bias=False,
)
self.proj1 = FastLinear.load(
config,
prefix=f"{prefix}.proj.1",
weights=weights,
bias=False,
)
self.head = FastLinear.load(config, f"{prefix}.head.0", weights, bias=False)
self.ln = MLPSpeculatorLayerNorm(
prefix=f"{prefix}.ln.0",
config=config,
weights=weights,
)
# Weights ensure that state_0 accounts for 50% of state magnitude by final head in expectation
self.state_weight = 0.5 ** (0.5 / self.n_predict) if self.n_predict > 0 else 1
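        # Sketch of the claim above: the running state is rescaled by state_weight once per
        # head, so after n_predict heads its amplitude is state_weight ** n_predict = 2 ** -0.5,
        # i.e. half of the variance, assuming the embeddings added at each step are independent
        # and scaled by emb_weight = sqrt(1 - state_weight ** 2) (see below).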
self.activation = nn.GELU()
self.vsize = config.vocab_size
self.inner_dim = config.speculator_config["inner_dim"]
self.top_k_tokens_per_head = [1] * self.n_predict
self.emb_weight = math.sqrt(1 - self.state_weight**2) * math.sqrt(
self.inner_dim / 2
)
self.emb.weight *= self.emb_weight
def forward(
self,
hidden_states: torch.Tensor,
input_ids: torch.Tensor,
):
top_k_tokens_per_head = self.top_k_tokens_per_head
# k indicates # of candidates
# h indicates # of generated tokens
state = hidden_states
b = state.size(0)
ind = input_ids.unsqueeze(0)
all_probs = torch.empty(
b, self.n_predict, self.vsize, device=state.device
) # b k h v
assert (
len(top_k_tokens_per_head) == self.n_predict
), f"You must provide a topk number for each head ({self.n_predict} heads, {len(top_k_tokens_per_head)} provided)"
for i in range(self.n_predict):
# Project and predict
z = self.emb(ind)
# z = z.mul(self.emb_weight) # b k d
if i == 0:
state = self.proj0(state) * self.state_weight + z
else:
state = self.proj1(state) * self.state_weight + z
state = self.activation(self.ln(state)) # b k d
probs = F.log_softmax(self.head(state), dim=-1) # b k v
_probs, preds = probs.topk(top_k_tokens_per_head[i], dim=-1) # b k k'
# Update candidate set with new predictions
# Update distribution set with new logits
all_probs[:, i] = probs.exp()
# Update state, log_probs and ind for new predictions
state = state.unsqueeze(2).expand(
-1, -1, top_k_tokens_per_head[i], -1
) # b k k' d
state = state.reshape(-1, b, state.size(3)) # b kk' d
ind = preds.view(-1, b) # b kk'
speculative_logits = all_probs
return speculative_logits
class MLPSpeculatorModel(torch.nn.Module):
def __init__(self, config, prefix, weights):
super().__init__()
self.config = config
self.n_predict = get_speculate()
self.hidden_size = config.hidden_size
self.emb = nn.ModuleList(
[
TensorParallelEmbedding(f"{prefix}.emb.{i}", weights)
for i in range(self.n_predict)
]
)
self.proj = [
FastLinear.load(
config,
prefix=f"{prefix}.proj.{i}",
weights=weights,
bias=False,
)
for i in range(self.n_predict)
]
self.head = nn.ModuleList(
[
FastLinear.load(config, f"{prefix}.head.{i}", weights, bias=False)
for i in range(self.n_predict)
]
)
self.ln = nn.ModuleList(
[
MLPSpeculatorLayerNorm(
prefix=f"{prefix}.ln.{i}",
config=config,
weights=weights,
)
for i in range(self.n_predict)
]
)
# Weights ensure that state_0 accounts for 50% of state magnitude by final head in expectation
self.state_weight = 0.5 ** (0.5 / self.n_predict) if self.n_predict > 0 else 1
self.activation = nn.GELU()
self.vsize = config.vocab_size
self.inner_dim = config.speculator_config["inner_dim"]
self.top_k_tokens_per_head = [1] * self.n_predict
self.emb_weight = math.sqrt(1 - self.state_weight**2) * math.sqrt(
self.inner_dim / 2
)
        # Fold the per-step embedding scaling into the weights (mirrors the tied variant above);
        # self.emb is a ModuleList here, so scale each head's embedding individually.
        for emb in self.emb:
            emb.weight *= self.emb_weight
def forward(
self,
hidden_states: torch.Tensor,
input_ids: torch.Tensor,
):
top_k_tokens_per_head = self.top_k_tokens_per_head
# k indicates # of candidates
# h indicates # of generated tokens
state = hidden_states
b = state.size(0)
ind = input_ids.unsqueeze(0)
all_probs = torch.empty(
b, self.n_predict, self.vsize, device=state.device
) # b k h v
assert (
len(top_k_tokens_per_head) == self.n_predict
), f"You must provide a topk number for each head ({self.n_predict} heads, {len(top_k_tokens_per_head)} provided)"
for i in range(self.n_predict):
# Project and predict
z = self.emb[i](ind)
# z = z.mul(self.emb_weight) # b k d
state = self.proj[i](state) * self.state_weight + z
state = self.activation(self.ln[i](state)) # b k d
probs = F.log_softmax(self.head[i](state), dim=-1) # b k v
_probs, preds = probs.topk(top_k_tokens_per_head[i], dim=-1) # b k k'
# Update candidate set with new predictions
# Update distribution set with new logits
all_probs[:, i] = probs.exp()
# Update state, log_probs and ind for new predictions
state = state.unsqueeze(2).expand(
-1, -1, top_k_tokens_per_head[i], -1
) # b k k' d
state = state.reshape(-1, b, state.size(3)) # b kk' d
ind = preds.view(-1, b) # b kk'
speculative_logits = all_probs
return speculative_logits
class MLPSpeculatorHead(nn.Module):
def __init__(self, lm_head, mlp_speculator, scale_input: bool):
super().__init__()
self.lm_head = lm_head
self.mlp_speculator = mlp_speculator
self.scale_input = scale_input
def forward(
self, input: torch.Tensor
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
logits = self.lm_head(input)
# If we have too many tokens, we skip speculative logits
if input.shape[0] > 128:
return logits, None
input_ids = logits.argmax(dim=-1)
if self.scale_input:
input = simple_norm(input)
speculative_logits = self.mlp_speculator(input, input_ids)
return logits, speculative_logits
@staticmethod
def load(config, prefix: str, weights):
from pathlib import Path
from safetensors import safe_open
speculator_path = config.speculator["path"]
for fname in config.speculator["model_paths"]:
filename = str(Path(speculator_path) / fname)
routing = weights.routing
with safe_open(filename, framework="pytorch") as f:
for k in f.keys():
if k in routing and routing[k] != filename:
raise RuntimeError(
f"Key {k} was found in multiple files: {filename} and {routing[k]}"
)
routing[k] = filename
tie_weights = config.speculator_config.get("tie_weights", False)
if tie_weights:
mlp_speculator = MLPSpeculatorModelTied(config, "speculator", weights)
else:
mlp_speculator = MLPSpeculatorModel(config, "speculator", weights)
# This is used in https://huggingface.co/ibm-fms/llama3-70b-accelerator
scale_input = config.speculator_config.get("scale_input", False)
lm_head = TensorParallelHead.load(config, prefix, weights)
return MLPSpeculatorHead(lm_head, mlp_speculator, scale_input)
| text-generation-inference/server/text_generation_server/layers/mlp.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/mlp.py",
"repo_id": "text-generation-inference",
"token_count": 5007
} |
# coding=utf-8
# Copyright 2022 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple, Any
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.utils.import_utils import SYSTEM
if SYSTEM == "ipex":
from intel_extension_for_pytorch.llm.modules import GatedMLPMOE
else:
from moe_kernels.fused_moe import fused_moe
from text_generation_server.layers.attention import (
paged_attention,
attention,
Seqlen,
)
from text_generation_server.layers import (
FastLinear,
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
get_linear,
)
from text_generation_server.layers.rotary import (
PositionRotaryEmbedding,
)
from text_generation_server.layers.layernorm import (
FastLayerNorm,
)
class DbrxAttentionConfig(PretrainedConfig):
def __init__(
self,
attn_pdrop: float = 0,
clip_qkv: Optional[float] = None,
kv_n_heads: int = 1,
rope_theta: float = 10000.0,
**kwargs: Any,
):
super().__init__(**kwargs)
self.attn_pdrop = attn_pdrop
self.clip_qkv = clip_qkv
self.kv_n_heads = kv_n_heads
self.rope_theta = rope_theta
for k in ["model_type"]:
if k in kwargs:
kwargs.pop(k)
if len(kwargs) != 0:
raise ValueError(f"Found unknown {kwargs=}")
class DbrxFFNConfig(PretrainedConfig):
def __init__(
self,
ffn_act_fn: Optional[dict] = None,
ffn_hidden_size: int = 3584,
moe_num_experts: int = 4,
moe_top_k: int = 1,
moe_jitter_eps: Optional[float] = None,
moe_loss_weight: float = 0.01,
moe_normalize_expert_weights: Optional[float] = 1,
uniform_expert_assignment: bool = False,
**kwargs: Any,
):
super().__init__()
if ffn_act_fn is None:
ffn_act_fn = {"name": "silu"}
self.ffn_act_fn = ffn_act_fn
self.ffn_hidden_size = ffn_hidden_size
self.moe_num_experts = moe_num_experts
self.moe_top_k = moe_top_k
self.moe_jitter_eps = moe_jitter_eps
self.moe_loss_weight = moe_loss_weight
self.moe_normalize_expert_weights = moe_normalize_expert_weights
self.uniform_expert_assignment = uniform_expert_assignment
if uniform_expert_assignment:
raise ValueError("`uniform_expert_assignment = True` is not supported")
for k in ["model_type"]:
if k in kwargs:
kwargs.pop(k)
if len(kwargs) != 0:
raise ValueError(f"Found unknown {kwargs=}")
class DbrxConfig(PretrainedConfig):
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__(
self,
d_model: int = 2048,
n_heads: int = 16,
n_layers: int = 24,
max_seq_len: int = 2048,
vocab_size: int = 32000,
resid_pdrop: float = 0.0,
emb_pdrop: float = 0.0,
attn_config: Optional[DbrxAttentionConfig] = None,
ffn_config: Optional[DbrxFFNConfig] = None,
use_cache: bool = True,
initializer_range: float = 0.02,
output_router_logits: bool = False,
router_aux_loss_coef: float = 0.05,
**kwargs: Any,
):
if attn_config is None:
self.attn_config = DbrxAttentionConfig()
elif isinstance(attn_config, dict):
self.attn_config = DbrxAttentionConfig(**attn_config)
else:
self.attn_config = attn_config
if ffn_config is None:
self.ffn_config = DbrxFFNConfig()
elif isinstance(ffn_config, dict):
self.ffn_config = DbrxFFNConfig(**ffn_config)
else:
self.ffn_config = ffn_config
self.d_model = d_model
self.n_heads = n_heads
self.n_layers = n_layers
self.max_seq_len = max_seq_len
self.vocab_size = vocab_size
self.resid_pdrop = resid_pdrop
self.emb_pdrop = emb_pdrop
self.use_cache = use_cache
self.initializer_range = initializer_range
self.output_router_logits = output_router_logits
self.router_aux_loss_coef = router_aux_loss_coef
tie_word_embeddings = kwargs.pop("tie_word_embeddings", False)
if tie_word_embeddings:
raise ValueError("tie_word_embeddings is not supported for Dbrx models.")
super().__init__(
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
@property
def num_key_value_heads(self):
        # We can't use the attribute map, since the number of KV
        # heads is not top-level.
return self.attn_config.kv_n_heads
def promote_scalar(x: torch.Tensor) -> torch.Tensor:
return x.view(1) if len(x.size()) == 0 else x
def load_attention(config, prefix, weights):
return TensorParallelColumnLinear.load_qkv(
config,
prefix=f"{prefix}.Wqkv",
weights=weights,
bias=False,
num_heads=config.n_heads,
num_key_value_heads=config.attn_config.kv_n_heads,
)
def _load_experts(config, prefix, weights):
world_size = weights.process_group.size()
rank = weights.process_group.rank()
assert (
config.ffn_config.ffn_hidden_size % world_size == 0
), f"The chosen size {config.ffn_config.ffn_hidden_size} is not compatible with sharding on {world_size} shards"
expert_size = config.ffn_config.ffn_hidden_size
block_size = expert_size // world_size
start = rank * block_size
stop = (rank + 1) * block_size
tensor = torch.empty(
(config.ffn_config.moe_num_experts * block_size, config.d_model),
dtype=weights.dtype,
device=weights.device,
)
slice_ = weights._get_slice(f"{prefix}")
for i in range(config.ffn_config.moe_num_experts):
offset = i * expert_size
expert_slice = slice_[start + offset : stop + offset]
tensor[i * block_size : (i + 1) * block_size] = expert_slice.to(
dtype=weights.dtype
).to(device=weights.device)
return tensor
def _load_experts_quantized(config, prefix, weights, cls):
world_size = weights.process_group.size()
rank = weights.process_group.rank()
assert (
config.ffn_config.ffn_hidden_size % world_size == 0
), f"The chosen size {config.ffn_config.ffn_hidden_size} is not compatible with sharding on {world_size} shards"
expert_size = config.ffn_config.ffn_hidden_size
block_size = expert_size // world_size
start = rank * block_size
stop = (rank + 1) * block_size
slice_ = weights._get_slice(f"{prefix}")
experts = []
for i in range(config.ffn_config.moe_num_experts):
if config.quantize in ["gptq", "awq"]:
raise NotImplementedError(
"Dbrx does not support gptq/awq quantization yet."
)
else:
offset = i * expert_size
expert_slice = (
slice_[start + offset : stop + offset]
.to(dtype=weights.dtype)
.to(device=weights.device)
)
if cls == TensorParallelRowLinear:
expert_slice = expert_slice.t().contiguous()
linear = get_linear(expert_slice, None)
experts.append(cls(linear, weights.process_group))
else:
linear = get_linear(expert_slice, None)
experts.append(cls(linear))
return experts
class DbrxAttention(torch.nn.Module):
def __init__(
self,
prefix: str,
config,
weights,
):
super().__init__()
self.clip_qkv = config.attn_config.clip_qkv
self.num_heads = config.n_heads
self.hidden_size = config.d_model
self.head_size = self.hidden_size // self.num_heads
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=config.attn_config.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.attn_config.kv_n_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.out_proj",
weights=weights,
bias=False,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
qkv = self.query_key_value(hidden_states)
if self.clip_qkv is not None:
qkv = qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
kv_cache.store(
key=kv[:, 0],
value=kv[:, 1],
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=kv[:, 0],
value=kv[:, 1],
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
)
return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size))
class DbrxNormAttentionNorm(nn.Module):
def __init__(
self,
prefix: str,
config,
weights,
):
super().__init__()
self.norm_1 = FastLayerNorm.load_no_bias(
prefix=f"{prefix}.norm_1", weights=weights, eps=1e-5
)
self.self_attn = DbrxAttention(
prefix=f"{prefix}.attn", config=config, weights=weights
)
self.norm_2 = FastLayerNorm.load_no_bias(
prefix=f"{prefix}.norm_2",
weights=weights,
eps=1e-5,
)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
normed_hidden_states, res = self.norm_1(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
        # faster post attention layer norm
normed_attn_res_output, attn_res = self.norm_2(attn_output, res)
return normed_attn_res_output, attn_res
@torch.jit.script
def select_experts(
gate_logits: torch.Tensor, top_k: int, moe_normalize_expert_weights: int
):
# all_probs: (sequence_length, n_experts) and upcast for softmax
all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float)
# weights, selected_experts: (sequence_length, top-k)
weights, selected_experts = torch.topk(all_probs, top_k, dim=-1)
if moe_normalize_expert_weights:
weights = weights / torch.norm(
weights, p=moe_normalize_expert_weights, dim=-1, keepdim=True
)
weights = weights.view(-1)
selected_experts = selected_experts.view(-1)
return selected_experts, weights
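# select_experts returns flattened (sequence_length * top_k,) expert indices and weights; when
# moe_normalize_expert_weights is set, each token's top-k weights are first renormalized by
# their p-norm (p = moe_normalize_expert_weights).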
@torch.jit.script
def round_up(x: torch.Tensor, value: int):
return torch.div(x + (value - 1), value, rounding_mode="trunc") * value
class BlockSparseMoE(nn.Module):
def __init__(self, prefix, config: DbrxConfig, weights):
super().__init__()
self.moe_normalize_expert_weights = (
config.ffn_config.moe_normalize_expert_weights
)
self.hidden_dim = config.d_model
self.ffn_dim = config.ffn_config.ffn_hidden_size // weights.process_group.size()
self.num_experts = config.ffn_config.moe_num_experts
self.top_k = config.ffn_config.moe_top_k
act = config.ffn_config.ffn_act_fn["name"]
if "gelu" in act:
self.act = lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
elif "silu" in act:
self.act = torch.nn.functional.silu
else:
self.act = ACT2FN[act]
# gating
self.gate = FastLinear.load(
config, f"{prefix}.router.layer", weights, bias=False
)
# merged expert weights, all of size (n_experts * ffn_dim, hidden_dim)
w1 = _load_experts(config, f"{prefix}.experts.mlp.w1", weights).view(
self.num_experts, self.ffn_dim, self.hidden_dim
)
v1 = _load_experts(config, f"{prefix}.experts.mlp.v1", weights).view(
self.num_experts, self.ffn_dim, self.hidden_dim
)
self.wv1 = torch.cat([w1, v1], dim=1)
self.w2 = (
_load_experts(config, f"{prefix}.experts.mlp.w2", weights)
.view(self.num_experts, self.ffn_dim, self.hidden_dim)
.transpose(1, 2)
.contiguous()
)
self.process_group = weights.process_group
if SYSTEM == "ipex":
self.ipex_fused_moe = GatedMLPMOE(
W13=self.wv1, W2=self.w2, use_prepack=True
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# router_logits: (num_tokens, n_experts)
router_logits = self.gate(x)
if SYSTEM == "ipex":
out = self.ipex_fused_moe(
hidden_states=x,
router_logits=router_logits,
top_k=self.top_k,
renormalize=self.moe_normalize_expert_weights,
use_grouped_topk=False,
num_expert_group=None,
topk_group=None,
)
else:
out = fused_moe(
x,
self.wv1,
self.w2,
router_logits,
self.top_k,
renormalize=self.moe_normalize_expert_weights,
inplace=True,
)
# Reduce sum
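        # Each shard holds ffn_dim = ffn_hidden_size / world_size of every expert, so the
        # partial expert outputs are summed across shards here.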
if self.process_group.size() > 1:
torch.distributed.all_reduce(out, group=self.process_group)
return out.view(*x.shape)
class DenseMoE(nn.Module):
def __init__(self, prefix, config: DbrxConfig, weights):
super().__init__()
self.moe_normalize_expert_weights = (
config.ffn_config.moe_normalize_expert_weights
)
self.hidden_dim = config.d_model
self.ffn_dim = config.ffn_config.ffn_hidden_size // weights.process_group.size()
self.num_experts = config.ffn_config.moe_num_experts
self.top_k = config.ffn_config.moe_top_k
act = config.ffn_config.ffn_act_fn["name"]
if "gelu" in act:
self.act = lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
elif "silu" in act:
self.act = torch.nn.functional.silu
else:
self.act = ACT2FN[act]
# gating
self.gate = FastLinear.load(
config, f"{prefix}.router.layer", weights, bias=False
)
self.w1 = _load_experts_quantized(
config,
prefix=f"{prefix}.experts.mlp.w1",
weights=weights,
cls=TensorParallelColumnLinear,
)
self.w2 = _load_experts_quantized(
config,
prefix=f"{prefix}.experts.mlp.w2",
weights=weights,
cls=TensorParallelRowLinear,
)
self.v1 = _load_experts_quantized(
config,
prefix=f"{prefix}.experts.mlp.v1",
weights=weights,
cls=TensorParallelColumnLinear,
)
self.process_group = weights.process_group
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
x: (sequence_length, model_dim)
gate_logits: (sequence_length, n_experts)
"""
# optional reshape
input_shape = x.shape
x = x.view(-1, input_shape[-1])
# gate_logits: (sequence_length, n_experts)
gate_logits = self.gate(x)
# all_probs: (sequence_length, n_experts) and upcast for softmax
weights = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float)
if self.top_k < self.num_experts:
_, not_selected_experts = torch.topk(
weights,
self.num_experts - self.top_k,
largest=False,
sorted=False,
dim=1,
)
# Mask not selected experts
weights.scatter_(1, not_selected_experts, 0)
# Re-normalize
if self.moe_normalize_expert_weights:
weights = weights / torch.norm(
weights, p=self.moe_normalize_expert_weights, dim=-1, keepdim=True
)
weights = weights.to(x.dtype)
# Final output tensor
out = x.new_zeros(x.shape[0], self.hidden_dim)
for i in range(self.num_experts):
h = self.act(self.w1[i](x)) * self.v1[i](x)
h = self.w2[i](h, reduce=False)
# Add expert output to out with masking
out += h * weights[:, i].view(-1, 1)
# Reduce sum
if self.process_group.size() > 1:
torch.distributed.all_reduce(out, group=self.process_group)
return out
class DbrxLayer(nn.Module):
def __init__(self, prefix: str, layer_id, config, weights):
super().__init__()
prefix = f"{prefix}.blocks.{layer_id}"
self.attn = DbrxNormAttentionNorm(
prefix=f"{prefix}.norm_attn_norm", config=config, weights=weights
)
moe_cls = BlockSparseMoE if config.quantize is None else DenseMoE
self.moe = moe_cls(f"{prefix}.ffn", config, weights)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
# Self Attention
attn_output, attn_res = self.attn(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
moe_output = self.moe(attn_output)
return moe_output, attn_res
class DbrxModel(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.wte", weights=weights
)
self.layers = nn.ModuleList(
[
DbrxLayer(
prefix,
layer_id,
config,
weights,
)
for layer_id in range(config.n_layers)
]
)
self.norm = FastLayerNorm.load_no_bias(
prefix=f"{prefix}.norm_f", weights=weights, eps=1e-5
)
self.head_size = self.layers[0].attn.self_attn.head_size
self.num_heads = self.layers[0].attn.self_attn.num_heads
self.num_key_value_heads = self.layers[0].attn.self_attn.num_key_value_heads
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
# Get rotary cos and sin for this forward
        # Avoid indexing in each layer
cos, sin = self.layers[0].attn.self_attn.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashDbrxForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
if not prefix:
prefix = "transformer"
else:
prefix = f"{prefix}.transformer"
self.model = DbrxModel(prefix, config, weights)
self.lm_head = SpeculativeHead.load(
config,
prefix="lm_head",
weights=weights,
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.lm_head(hidden_states)
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 12466
} |
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from typing import Optional, List, Tuple
from text_generation_server.layers.attention import (
paged_attention,
attention,
Seqlen,
)
from text_generation_server.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
SpeculativeHead,
TensorParallelEmbedding,
get_linear,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.layers.gptq import GPTQWeightsLoader
from text_generation_server.layers.layernorm import (
FastLayerNorm,
)
def load_multi_mqa(
config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size
):
if config.quantize == "gptq":
return _load_multi_mqa_gptq(
config, prefix, weights, bias, head_size, num_heads, hidden_size
)
elif config.quantize == "marlin":
raise RuntimeError(
"santacoder models with marlin quantization are not yet supported"
)
else:
return _load_multi_mqa(
config, prefix, weights, bias, head_size, num_heads, hidden_size
)
def _load_multi_mqa_gptq(
config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size
):
from text_generation_server.layers.gptq import GPTQWeight
if any("c_attn" in k for k in weights.routing.keys()) and not config.transpose:
world_size = weights.process_group.size()
rank = weights.process_group.rank()
slice_ = weights._get_slice(f"{prefix}.c_attn.qweight")
shape = slice_.get_shape()
block_size = (shape[1] - 2 * head_size) // world_size
start = rank * block_size
stop = (rank + 1) * block_size
assert (shape[1] - 2 * head_size) % world_size == 0
q_tensor = slice_[:, start:stop]
kv_tensor = slice_[:, -2 * head_size :]
qweight = torch.cat([q_tensor, kv_tensor], dim=1)
qweight = qweight.to(device=weights.device)
slice_ = weights._get_slice(f"{prefix}.c_attn.scales")
shape = slice_.get_shape()
block_size = (shape[1] - 2 * head_size) // world_size
start = rank * block_size
stop = (rank + 1) * block_size
assert (shape[1] - 2 * head_size) % world_size == 0
q_tensor = slice_[:, start:stop]
kv_tensor = slice_[:, -2 * head_size :]
scales = torch.cat([q_tensor, kv_tensor], dim=1)
scales = scales.to(device=weights.device)
slice_ = weights._get_slice(f"{prefix}.c_attn.qzeros")
shape = slice_.get_shape()
block_size = (shape[1] - (2 * head_size) * 4 // 32) // world_size
start = rank * block_size
stop = (rank + 1) * block_size
assert 2 * head_size % (32 // 4) == 0
q_tensor = slice_[:, start:stop]
kv_tensor = slice_[:, -2 * head_size * 4 // 32 :]
qzeros = torch.cat([q_tensor, kv_tensor], dim=1)
qzeros = qzeros.to(device=weights.device)
loader = weights.weights_loader
assert isinstance(loader, GPTQWeightsLoader)
loader._get_gptq_params(weights)
if loader.quant_method == "gptq":
g_idx = weights.get_tensor(f"{prefix}.c_attn.g_idx")
g_idx = g_idx.to(device=weights.device)
elif loader.quant_method == "awq":
g_idx = None
from text_generation_server.layers.awq.conversion_utils import (
fast_awq_to_gptq,
)
qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
from text_generation_server.layers.gptq import HAS_EXLLAMA
weight = GPTQWeight(
qweight=qweight,
qzeros=qzeros,
scales=scales,
g_idx=g_idx,
bits=loader.bits,
groupsize=loader.groupsize,
use_awq_kernel=loader.quantize == "awq",
use_exllama=HAS_EXLLAMA,
)
if bias:
slice_ = weights._get_slice(f"{prefix}.c_attn.bias")
shape = slice_.get_shape()
block_size = (shape[0] - 2 * head_size) // world_size
assert (shape[0] - 2 * head_size) % world_size == 0
            start = rank * block_size
            stop = (rank + 1) * block_size
            q_tensor = slice_[start:stop]
kv_tensor = slice_[-2 * head_size :]
bias = torch.cat([q_tensor, kv_tensor], dim=0)
bias = bias.to(device=weights.device)
return TensorParallelColumnLinear(get_linear(weight, bias))
else:
raise NotImplementedError("Gptq loading with santacoder is not implemented")
def _load_multi_mqa(
config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size
):
if any("c_attn" in k for k in weights.routing.keys()):
slice_ = weights._get_slice(f"{prefix}.c_attn.weight")
shape = slice_.get_shape()
world_size = weights.process_group.size()
rank = weights.process_group.rank()
if config.transpose:
block_size = (shape[1] - 2 * head_size) // world_size
start = rank * block_size
stop = (rank + 1) * block_size
assert (shape[1] - 2 * head_size) % world_size == 0
q_tensor = slice_[:, start:stop]
kv_tensor = slice_[:, -2 * head_size :]
weight = torch.cat([q_tensor, kv_tensor], dim=1).T
else:
block_size = (shape[0] - 2 * head_size) // world_size
start = rank * block_size
stop = (rank + 1) * block_size
assert (shape[0] - 2 * head_size) % world_size == 0
q_tensor = slice_[start:stop]
kv_tensor = slice_[-2 * head_size :]
weight = torch.cat([q_tensor, kv_tensor], dim=0)
if bias:
slice_ = weights._get_slice(f"{prefix}.c_attn.bias")
shape = slice_.get_shape()
block_size = (shape[0] - 2 * head_size) // world_size
assert (shape[0] - 2 * head_size) % world_size == 0
start = rank * block_size
stop = (rank + 1) * block_size
q_tensor = slice_[start:stop]
kv_tensor = slice_[-2 * head_size :]
bias = torch.cat([q_tensor, kv_tensor], dim=0)
else:
if config.transpose:
w = [
weights.get_sharded(f"{prefix}.q_attn.weight", dim=1).T,
weights.get_tensor(f"{prefix}.kv_attn.weight").T,
]
weight = torch.cat(w, dim=0)
else:
w = [
weights.get_sharded(f"{prefix}.q_attn.weight", dim=0),
weights.get_tensor(f"{prefix}.kv_attn.weight"),
]
weight = torch.cat(w, dim=1)
if bias:
b = [
weights.get_sharded(f"{prefix}.q_attn.bias", dim=0),
weights.get_tensor(f"{prefix}.kv_attn.bias"),
]
bias = torch.cat(b, dim=0)
else:
bias = None
weight = weight.to(dtype=weights.dtype).to(device=weights.device)
assert list(weight.shape) == [
(num_heads + 2) * head_size,
hidden_size,
], f"{weight.shape} != {[(num_heads + 2) * head_size, hidden_size]}"
if bias is not None:
bias = bias.to(dtype=weights.dtype).to(device=weights.device)
assert list(bias.shape) == [
(num_heads + 2) * head_size
], f"{weight.shape} != {[(num_heads + 2) * head_size]}"
return TensorParallelColumnLinear(get_linear(weight, bias))
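# Illustrative sketch (not part of the original file): the row arithmetic used by
# `_load_multi_mqa` above to shard a fused multi-query attention weight. The query
# rows are split across ranks while the single key/value head is replicated.
# All sizes below are made up.
def _demo_mqa_weight_split():
    import torch

    num_heads, head_size, hidden_size, world_size, rank = 8, 4, 32, 2, 0
    # Fused c_attn weight: queries for every head plus one shared key and value head.
    c_attn = torch.randn((num_heads + 2) * head_size, hidden_size)
    block_size = (c_attn.shape[0] - 2 * head_size) // world_size
    start, stop = rank * block_size, (rank + 1) * block_size
    q_shard = c_attn[start:stop]          # this rank's slice of the query rows
    kv_shared = c_attn[-2 * head_size :]  # key/value rows kept on every rank
    weight = torch.cat([q_shard, kv_shared], dim=0)
    # Each rank ends up with (num_heads / world_size + 2) * head_size rows.
    assert weight.shape[0] == (num_heads // world_size + 2) * head_size
    return weight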
def load_col(config, prefix: str, weights, bias: bool):
if config.transpose:
weight = weights.get_sharded(f"{prefix}.weight", dim=1).T
else:
weight = weights.get_multi_weights_col([prefix], dim=0)
if bias:
bias = weights.get_sharded(f"{prefix}.bias", dim=0)
else:
bias = None
return TensorParallelColumnLinear(get_linear(weight, bias))
def load_row(config, prefix: str, weights, bias: bool):
if config.transpose:
weight = weights.get_sharded(f"{prefix}.weight", dim=0).T
else:
weight = weights.get_weights_row(prefix)
if bias and weights.process_group.rank() == 0:
        # Only load the bias on the first rank so it is added once after the all-reduce
bias = weights.get_tensor(f"{prefix}.bias")
else:
bias = None
return TensorParallelRowLinear(
get_linear(weight, bias), process_group=weights.process_group
)
class FlashMQAttention(torch.nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
num_heads = config.num_attention_heads
hidden_size = config.hidden_size
self.num_heads = num_heads
self.hidden_size = hidden_size
self.head_size = hidden_size // num_heads
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.softmax_scale = self.head_size ** (-0.5)
self.c_attn = load_multi_mqa(
config,
prefix=prefix,
weights=weights,
bias=True,
head_size=self.head_size,
hidden_size=hidden_size,
num_heads=self.num_heads,
)
self.c_proj = load_row(
config, prefix=f"{prefix}.c_proj", weights=weights, bias=True
)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
self.kv_head_mapping = torch.zeros(
self.num_heads, dtype=torch.int32, device=weights.device
)
def forward(
self,
hidden_states,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
qkv = self.c_attn(hidden_states)
# Split query from key_value
query, key_value = qkv.split(
[self.head_size * self.num_heads, 2 * self.head_size], dim=1
)
# Prepare query and key_value for indexing
query = query.view(-1, self.num_heads, self.head_size)
key_value = key_value.view(-1, 2, 1, self.head_size)
kv_cache.store(
key=key_value[:, 0],
value=key_value[:, 1],
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=key_value[:, 0],
value=key_value[:, 1],
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
)
return self.c_proj(attn_output.view(-1, self.num_heads * self.head_size))
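# Illustrative sketch (not part of the original file): the split performed at the top
# of `FlashMQAttention.forward`, where one fused projection yields per-head queries
# plus a single shared key/value head. Sizes are made up.
def _demo_mqa_qkv_split():
    import torch

    num_tokens, num_heads, head_size = 5, 4, 8
    qkv = torch.randn(num_tokens, num_heads * head_size + 2 * head_size)
    query, key_value = qkv.split([num_heads * head_size, 2 * head_size], dim=1)
    query = query.view(-1, num_heads, head_size)     # (tokens, num_heads, head_size)
    key_value = key_value.view(-1, 2, 1, head_size)  # (tokens, k/v, 1 kv head, head_size)
    return query.shape, key_value.shape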
class MLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
act = config.activation_function
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
self.c_fc = load_col(
config, prefix=f"{prefix}.c_fc", weights=weights, bias=True
)
self.c_proj = load_row(
config, prefix=f"{prefix}.c_proj", weights=weights, bias=True
)
def forward(self, hidden_states):
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
return hidden_states
class Block(nn.Module):
def __init__(self, prefix: str, layer_id, config, weights):
super().__init__()
prefix = f"{prefix}.h.{layer_id}"
self.ln_1 = FastLayerNorm.load(
prefix=f"{prefix}.ln_1", weights=weights, eps=config.layer_norm_epsilon
)
self.ln_2 = FastLayerNorm.load(
prefix=f"{prefix}.ln_2", weights=weights, eps=config.layer_norm_epsilon
)
self.self_attn = FlashMQAttention(
prefix=f"{prefix}.attn",
config=config,
weights=weights,
)
self.mlp = MLP(
prefix=f"{prefix}.mlp",
config=config,
weights=weights,
)
def forward(
self,
hidden_states,
residual,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
hidden_states, residual = self.ln_1(hidden_states, residual)
hidden_states = self.self_attn(
hidden_states,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
hidden_states, residual = self.ln_2(hidden_states, residual)
mlp_output = self.mlp(hidden_states)
return mlp_output, residual
class FlashSantacoderModel(nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
self.config = config
self.process_group = weights.process_group
self.wte = TensorParallelEmbedding(
prefix=f"{prefix}.wte",
weights=weights,
reduce=False,
)
self.wpe = TensorParallelEmbedding(
prefix=f"{prefix}.wpe",
weights=weights,
reduce=False,
)
self.layers = nn.ModuleList(
[
Block(
prefix,
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.ln_f = FastLayerNorm.load(
prefix="transformer.ln_f", weights=weights, eps=config.layer_norm_epsilon
)
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
) -> torch.Tensor:
hidden_states = self.wte(input_ids) + self.wpe(position_ids)
if self.process_group.size() > 1:
torch.distributed.all_reduce(hidden_states, group=self.process_group)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
)
hidden_states, _ = self.ln_f(hidden_states, residual)
return hidden_states
class FlashSantacoderForCausalLM(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
if not prefix:
prefix = "transformer"
else:
prefix = f"{prefix}.transformer"
config.transpose = config.architectures[0].startswith("GPT2")
self.model = FlashSantacoderModel(prefix, config, weights)
self.lm_head = SpeculativeHead.load(
config, prefix=f"{prefix}.wte", weights=weights
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> torch.Tensor:
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits = self.lm_head(hidden_states)
return logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 8648
} |
# Implementation of the PhiModel and PhiForCausalLM classes
import torch
import torch.distributed
import math
from torch import nn
from typing import Optional, List, Tuple
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_outputs import CausalLMOutputWithPast
from text_generation_server.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
FastLinear,
)
# PhiConfig is the configuration class for the PhiModel.
class PhiConfig(PretrainedConfig):
def __init__(
self,
vocab_size=51200,
n_positions=2048,
n_embd=2560,
n_layer=32,
n_inner=None,
n_head=32,
rotary_dim=32,
layer_norm_epsilon=1e-5,
tie_word_embeddings=False,
pad_vocab_size_multiple=64,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
no_bias=False,
**kwargs,
):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_inner = n_inner
self.n_head = n_head
self.rotary_dim = rotary_dim
self.layer_norm_epsilon = layer_norm_epsilon
self.tie_word_embeddings = tie_word_embeddings
self.pad_vocab_size_multiple = pad_vocab_size_multiple
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.no_bias = no_bias
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
# RotaryEmbedding is a class that implements the rotary embedding.
class RotaryEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
inv_freq = [1.0 / 10000.0 ** (i / dim) for i in range(0, dim, 2)]
inv_freq_len = len(inv_freq)
inv_freq = torch.tensor(inv_freq).view(1, inv_freq_len)
t = torch.arange(0, max_seq_len, dtype=torch.float).view(max_seq_len, 1)
freqs = t.matmul(inv_freq)
self.sin = freqs.sin()
self.cos = freqs.cos()
def apply_rotary_emb_qkv(self, qkv, seqlen_offset):
b_size, seqlen, three, _, _headdim = qkv.shape
if three != 3:
raise Exception("unexpected shape for qkv")
_, rotary_dim = self.cos.shape
rotary_dim = rotary_dim * 2
q_rot = qkv[:, :, 0, :, :rotary_dim]
q_pass = qkv[:, :, 0, :, rotary_dim:]
k_rot = qkv[:, :, 1, :, :rotary_dim]
k_pass = qkv[:, :, 1, :, rotary_dim:]
q12 = torch.chunk(q_rot, 2, dim=-1)
k12 = torch.chunk(k_rot, 2, dim=-1)
q1, q2 = q12[0], q12[1]
k1, k2 = k12[0], k12[1]
c = self.cos.narrow(0, seqlen_offset, seqlen).unsqueeze(1)
s = self.sin.narrow(0, seqlen_offset, seqlen).unsqueeze(1)
q_rot = torch.cat(
[
q1 * c - q2 * s,
q1 * s + q2 * c,
],
dim=-1,
)
k_rot = torch.cat(
[
k1 * c - k2 * s,
k1 * s + k2 * c,
],
dim=-1,
)
q = torch.cat([q_rot, q_pass], dim=-1)
k = torch.cat([k_rot, k_pass], dim=-1)
v = qkv[:, :, 2]
return q, k, v
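# Illustrative sketch (not part of the original file): a minimal call of the rotary
# embedding above with tiny, made-up dimensions. Position 0 has cos=1 and sin=0,
# so its query comes back unchanged.
def _demo_rotary_rotation():
    import torch

    dim, max_seq_len = 4, 8
    rope = RotaryEmbedding(dim, max_seq_len)
    # One batch, two positions, q/k/v for a single head whose size equals the rotary dim.
    qkv = torch.randn(1, 2, 3, 1, dim)
    q, k, v = rope.apply_rotary_emb_qkv(qkv, seqlen_offset=0)
    assert torch.allclose(q[:, 0], qkv[:, 0, 0])  # position 0 is left untouched
    return q.shape, k.shape, v.shape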
# PhiCausalLMHead is the head of the PhiModel. It is a linear layer with a layer norm.
class PhiCausalLMHead(nn.Module):
def __init__(self, config, weights):
super().__init__()
self.ln = nn.LayerNorm.load(
prefix="lm_head.ln",
weights=weights,
eps=config.layer_norm_epsilon,
)
self.linear = SpeculativeHead.load(
config=config, prefix="lm_head.linear", weights=weights
)
def forward(self, hidden_states):
hidden_states = self.ln(hidden_states)
hidden_states = self.linear(hidden_states)
return hidden_states
# PhiMHA is a multi-head attention layer. This layer uses an attention mask to prevent tokens from attending to subsequent tokens.
class PhiMHA(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
self.Wqkv = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.Wqkv", weights=weights, bias=not config.no_bias
)
self.out_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.out_proj",
weights=weights,
bias=not config.no_bias,
)
self.op_size = config.n_embd
self.head_dim = int(config.n_embd / config.n_head)
self.num_heads = config.n_head
self.rotary_emb = RotaryEmbedding(
config.rotary_dim,
config.n_positions,
)
self.softmax_scale = 1.0 / math.sqrt(self.head_dim)
def forward(
self,
hidden_states,
past_kv_cache,
attention_mask=None,
):
b_size, seq_len, _n_embd = hidden_states.shape
qkv = self.Wqkv(hidden_states)
qkv = qkv.view(b_size, seq_len, 3, self.num_heads, self.head_dim)
seqlen_offset = 0 if past_kv_cache is None else past_kv_cache[0].shape[1]
q, k, v = self.rotary_emb.apply_rotary_emb_qkv(qkv, seqlen_offset)
# if there is a kv_cache, then we need to concatenate
if past_kv_cache is not None:
prev_k, prev_v = past_kv_cache
k = torch.cat([prev_k, k], dim=1)
v = torch.cat([prev_v, v], dim=1)
past_kv_cache = [k, v]
attn_weights = torch.einsum("bthd,bshd->bhts", q, k * self.softmax_scale)
if attention_mask is not None:
seqlen_k = k.shape[1]
seqlen_q = q.shape[1]
causal_mask = torch.triu(
torch.full((seqlen_q, seqlen_k), -10000.0, device=attn_weights.device),
1,
)
attn_weights = attn_weights + causal_mask.to(dtype=attn_weights.dtype)
attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1)
attn_output = attn_weights.matmul(v.transpose(1, 2)).squeeze(0)
attn_output = (
attn_output.view((b_size, self.num_heads, seq_len, self.head_dim))
.transpose(1, 2)
.flatten(-2)
)
return self.out_proj(attn_output), past_kv_cache
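# Illustrative sketch (not part of the original file): the additive causal mask built
# in `PhiMHA.forward`. Future positions receive a large negative bias which softmax
# turns into (near) zero attention probability. Sizes are made up.
def _demo_causal_mask():
    import torch

    seqlen_q, seqlen_k = 3, 3
    causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0), 1)
    scores = torch.zeros(seqlen_q, seqlen_k) + causal_mask
    probs = torch.nn.functional.softmax(scores, dim=-1)
    return probs  # lower-triangular rows, each summing to 1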
# PhiMLP is a multi-layer perceptron. It contains two linear layers with a gelu activation function.
class PhiMLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
self.n_inner = config.n_inner
self.fc1 = FastLinear.load(
config=config,
prefix=f"{prefix}.fc1",
weights=weights,
bias=False,
)
self.fc2 = FastLinear.load(
config=config,
prefix=f"{prefix}.fc2",
weights=weights,
bias=False,
)
self.activation = torch.nn.functional.gelu
def forward(self, hidden_states):
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
# PhiBlock is a single transformer block. It contains a layer norm, a multi-head attention layer and a multi-layer perceptron.
class PhiBlock(nn.Module):
def __init__(self, layer_id, config, weights):
super().__init__()
self.layer_id = layer_id
self.layer_norm = nn.LayerNorm.load(
prefix=f"{layer_id}.ln", weights=weights, eps=config.layer_norm_epsilon
)
self.mixer = PhiMHA(prefix=f"{layer_id}.mixer", config=config, weights=weights)
self.mlp = PhiMLP(prefix=f"{layer_id}.mlp", config=config, weights=weights)
def forward(
self,
hidden_states,
kv_cache,
attention_mask,
):
residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
attn_outputs, past_kv_cache = self.mixer(
hidden_states, kv_cache, attention_mask
)
feed_forward_hidden_states = self.mlp(hidden_states)
out = attn_outputs + feed_forward_hidden_states + residual
return out, past_kv_cache
# PhiModel implements the embedding layer and the transformer blocks.
class PhiModel(nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
self.tp_rank = weights.process_group.rank()
self.tp_world_size = weights.process_group.size()
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embd.wte", weights=weights
)
self.blocks = nn.ModuleList(
[
PhiBlock(f"{prefix}.h.{layer_id}", config, weights)
for layer_id in range(config.n_layer)
]
)
def forward(
self,
input_ids: torch.LongTensor,
past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
attention_mask: Optional[torch.ByteTensor] = None,
return_dict: Optional[bool] = None,
use_cache: Optional[bool] = None,
) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]:
hidden_states = self.embed_tokens(input_ids)
seq_len = hidden_states.shape[1]
mask = None if seq_len <= 1 else attention_mask
past_key_values = (
[None] * len(self.blocks) if past_key_values is None else past_key_values
)
for index, block in enumerate(self.blocks):
hidden_states, new_key_values = block(
hidden_states, past_key_values[index], mask
)
past_key_values[index] = new_key_values
return hidden_states, past_key_values
# PhiForCausalLM wraps the PhiModel and PhiCausalLMHead together and returns a CausalLMOutputWithPast object.
class PhiForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
if not prefix:
prefix = "transformer"
else:
prefix = f"{prefix}.transformer"
self.model = PhiModel(prefix, config, weights)
self.lm_head = PhiCausalLMHead(config, weights)
def forward(
self,
input_ids: torch.LongTensor,
past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
attention_mask: Optional[torch.ByteTensor] = None,
return_dict: Optional[bool] = None,
use_cache: Optional[bool] = None,
labels: Optional[torch.LongTensor] = None,
) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]:
model_output = self.model(
input_ids, past_key_values, attention_mask, return_dict, use_cache
)
logits = self.lm_head(model_output[0])
loss = None
if labels is not None:
loss = nn.CrossEntropyLoss()(
logits[:, :-1].view(-1, logits.size(-1)), labels[:, 1:].view(-1)
)
if not return_dict:
return (
((loss,) + (logits,) + model_output[1:])
if loss is not None
else (logits,) + model_output[1:]
)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=model_output[1],
hidden_states=None,
attentions=None,
)
| text-generation-inference/server/text_generation_server/models/custom_modeling/phi_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/phi_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 5696
} |
import torch
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import List, Optional
from transformers import PreTrainedTokenizerBase
from text_generation_server.pb import generate_pb2
from text_generation_server.pb.generate_pb2 import FinishReason
class Batch(ABC):
@abstractmethod
def to_pb(self) -> generate_pb2.CachedBatch:
raise NotImplementedError
@classmethod
@abstractmethod
def from_pb(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
dtype: torch.dtype,
device: torch.device,
) -> "Batch":
raise NotImplementedError
@abstractmethod
def filter(self, request_ids: List[int]) -> "Batch":
raise NotImplementedError
@classmethod
@abstractmethod
def concatenate(cls, batches: List["Batch"]) -> "Batch":
raise NotImplementedError
@abstractmethod
def __len__(self):
raise NotImplementedError
@dataclass
class GeneratedText:
text: str
generated_tokens: int
finish_reason: FinishReason
seed: Optional[int]
def to_pb(self) -> generate_pb2.GeneratedText:
return generate_pb2.GeneratedText(
text=self.text,
generated_tokens=self.generated_tokens,
finish_reason=self.finish_reason,
seed=self.seed,
)
@dataclass
class Tokens:
token_ids: List[int]
logprobs: List[float]
texts: List[str]
is_special: List[bool]
def to_pb(self) -> generate_pb2.Tokens:
return generate_pb2.Tokens(
ids=self.token_ids,
logprobs=self.logprobs,
texts=self.texts,
is_special=self.is_special,
)
def __len__(self):
return len(self.token_ids)
def __add__(self, other: "Tokens") -> "Tokens":
return Tokens(
self.token_ids + other.token_ids,
self.logprobs + other.logprobs,
self.texts + other.texts,
self.is_special + other.is_special,
)
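# Illustrative sketch (not part of the original file): how `Tokens.__add__` merges the
# token-level fields of two decoding steps. The ids, logprobs and strings are made up.
def _demo_tokens_concat():
    first = Tokens(token_ids=[1, 2], logprobs=[-0.1, -0.2], texts=["Hello", ","], is_special=[False, False])
    second = Tokens(token_ids=[3], logprobs=[-0.3], texts=[" world"], is_special=[False])
    merged = first + second
    assert len(merged) == 3 and merged.texts == ["Hello", ",", " world"]
    return merged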
@dataclass
class Generation:
request_id: int
prefill_tokens: Optional[Tokens]
tokens: Tokens
generated_text: Optional[GeneratedText]
# Optional for now, since it's not yet supported for every model.
top_tokens: Optional[List[Tokens]]
def to_pb(self) -> generate_pb2.Generation:
return generate_pb2.Generation(
request_id=self.request_id,
prefill_tokens=(
self.prefill_tokens.to_pb() if self.prefill_tokens is not None else None
),
tokens=self.tokens.to_pb(),
generated_text=(
self.generated_text.to_pb() if self.generated_text is not None else None
),
top_tokens=(
[top_tokens.to_pb() for top_tokens in self.top_tokens]
if self.top_tokens is not None
else None
),
)
| text-generation-inference/server/text_generation_server/models/types.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/types.py",
"repo_id": "text-generation-inference",
"token_count": 1353
} |
import os
from typing import Union
from loguru import logger
import torch
from transformers import AutoTokenizer
from peft import AutoPeftModelForCausalLM, AutoPeftModelForSeq2SeqLM
def download_and_unload_peft(model_id, revision, trust_remote_code):
torch_dtype = torch.float16
logger.info("Trying to load a Peft model. It might take a while without feedback")
try:
model = AutoPeftModelForCausalLM.from_pretrained(
model_id,
revision=revision,
torch_dtype=torch_dtype,
trust_remote_code=trust_remote_code,
low_cpu_mem_usage=True,
)
except Exception:
model = AutoPeftModelForSeq2SeqLM.from_pretrained(
model_id,
revision=revision,
torch_dtype=torch_dtype,
trust_remote_code=trust_remote_code,
low_cpu_mem_usage=True,
)
logger.info("Peft model detected.")
logger.info("Merging the lora weights.")
base_model_id = model.peft_config["default"].base_model_name_or_path
model = model.merge_and_unload()
os.makedirs(model_id, exist_ok=True)
cache_dir = model_id
logger.info(f"Saving the newly created merged model to {cache_dir}")
tokenizer = AutoTokenizer.from_pretrained(
base_model_id, trust_remote_code=trust_remote_code
)
model.save_pretrained(cache_dir, safe_serialization=True)
model.config.save_pretrained(cache_dir)
tokenizer.save_pretrained(cache_dir)
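# Illustrative sketch (not part of the original file): one possible call of
# `download_and_unload_peft`. The adapter id is hypothetical; after the call,
# `model_id` on disk holds the merged (adapter + base) weights, config and tokenizer.
def _demo_merge_adapter():
    download_and_unload_peft(
        model_id="my-org/my-lora-adapter",  # hypothetical adapter repo or local path
        revision=None,
        trust_remote_code=False,
    )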
def download_peft(
model_id: Union[str, os.PathLike], revision: str, trust_remote_code: bool
):
torch_dtype = torch.float16
try:
_model = AutoPeftModelForCausalLM.from_pretrained(
model_id,
revision=revision,
torch_dtype=torch_dtype,
trust_remote_code=trust_remote_code,
low_cpu_mem_usage=True,
)
except Exception:
_model = AutoPeftModelForSeq2SeqLM.from_pretrained(
model_id,
revision=revision,
torch_dtype=torch_dtype,
trust_remote_code=trust_remote_code,
low_cpu_mem_usage=True,
)
logger.info("Peft model downloaded.")
| text-generation-inference/server/text_generation_server/utils/peft.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/peft.py",
"repo_id": "text-generation-inference",
"token_count": 981
} |
/* tslint:disable */
/* eslint-disable */
/* auto-generated by NAPI-RS */
export function bpeDecoder(suffix?: string | undefined | null): Decoder
export function byteFallbackDecoder(): Decoder
export function ctcDecoder(
padToken?: string = '<pad>',
wordDelimiterToken?: string | undefined | null,
cleanup?: boolean | undefined | null,
): Decoder
export function fuseDecoder(): Decoder
export function metaspaceDecoder(
replacement?: string = '▁',
prependScheme?: prepend_scheme = 'always',
split?: split = true,
): Decoder
export function replaceDecoder(pattern: string, content: string): Decoder
export function sequenceDecoder(decoders: Array<Decoder>): Decoder
export function stripDecoder(content: string, left: number, right: number): Decoder
export function wordPieceDecoder(prefix?: string = '##', cleanup?: bool = true): Decoder
export const enum TruncationDirection {
Left = 'Left',
Right = 'Right',
}
export const enum TruncationStrategy {
LongestFirst = 'LongestFirst',
OnlyFirst = 'OnlyFirst',
OnlySecond = 'OnlySecond',
}
export interface BpeOptions {
cacheCapacity?: number
dropout?: number
unkToken?: string
continuingSubwordPrefix?: string
endOfWordSuffix?: string
fuseUnk?: boolean
byteFallback?: boolean
}
export interface WordPieceOptions {
unkToken?: string
continuingSubwordPrefix?: string
maxInputCharsPerWord?: number
}
export interface WordLevelOptions {
unkToken?: string
}
export interface UnigramOptions {
unkId?: number
byteFallback?: boolean
}
export function prependNormalizer(prepend: string): Normalizer
export function stripAccentsNormalizer(): Normalizer
export interface BertNormalizerOptions {
cleanText?: boolean
handleChineseChars?: boolean
stripAccents?: boolean
lowercase?: boolean
}
/**
* bert_normalizer(options?: {
* cleanText?: bool = true,
* handleChineseChars?: bool = true,
* stripAccents?: bool = true,
* lowercase?: bool = true
* })
*/
export function bertNormalizer(options?: BertNormalizerOptions | undefined | null): Normalizer
export function nfdNormalizer(): Normalizer
export function nfkdNormalizer(): Normalizer
export function nfcNormalizer(): Normalizer
export function nfkcNormalizer(): Normalizer
export function stripNormalizer(left?: boolean | undefined | null, right?: boolean | undefined | null): Normalizer
export function sequenceNormalizer(normalizers: Array<Normalizer>): Normalizer
export function lowercase(): Normalizer
export function replace(pattern: string, content: string): Normalizer
export function nmt(): Normalizer
export function precompiled(bytes: Array<number>): Normalizer
export const enum JsSplitDelimiterBehavior {
Removed = 'Removed',
Isolated = 'Isolated',
MergedWithPrevious = 'MergedWithPrevious',
MergedWithNext = 'MergedWithNext',
Contiguous = 'Contiguous',
}
/** byte_level(addPrefixSpace: bool = true, useRegex: bool = true) */
export function byteLevelPreTokenizer(
addPrefixSpace?: boolean | undefined | null,
useRegex?: boolean | undefined | null,
): PreTokenizer
export function byteLevelAlphabet(): Array<string>
export function whitespacePreTokenizer(): PreTokenizer
export function whitespaceSplitPreTokenizer(): PreTokenizer
export function bertPreTokenizer(): PreTokenizer
export function metaspacePreTokenizer(
replacement?: string = '▁',
prependScheme?: prepend_scheme = 'always',
split?: split = true,
): PreTokenizer
export function splitPreTokenizer(pattern: string, behavior: string, invert?: boolean | undefined | null): PreTokenizer
export function punctuationPreTokenizer(behavior?: string | undefined | null): PreTokenizer
export function sequencePreTokenizer(preTokenizers: Array<PreTokenizer>): PreTokenizer
export function charDelimiterSplit(delimiter: string): PreTokenizer
export function digitsPreTokenizer(individualDigits?: boolean | undefined | null): PreTokenizer
export function bertProcessing(sep: [string, number], cls: [string, number]): Processor
export function robertaProcessing(
sep: [string, number],
cls: [string, number],
trimOffsets?: boolean | undefined | null,
addPrefixSpace?: boolean | undefined | null,
): Processor
export function byteLevelProcessing(trimOffsets?: boolean | undefined | null): Processor
export function templateProcessing(
single: string,
pair?: string | undefined | null,
specialTokens?: Array<[string, number]> | undefined | null,
): Processor
export function sequenceProcessing(processors: Array<Processor>): Processor
export const enum PaddingDirection {
Left = 0,
Right = 1,
}
export interface PaddingOptions {
maxLength?: number
direction?: string | PaddingDirection
padToMultipleOf?: number
padId?: number
padTypeId?: number
padToken?: string
}
export interface EncodeOptions {
isPretokenized?: boolean
addSpecialTokens?: boolean
}
export interface TruncationOptions {
maxLength?: number
strategy?: TruncationStrategy
direction?: string | TruncationDirection
stride?: number
}
export interface AddedTokenOptions {
singleWord?: boolean
leftStrip?: boolean
rightStrip?: boolean
normalized?: boolean
}
export interface JsFromPretrainedParameters {
revision?: string
authToken?: string
}
export function slice(s: string, beginIndex?: number | undefined | null, endIndex?: number | undefined | null): string
export function mergeEncodings(encodings: Array<Encoding>, growingOffsets?: boolean | undefined | null): Encoding
/** Decoder */
export class Decoder {
decode(tokens: Array<string>): string
}
export type JsEncoding = Encoding
export class Encoding {
constructor()
getLength(): number
getNSequences(): number
getIds(): Array<number>
getTypeIds(): Array<number>
getAttentionMask(): Array<number>
getSpecialTokensMask(): Array<number>
getTokens(): Array<string>
getOffsets(): Array<Array<number>>
getWordIds(): Array<number | undefined | null>
charToToken(pos: number, seqId?: number | undefined | null): number | null
charToWord(pos: number, seqId?: number | undefined | null): number | null
pad(length: number, options?: PaddingOptions | undefined | null): void
truncate(
length: number,
stride?: number | undefined | null,
direction?: string | TruncationDirection | undefined | null,
): void
wordToTokens(word: number, seqId?: number | undefined | null): [number, number] | null | undefined
wordToChars(word: number, seqId?: number | undefined | null): [number, number] | null | undefined
tokenToChars(token: number): [number, [number, number]] | null | undefined
tokenToWord(token: number): number | null
getOverflowing(): Array<Encoding>
getSequenceIds(): Array<number | undefined | null>
tokenToSequence(token: number): number | null
}
export class Model {}
export type Bpe = BPE
export class BPE {
static empty(): Model
static init(vocab: Vocab, merges: Merges, options?: BpeOptions | undefined | null): Model
static fromFile(vocab: string, merges: string, options?: BpeOptions | undefined | null): Promise<Model>
}
export class WordPiece {
static init(vocab: Vocab, options?: WordPieceOptions | undefined | null): Model
static empty(): WordPiece
static fromFile(vocab: string, options?: WordPieceOptions | undefined | null): Promise<Model>
}
export class WordLevel {
static init(vocab: Vocab, options?: WordLevelOptions | undefined | null): Model
static empty(): WordLevel
static fromFile(vocab: string, options?: WordLevelOptions | undefined | null): Promise<Model>
}
export class Unigram {
static init(vocab: Array<[string, number]>, options?: UnigramOptions | undefined | null): Model
static empty(): Model
}
/** Normalizer */
export class Normalizer {
normalizeString(sequence: string): string
}
/** PreTokenizers */
export class PreTokenizer {
preTokenizeString(sequence: string): [string, [number, number]][]
}
export class Processor {}
export class AddedToken {
constructor(token: string, isSpecial: boolean, options?: AddedTokenOptions | undefined | null)
getContent(): string
}
export class Tokenizer {
constructor(model: Model)
setPreTokenizer(preTokenizer: PreTokenizer): void
setDecoder(decoder: Decoder): void
setModel(model: Model): void
setPostProcessor(postProcessor: Processor): void
setNormalizer(normalizer: Normalizer): void
save(path: string, pretty?: boolean | undefined | null): void
addAddedTokens(tokens: Array<AddedToken>): number
addTokens(tokens: Array<string>): number
encode(
sentence: InputSequence,
pair?: InputSequence | null,
encodeOptions?: EncodeOptions | undefined | null,
): Promise<JsEncoding>
encodeBatch(sentences: EncodeInput[], encodeOptions?: EncodeOptions | undefined | null): Promise<JsEncoding[]>
decode(ids: Array<number>, skipSpecialTokens: boolean): Promise<string>
decodeBatch(ids: Array<Array<number>>, skipSpecialTokens: boolean): Promise<string[]>
static fromString(s: string): Tokenizer
static fromFile(file: string): Tokenizer
addSpecialTokens(tokens: Array<string>): void
setTruncation(maxLength: number, options?: TruncationOptions | undefined | null): void
disableTruncation(): void
setPadding(options?: PaddingOptions | undefined | null): void
disablePadding(): void
getDecoder(): Decoder | null
getNormalizer(): Normalizer | null
getPreTokenizer(): PreTokenizer | null
getPostProcessor(): Processor | null
getVocab(withAddedTokens?: boolean | undefined | null): Record<string, number>
getVocabSize(withAddedTokens?: boolean | undefined | null): number
idToToken(id: number): string | null
tokenToId(token: string): number | null
train(files: Array<string>): void
runningTasks(): number
postProcess(
encoding: Encoding,
pair?: Encoding | undefined | null,
addSpecialTokens?: boolean | undefined | null,
): Encoding
}
export class Trainer {}
| tokenizers/bindings/node/index.d.ts/0 | {
"file_path": "tokenizers/bindings/node/index.d.ts",
"repo_id": "tokenizers",
"token_count": 2753
} |
use crate::arc_rwlock_serde;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use serde::{Deserialize, Serialize};
use std::sync::{Arc, RwLock};
use tk::pre_tokenizers::PreTokenizerWrapper;
use tk::PreTokenizedString;
use tk::SplitDelimiterBehavior;
use tokenizers as tk;
#[napi(string_enum)]
pub enum JsSplitDelimiterBehavior {
Removed,
Isolated,
MergedWithPrevious,
MergedWithNext,
Contiguous,
}
impl TryFrom<String> for JsSplitDelimiterBehavior {
type Error = Error;
fn try_from(value: String) -> Result<Self> {
match &value[..] {
"removed" => Ok(JsSplitDelimiterBehavior::Removed),
"isolated" => Ok(JsSplitDelimiterBehavior::Isolated),
"mergedWithPrevious" => Ok(JsSplitDelimiterBehavior::MergedWithPrevious),
"mergedWithNext" => Ok(JsSplitDelimiterBehavior::MergedWithNext),
"contiguous" => Ok(JsSplitDelimiterBehavior::Contiguous),
_ => Err(Error::from_reason(
"Wrong value for SplitDelimiterBehavior, expected one of: \
`removed, isolated, mergedWithPrevious, mergedWithNext, contiguous`"
.to_string(),
)),
}
}
}
impl From<JsSplitDelimiterBehavior> for SplitDelimiterBehavior {
fn from(value: JsSplitDelimiterBehavior) -> Self {
match value {
JsSplitDelimiterBehavior::Removed => SplitDelimiterBehavior::Removed,
JsSplitDelimiterBehavior::Isolated => SplitDelimiterBehavior::Isolated,
JsSplitDelimiterBehavior::MergedWithPrevious => SplitDelimiterBehavior::MergedWithPrevious,
JsSplitDelimiterBehavior::MergedWithNext => SplitDelimiterBehavior::MergedWithNext,
JsSplitDelimiterBehavior::Contiguous => SplitDelimiterBehavior::Contiguous,
}
}
}
/// PreTokenizers
#[derive(Clone, Debug, Serialize, Deserialize)]
#[napi]
pub struct PreTokenizer {
#[serde(flatten, with = "arc_rwlock_serde")]
pretok: Option<Arc<RwLock<PreTokenizerWrapper>>>,
}
impl tk::PreTokenizer for PreTokenizer {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> tk::Result<()> {
self
.pretok
.as_ref()
.ok_or("Uninitialized PreTokenizer")?
.read()
.unwrap()
.pre_tokenize(pretokenized)?;
Ok(())
}
}
#[napi]
impl PreTokenizer {
#[napi(ts_return_type = "[string, [number, number]][]")]
pub fn pre_tokenize_string(&self, sequence: String, env: Env) -> Result<Vec<Array>> {
use tk::PreTokenizer;
let mut pretokenized = PreTokenizedString::from(sequence);
self
.pre_tokenize(&mut pretokenized)
.map_err(|e| Error::from_reason(format!("{}", e)))?;
pretokenized
.get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char)
.into_iter()
.map(|(s, (start, end), _)| -> Result<Array> {
let mut arr = env.create_array(2)?;
let mut offset = env.create_array(2)?;
offset.set(0, env.create_uint32(start as u32)?)?;
offset.set(1, env.create_uint32(end as u32)?)?;
arr.set(0, env.create_string(s)?)?;
arr.set(1, offset)?;
Ok(arr)
})
.collect::<Result<Vec<_>>>()
}
}
/// byte_level(addPrefixSpace: bool = true, useRegex: bool = true)
#[napi]
pub fn byte_level_pre_tokenizer(
add_prefix_space: Option<bool>,
use_regex: Option<bool>,
) -> PreTokenizer {
let mut byte_level = tk::pre_tokenizers::byte_level::ByteLevel::default();
if let Some(add_prefix_space) = add_prefix_space {
byte_level = byte_level.add_prefix_space(add_prefix_space);
}
if let Some(use_regex) = use_regex {
byte_level = byte_level.use_regex(use_regex);
}
PreTokenizer {
pretok: Some(Arc::new(RwLock::new(byte_level.into()))),
}
}
#[napi]
pub fn byte_level_alphabet() -> Vec<String> {
tk::pre_tokenizers::byte_level::ByteLevel::alphabet()
.into_iter()
.map(|c| c.to_string())
.collect::<Vec<_>>()
}
#[napi]
pub fn whitespace_pre_tokenizer() -> PreTokenizer {
PreTokenizer {
pretok: Some(Arc::new(RwLock::new(
tk::pre_tokenizers::whitespace::Whitespace.into(),
))),
}
}
#[napi]
pub fn whitespace_split_pre_tokenizer() -> PreTokenizer {
PreTokenizer {
pretok: Some(Arc::new(RwLock::new(
tk::pre_tokenizers::whitespace::WhitespaceSplit.into(),
))),
}
}
#[napi]
pub fn bert_pre_tokenizer() -> PreTokenizer {
PreTokenizer {
pretok: Some(Arc::new(RwLock::new(
tk::pre_tokenizers::bert::BertPreTokenizer.into(),
))),
}
}
#[napi]
pub fn metaspace_pre_tokenizer(
#[napi(ts_arg_type = "string = '▁'")] replacement: Option<String>,
#[napi(ts_arg_type = "prepend_scheme = 'always'")] prepend_scheme: Option<String>,
#[napi(ts_arg_type = "split = true")] split: Option<bool>,
) -> Result<PreTokenizer> {
use tk::pre_tokenizers::metaspace::PrependScheme;
let split = split.unwrap_or(true);
let replacement = replacement.unwrap_or("▁".to_string());
if replacement.chars().count() != 1 {
return Err(Error::from_reason(
"replacement is supposed to be a single char",
));
}
let replacement = replacement.chars().next().unwrap();
let prepend_scheme: PrependScheme =
match prepend_scheme.unwrap_or(String::from("always")).as_str() {
"always" => PrependScheme::Always,
"first" => PrependScheme::First,
"never" => PrependScheme::Never,
_ => {
return Err(Error::from_reason(
"prepend_scheme is supposed to be either 'always', 'first' or 'never'",
));
}
};
Ok(PreTokenizer {
pretok: Some(Arc::new(RwLock::new(
tk::pre_tokenizers::metaspace::Metaspace::new(replacement, prepend_scheme, split).into(),
))),
})
}
#[napi]
pub fn split_pre_tokenizer(
pattern: String,
behavior: String,
invert: Option<bool>,
) -> Result<PreTokenizer> {
let behavior: JsSplitDelimiterBehavior = behavior.try_into()?;
let invert = invert.unwrap_or(false);
Ok(PreTokenizer {
pretok: Some(Arc::new(RwLock::new(
tk::pre_tokenizers::split::Split::new(pattern, behavior.into(), invert)
.map_err(|e| Error::from_reason(e.to_string()))?
.into(),
))),
})
}
#[napi]
pub fn punctuation_pre_tokenizer(behavior: Option<String>) -> Result<PreTokenizer> {
let behavior = match behavior {
Some(behavior) => behavior.try_into()?,
None => JsSplitDelimiterBehavior::Isolated,
};
Ok(PreTokenizer {
pretok: Some(Arc::new(RwLock::new(
tk::pre_tokenizers::punctuation::Punctuation::new(behavior.into()).into(),
))),
})
}
#[napi]
pub fn sequence_pre_tokenizer(pre_tokenizers: Vec<&PreTokenizer>) -> PreTokenizer {
let mut sequence: Vec<PreTokenizerWrapper> = Vec::with_capacity(pre_tokenizers.len());
pre_tokenizers.into_iter().for_each(|pre_tokenizer| {
if let Some(pre_tokenizer) = &pre_tokenizer.pretok {
sequence.push((**pre_tokenizer).read().unwrap().clone())
}
});
PreTokenizer {
pretok: Some(Arc::new(RwLock::new(PreTokenizerWrapper::Sequence(
tk::pre_tokenizers::sequence::Sequence::new(sequence),
)))),
}
}
#[napi]
pub fn char_delimiter_split(delimiter: String) -> Result<PreTokenizer> {
if delimiter.chars().count() != 1 {
return Err(Error::from_reason(
"delimiter is supposed to be a single char",
));
}
let delimiter = delimiter.chars().next().unwrap();
Ok(PreTokenizer {
pretok: Some(Arc::new(RwLock::new(
tk::pre_tokenizers::delimiter::CharDelimiterSplit::new(delimiter).into(),
))),
})
}
#[napi]
pub fn digits_pre_tokenizer(individual_digits: Option<bool>) -> PreTokenizer {
let individual_digits = individual_digits.unwrap_or(false);
PreTokenizer {
pretok: Some(Arc::new(RwLock::new(
tk::pre_tokenizers::digits::Digits::new(individual_digits).into(),
))),
}
}
| tokenizers/bindings/node/src/pre_tokenizers.rs/0 | {
"file_path": "tokenizers/bindings/node/src/pre_tokenizers.rs",
"repo_id": "tokenizers",
"token_count": 3152
} |
.PHONY: style check-style test
DATA_DIR = data
dir_guard=@mkdir -p $(@D)
check_dirs := examples py_src/tokenizers tests
# Format source code automatically
style:
python stub.py
ruff check $(check_dirs) --fix
ruff format $(check_dirs)
# Check the source code is formatted correctly
check-style:
python stub.py --check
ruff check $(check_dirs)
ruff format --check $(check_dirs)
TESTS_RESOURCES = $(DATA_DIR)/small.txt $(DATA_DIR)/roberta.json
# Launch the test suite
test: $(TESTS_RESOURCES)
pip install pytest requests setuptools_rust numpy pyarrow datasets
python -m pytest -s -v tests
cargo test --no-default-features
$(DATA_DIR)/big.txt :
$(dir_guard)
wget https://norvig.com/big.txt -O $@
$(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt
head -100 $(DATA_DIR)/big.txt > $@
$(DATA_DIR)/roberta.json :
$(dir_guard)
wget https://huggingface.co/roberta-large/raw/main/tokenizer.json -O $@
| tokenizers/bindings/python/Makefile/0 | {
"file_path": "tokenizers/bindings/python/Makefile",
"repo_id": "tokenizers",
"token_count": 349
} |
from typing import Dict, Iterator, List, Optional, Union
from tokenizers import AddedToken, Tokenizer, decoders, trainers
from tokenizers.models import WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.processors import BertProcessing
from .base_tokenizer import BaseTokenizer
class BertWordPieceTokenizer(BaseTokenizer):
"""Bert WordPiece Tokenizer"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
unk_token: Union[str, AddedToken] = "[UNK]",
sep_token: Union[str, AddedToken] = "[SEP]",
cls_token: Union[str, AddedToken] = "[CLS]",
pad_token: Union[str, AddedToken] = "[PAD]",
mask_token: Union[str, AddedToken] = "[MASK]",
clean_text: bool = True,
handle_chinese_chars: bool = True,
strip_accents: Optional[bool] = None,
lowercase: bool = True,
wordpieces_prefix: str = "##",
):
if vocab is not None:
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token)))
else:
tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token)))
# Let the tokenizer know about special tokens if they are part of the vocab
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
if tokenizer.token_to_id(str(sep_token)) is not None:
tokenizer.add_special_tokens([str(sep_token)])
if tokenizer.token_to_id(str(cls_token)) is not None:
tokenizer.add_special_tokens([str(cls_token)])
if tokenizer.token_to_id(str(pad_token)) is not None:
tokenizer.add_special_tokens([str(pad_token)])
if tokenizer.token_to_id(str(mask_token)) is not None:
tokenizer.add_special_tokens([str(mask_token)])
tokenizer.normalizer = BertNormalizer(
clean_text=clean_text,
handle_chinese_chars=handle_chinese_chars,
strip_accents=strip_accents,
lowercase=lowercase,
)
tokenizer.pre_tokenizer = BertPreTokenizer()
if vocab is not None:
sep_token_id = tokenizer.token_to_id(str(sep_token))
if sep_token_id is None:
raise TypeError("sep_token not found in the vocabulary")
cls_token_id = tokenizer.token_to_id(str(cls_token))
if cls_token_id is None:
raise TypeError("cls_token not found in the vocabulary")
tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id))
tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix)
parameters = {
"model": "BertWordPiece",
"unk_token": unk_token,
"sep_token": sep_token,
"cls_token": cls_token,
"pad_token": pad_token,
"mask_token": mask_token,
"clean_text": clean_text,
"handle_chinese_chars": handle_chinese_chars,
"strip_accents": strip_accents,
"lowercase": lowercase,
"wordpieces_prefix": wordpieces_prefix,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab: str, **kwargs):
vocab = WordPiece.read_file(vocab)
return BertWordPieceTokenizer(vocab, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
special_tokens: List[Union[str, AddedToken]] = [
"[PAD]",
"[UNK]",
"[CLS]",
"[SEP]",
"[MASK]",
],
show_progress: bool = True,
wordpieces_prefix: str = "##",
):
"""Train the model using the given files"""
trainer = trainers.WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
special_tokens=special_tokens,
show_progress=show_progress,
continuing_subword_prefix=wordpieces_prefix,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
special_tokens: List[Union[str, AddedToken]] = [
"[PAD]",
"[UNK]",
"[CLS]",
"[SEP]",
"[MASK]",
],
show_progress: bool = True,
wordpieces_prefix: str = "##",
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
special_tokens=special_tokens,
show_progress=show_progress,
continuing_subword_prefix=wordpieces_prefix,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
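# Illustrative sketch (not part of the original file): training a BertWordPieceTokenizer
# from a tiny in-memory corpus and encoding a sentence. The corpus and vocab size are
# made up; since no initial vocab is given, no [CLS]/[SEP] post-processing is attached.
def _demo_train_bert_wordpiece():
    corpus = ["hello world", "hello tokenizers", "wordpiece splits words into pieces"]
    tokenizer = BertWordPieceTokenizer(lowercase=True)
    tokenizer.train_from_iterator(corpus, vocab_size=100, min_frequency=1)
    encoding = tokenizer.encode("hello wordpiece")
    return encoding.tokens  # list of wordpiece tokens for the sentence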
| tokenizers/bindings/python/py_src/tokenizers/implementations/bert_wordpiece.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/bert_wordpiece.py",
"repo_id": "tokenizers",
"token_count": 2637
} |
use pyo3::prelude::*;
use tk::Token;
#[pyclass(module = "tokenizers", name = "Token")]
#[derive(Clone)]
pub struct PyToken {
token: Token,
}
impl From<Token> for PyToken {
fn from(token: Token) -> Self {
Self { token }
}
}
impl From<PyToken> for Token {
fn from(token: PyToken) -> Self {
token.token
}
}
#[pymethods]
impl PyToken {
#[new]
#[pyo3(text_signature = None)]
fn new(id: u32, value: String, offsets: (usize, usize)) -> PyToken {
Token::new(id, value, offsets).into()
}
#[getter]
fn get_id(&self) -> u32 {
self.token.id
}
#[getter]
fn get_value(&self) -> &str {
&self.token.value
}
#[getter]
fn get_offsets(&self) -> (usize, usize) {
self.token.offsets
}
fn as_tuple(&self) -> (u32, &str, (usize, usize)) {
(self.token.id, &self.token.value, self.token.offsets)
}
}
| tokenizers/bindings/python/src/token.rs/0 | {
"file_path": "tokenizers/bindings/python/src/token.rs",
"repo_id": "tokenizers",
"token_count": 439
} |
import pickle
import pytest
from tokenizers import NormalizedString
from tokenizers.normalizers import (
BertNormalizer,
Lowercase,
Normalizer,
Precompiled,
Sequence,
Strip,
Prepend,
Replace,
)
class TestBertNormalizer:
def test_instantiate(self):
assert isinstance(BertNormalizer(), Normalizer)
assert isinstance(BertNormalizer(), BertNormalizer)
assert isinstance(pickle.loads(pickle.dumps(BertNormalizer())), BertNormalizer)
def test_strip_accents(self):
normalizer = BertNormalizer(strip_accents=True, lowercase=False, handle_chinese_chars=False, clean_text=False)
output = normalizer.normalize_str("Héllò")
assert output == "Hello"
def test_handle_chinese_chars(self):
normalizer = BertNormalizer(strip_accents=False, lowercase=False, handle_chinese_chars=True, clean_text=False)
output = normalizer.normalize_str("你好")
assert output == " 你 好 "
def test_clean_text(self):
normalizer = BertNormalizer(strip_accents=False, lowercase=False, handle_chinese_chars=False, clean_text=True)
output = normalizer.normalize_str("\ufeffHello")
assert output == "Hello"
def test_lowercase(self):
normalizer = BertNormalizer(strip_accents=False, lowercase=True, handle_chinese_chars=False, clean_text=False)
output = normalizer.normalize_str("Héllò")
assert output == "héllò"
def test_can_modify(self):
normalizer = BertNormalizer(clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True)
assert normalizer.clean_text == True
assert normalizer.handle_chinese_chars == True
assert normalizer.strip_accents == True
assert normalizer.lowercase == True
# Modify these
normalizer.clean_text = False
assert normalizer.clean_text == False
normalizer.handle_chinese_chars = False
assert normalizer.handle_chinese_chars == False
normalizer.strip_accents = None
assert normalizer.strip_accents == None
normalizer.lowercase = False
assert normalizer.lowercase == False
class TestSequence:
def test_instantiate(self):
assert isinstance(Sequence([]), Normalizer)
assert isinstance(Sequence([]), Sequence)
assert isinstance(pickle.loads(pickle.dumps(Sequence([]))), Sequence)
def test_can_make_sequences(self):
normalizer = Sequence([Lowercase(), Strip()])
output = normalizer.normalize_str(" HELLO ")
assert output == "hello"
def test_set_item(self):
normalizers = Sequence(
[
BertNormalizer(True, True),
Prepend(prepend="test"),
]
)
assert normalizers[0].__class__ == BertNormalizer
assert normalizers[1].__class__ == Prepend
normalizers[1] = Strip()
assert normalizers[1].__class__ == Strip
with pytest.raises(IndexError):
print(normalizers[2])
def test_item_getters_and_setters(self):
normalizers = Sequence(
[
BertNormalizer(clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True),
Strip(left=True, right=True),
Prepend(prepend="_"),
Replace(pattern="something", content="else"),
]
)
assert normalizers[0].__class__ == BertNormalizer
normalizers[0].clean_text = False
normalizers[0].handle_chinese_chars = False
normalizers[0].strip_accents = False
normalizers[0].lowercase = False
assert not normalizers[0].clean_text
assert not normalizers[0].handle_chinese_chars
assert not normalizers[0].strip_accents
assert not normalizers[0].lowercase
assert normalizers[1].__class__ == Strip
normalizers[1].left = False
normalizers[1].right = False
assert not normalizers[1].left
assert not normalizers[1].right
assert normalizers[2].__class__ == Prepend
normalizers[2].prepend = " "
assert normalizers[2].prepend == " "
assert normalizers[3].__class__ == Replace
with pytest.raises(Exception):
normalizers[3].pattern = "test"
with pytest.raises(Exception):
print(normalizers[3].pattern)
normalizers[3].content = "test"
assert normalizers[3].content == "test"
class TestLowercase:
def test_instantiate(self):
assert isinstance(Lowercase(), Normalizer)
assert isinstance(Lowercase(), Lowercase)
assert isinstance(pickle.loads(pickle.dumps(Lowercase())), Lowercase)
def test_lowercase(self):
normalizer = Lowercase()
output = normalizer.normalize_str("HELLO")
assert output == "hello"
class TestStrip:
def test_instantiate(self):
assert isinstance(Strip(), Normalizer)
assert isinstance(Strip(), Strip)
assert isinstance(pickle.loads(pickle.dumps(Strip())), Strip)
def test_left_strip(self):
normalizer = Strip(left=True, right=False)
output = normalizer.normalize_str(" hello ")
assert output == "hello "
def test_right_strip(self):
normalizer = Strip(left=False, right=True)
output = normalizer.normalize_str(" hello ")
assert output == " hello"
def test_full_strip(self):
normalizer = Strip(left=True, right=True)
output = normalizer.normalize_str(" hello ")
assert output == "hello"
def test_can_modify(self):
normalizer = Strip(left=True, right=True)
assert normalizer.left == True
assert normalizer.right == True
# Modify these
normalizer.left = False
assert normalizer.left == False
normalizer.right = False
assert normalizer.right == False
class TestPrepend:
def test_instantiate(self):
assert isinstance(Prepend("▁"), Normalizer)
assert isinstance(Prepend("▁"), Prepend)
assert isinstance(pickle.loads(pickle.dumps(Prepend("▁"))), Prepend)
def test_prepend(self):
normalizer = Prepend(prepend="▁")
output = normalizer.normalize_str("hello")
assert output == "▁hello"
def test_can_modify(self):
normalizer = Prepend("▁")
assert normalizer.prepend == "▁"
# Modify these
normalizer.prepend = "-"
assert normalizer.prepend == "-"
class TestCustomNormalizer:
class BadCustomNormalizer:
def normalize(self, normalized, wrong):
pass
class GoodCustomNormalizer:
def normalize(self, normalized):
self.kept_normalized = normalized
normalized.replace("there", "you")
def use_after_normalize(self):
self.kept_normalized.replace("something", "else")
def test_instantiate(self):
bad = Normalizer.custom(TestCustomNormalizer.BadCustomNormalizer())
good_custom = TestCustomNormalizer.GoodCustomNormalizer()
good = Normalizer.custom(good_custom)
assert isinstance(bad, Normalizer)
assert isinstance(good, Normalizer)
with pytest.raises(Exception, match="TypeError:.*normalize()"):
bad.normalize_str("Hey there!")
assert good.normalize_str("Hey there!") == "Hey you!"
with pytest.raises(Exception, match="Cannot use a NormalizedStringRefMut outside `normalize`"):
good_custom.use_after_normalize()
def test_normalizer_interface(self):
normalizer = Normalizer.custom(TestCustomNormalizer.GoodCustomNormalizer())
normalized = NormalizedString("Hey there!")
normalizer.normalize(normalized)
assert repr(normalized) == 'NormalizedString(original="Hey there!", normalized="Hey you!")'
assert str(normalized) == "Hey you!"
| tokenizers/bindings/python/tests/bindings/test_normalizers.py/0 | {
"file_path": "tokenizers/bindings/python/tests/bindings/test_normalizers.py",
"repo_id": "tokenizers",
"token_count": 3243
} |
import multiprocessing as mp
import os
import pytest
import requests
DATA_PATH = os.path.join("tests", "data")
def download(url, with_filename=None):
filename = with_filename if with_filename is not None else url.rsplit("/")[-1]
filepath = os.path.join(DATA_PATH, filename)
if not os.path.exists(filepath):
with open(filepath, "wb") as f:
response = requests.get(url, stream=True)
response.raise_for_status()
for chunk in response.iter_content(1024):
f.write(chunk)
return filepath
@pytest.fixture(scope="session")
def data_dir():
assert os.getcwd().endswith("python")
exist = os.path.exists(DATA_PATH) and os.path.isdir(DATA_PATH)
if not exist:
os.mkdir(DATA_PATH)
@pytest.fixture(scope="session")
def roberta_files(data_dir):
return {
"vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json"),
"merges": download("https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt"),
}
@pytest.fixture(scope="session")
def bert_files(data_dir):
return {
"vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt"),
}
@pytest.fixture(scope="session")
def openai_files(data_dir):
return {
"vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json"),
"merges": download("https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt"),
}
@pytest.fixture(scope="session")
def train_files(data_dir):
big = download("https://norvig.com/big.txt")
small = os.path.join(DATA_PATH, "small.txt")
with open(small, "w") as f:
with open(big, "r") as g:
for i, line in enumerate(g):
f.write(line)
if i > 100:
break
return {
"small": small,
"big": big,
}
@pytest.fixture(scope="session")
def albert_base(data_dir):
return download("https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-tokenizer.json")
@pytest.fixture(scope="session")
def doc_wiki_tokenizer(data_dir):
return download(
"https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json",
"tokenizer-wiki.json",
)
@pytest.fixture(scope="session")
def doc_pipeline_bert_tokenizer(data_dir):
return download(
"https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json",
"bert-wiki.json",
)
# On macOS, Python 3.8+ changed the default start method to `spawn`; these tests need `fork`.
mp.set_start_method("fork")
def multiprocessing_with_parallelism(tokenizer, enabled: bool):
"""
This helper can be used to test that disabling parallelism avoids deadlocks when the
same tokenizer is used after forking.
"""
# It's essential to this test that we call 'encode' or 'encode_batch'
# before the fork. This causes the main process to "lock" some resources
# provided by the Rust "rayon" crate that are needed for parallel processing.
tokenizer.encode("Hi")
tokenizer.encode_batch(["hi", "there"])
def encode(tokenizer):
tokenizer.encode("Hi")
tokenizer.encode_batch(["hi", "there"])
# Make sure this environment variable is set before the fork happens
os.environ["TOKENIZERS_PARALLELISM"] = str(enabled)
p = mp.Process(target=encode, args=(tokenizer,))
p.start()
p.join(timeout=1)
# By now the child should have exited if parallelism was disabled; with parallelism enabled
# (and the `fork` start method) it is expected to hang, so we check its status and kill it if needed.
alive = p.is_alive()
if alive:
p.terminate()
assert (alive and mp.get_start_method() == "fork") == enabled
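# Illustrative usage sketch (not part of the original file): a test that receives a `tokenizer`
# fixture would typically call the helper once per setting, e.g.
#   multiprocessing_with_parallelism(tokenizer, False)  # parallelism disabled -> child exits cleanly
#   multiprocessing_with_parallelism(tokenizer, True)   # parallelism enabled -> child is expected to hang under "fork"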
| tokenizers/bindings/python/tests/utils.py/0 | {
"file_path": "tokenizers/bindings/python/tests/utils.py",
"repo_id": "tokenizers",
"token_count": 1569
} |
[package]
authors = ["Anthony MOI <[email protected]>", "Nicolas Patry <[email protected]>"]
edition = "2018"
name = "tokenizers"
version = "0.21.0-dev.0"
homepage = "https://github.com/huggingface/tokenizers"
repository = "https://github.com/huggingface/tokenizers"
documentation = "https://docs.rs/tokenizers/"
license = "Apache-2.0"
keywords = ["tokenizer", "NLP", "huggingface", "BPE", "WordPiece"]
readme = "./README.md"
description = """
Provides an implementation of today's most used tokenizers,
with a focus on performance and versatility.
"""
exclude = [ "rust-toolchain", "target/*", "Cargo.lock", "benches/*.txt", "benches/*.json", "data/*" ]
[lib]
name = "tokenizers"
path = "src/lib.rs"
bench = false
[[bench]]
name = "bpe_benchmark"
harness = false
[[bench]]
name = "bert_benchmark"
harness = false
[[bench]]
name = "layout_benchmark"
harness = false
[[bench]]
name = "unigram_benchmark"
harness = false
[[bench]]
name = "llama3"
required-features = ["http"]
harness = false
[dependencies]
lazy_static = "1.4"
rand = "0.8"
onig = { version = "6.4", default-features = false, optional = true }
regex = "1.10"
regex-syntax = "0.8"
rayon = "1.10"
rayon-cond = "0.3"
serde = { version = "1.0", features = [ "derive" ] }
serde_json = "1.0"
unicode-normalization-alignments = "0.1"
unicode_categories = "0.1"
unicode-segmentation = "1.11"
indicatif = {version = "0.17", optional = true}
itertools = "0.13"
log = "0.4"
derive_builder = "0.20"
spm_precompiled = "0.1.3"
hf-hub = { version = "0.3.2", optional = true }
aho-corasick = "1.1"
paste = "1.0.14"
macro_rules_attribute = "0.2.0"
thiserror = "2"
fancy-regex = { version = "0.14", optional = true}
getrandom = { version = "0.2.10" }
esaxx-rs = { version = "0.1.10", default-features = false, features=[]}
monostate = "0.1.12"
[features]
default = ["progressbar", "onig", "esaxx_fast"]
esaxx_fast = ["esaxx-rs/cpp"]
progressbar = ["indicatif"]
http = ["hf-hub"]
unstable_wasm = ["fancy-regex", "getrandom/js"]
[dev-dependencies]
criterion = "0.5"
tempfile = "3.10"
assert_approx_eq = "1.1"
tracing = "0.1"
tracing-subscriber = "0.3.18"
[profile.release]
lto = "fat"
[[example]]
name = "encode_batch"
required-features = ["http"]
| tokenizers/tokenizers/Cargo.toml/0 | {
"file_path": "tokenizers/tokenizers/Cargo.toml",
"repo_id": "tokenizers",
"token_count": 908
} |
mod utils;
use tokenizers::models::bpe::{Vocab, BPE};
use tokenizers::Tokenizer;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
pub fn tokenize(string: &str) -> Vec<u32> {
let vocab: Vocab = vec![
("a".to_string(), 0),
("##b".to_string(), 1),
("##c".to_string(), 2),
("ab".to_string(), 3),
("abc".to_string(), 4),
]
.into_iter()
.collect();
let merges = vec![
("a".to_string(), "##b".to_string()),
("ab".to_string(), "##c".to_string()),
];
let bpe = BPE::builder()
.vocab_and_merges(vocab, merges)
.unk_token("[UNK]".to_string())
.continuing_subword_prefix("##".to_string())
.build()
.unwrap();
let tokenizer = Tokenizer::new(bpe);
tokenizer
.encode(string, false)
.unwrap()
.get_ids()
.into_iter()
.cloned()
.collect()
}
| tokenizers/tokenizers/examples/unstable_wasm/src/lib.rs/0 | {
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/src/lib.rs",
"repo_id": "tokenizers",
"token_count": 543
} |
use crate::tokenizer::{Decoder, Result};
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize)]
/// Allows decoding the original BPE output by joining all the tokens and then replacing
/// the suffix used to mark the end of a word with whitespace
#[serde(tag = "type")]
#[non_exhaustive]
pub struct BPEDecoder {
pub suffix: String,
}
impl BPEDecoder {
pub fn new(suffix: String) -> Self {
Self { suffix }
}
}
impl Default for BPEDecoder {
fn default() -> Self {
Self::new("</w>".into())
}
}
impl Decoder for BPEDecoder {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
let n = tokens.len() - 1;
Ok(tokens
.into_iter()
.enumerate()
.map(|(i, token)| {
let replacement = if i == n { "" } else { " " };
token.replace(&self.suffix, replacement)
})
.collect())
}
}
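// Illustrative usage sketch (not part of the original file): the default decoder turns the
// "</w>" end-of-word suffix back into word boundaries when rejoining BPE tokens.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn decode_chain_restores_word_boundaries() {
        let decoder = BPEDecoder::default();
        let tokens = vec!["hello</w>".to_string(), "world</w>".to_string()];
        // Every token but the last maps its suffix to a space; the last one simply drops it.
        assert_eq!(
            decoder.decode_chain(tokens).unwrap(),
            vec!["hello ".to_string(), "world".to_string()]
        );
    }
}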
| tokenizers/tokenizers/src/decoders/bpe.rs/0 | {
"file_path": "tokenizers/tokenizers/src/decoders/bpe.rs",
"repo_id": "tokenizers",
"token_count": 419
} |
//! [Unigram](https://arxiv.org/abs/1804.10959) model.
mod lattice;
mod model;
mod serialization;
mod trainer;
mod trie;
pub use lattice::*;
pub use model::*;
pub use trainer::*;
| tokenizers/tokenizers/src/models/unigram/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/unigram/mod.rs",
"repo_id": "tokenizers",
"token_count": 72
} |
use crate::tokenizer::pattern::Pattern;
use crate::tokenizer::Decoder;
use crate::tokenizer::{NormalizedString, Normalizer, Result};
use crate::utils::SysRegex;
use serde::{Deserialize, Serialize};
/// Represents the different patterns that `Replace` can use
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)]
pub enum ReplacePattern {
String(String),
Regex(String),
}
impl From<String> for ReplacePattern {
fn from(v: String) -> Self {
Self::String(v)
}
}
impl From<&str> for ReplacePattern {
fn from(v: &str) -> Self {
Self::String(v.to_owned())
}
}
/// We use this custom deserializer to provide the value for `regex` for `Replace`
#[doc(hidden)]
#[derive(Deserialize)]
#[serde(tag = "type")]
struct ReplaceDeserializer {
pattern: ReplacePattern,
content: String,
}
impl std::convert::TryFrom<ReplaceDeserializer> for Replace {
type Error = Box<dyn std::error::Error + Send + Sync>;
fn try_from(v: ReplaceDeserializer) -> Result<Self> {
Self::new(v.pattern, v.content)
}
}
/// This normalizer will take a `pattern` (for now only a String)
/// and replace every occurrence with `content`.
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type", try_from = "ReplaceDeserializer")]
pub struct Replace {
pattern: ReplacePattern,
pub content: String,
#[serde(skip)]
regex: SysRegex,
}
impl Clone for Replace {
fn clone(&self) -> Self {
Self::new(self.pattern.clone(), &self.content).unwrap()
}
}
impl PartialEq for Replace {
fn eq(&self, other: &Self) -> bool {
self.pattern == other.pattern && self.content == other.content
}
}
impl Replace {
pub fn new<I: Into<ReplacePattern>, C: Into<String>>(pattern: I, content: C) -> Result<Self> {
let pattern: ReplacePattern = pattern.into();
let regex = match &pattern {
ReplacePattern::String(s) => SysRegex::new(®ex::escape(s))?,
ReplacePattern::Regex(r) => SysRegex::new(r)?,
};
Ok(Self {
pattern,
content: content.into(),
regex,
})
}
}
impl Normalizer for Replace {
fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
normalized.replace(&self.regex, &self.content)
}
}
impl Decoder for Replace {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
tokens
.into_iter()
.map(|token| -> Result<String> {
let mut new_token = "".to_string();
for ((start, stop), is_match) in (&self.regex).find_matches(&token)? {
if is_match {
new_token.push_str(&self.content);
} else {
new_token.push_str(&token[start..stop]);
}
}
Ok(new_token)
})
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_replace() {
let original = "This is a ''test''";
let normalized = "This is a \"test\"";
let mut n = NormalizedString::from(original);
Replace::new("''", "\"").unwrap().normalize(&mut n).unwrap();
assert_eq!(&n.get(), &normalized);
}
#[test]
fn test_replace_regex() {
let original = "This is a test";
let normalized = "This is a test";
let mut n = NormalizedString::from(original);
Replace::new(ReplacePattern::Regex(r"\s+".into()), ' ')
.unwrap()
.normalize(&mut n)
.unwrap();
assert_eq!(&n.get(), &normalized);
}
#[test]
fn serialization() {
let replace = Replace::new("Hello", "Hey").unwrap();
let replace_s = r#"{"type":"Replace","pattern":{"String":"Hello"},"content":"Hey"}"#;
assert_eq!(serde_json::to_string(&replace).unwrap(), replace_s);
assert_eq!(serde_json::from_str::<Replace>(replace_s).unwrap(), replace);
let replace = Replace::new(ReplacePattern::Regex(r"\s+".into()), ' ').unwrap();
let replace_s = r#"{"type":"Replace","pattern":{"Regex":"\\s+"},"content":" "}"#;
assert_eq!(serde_json::to_string(&replace).unwrap(), replace_s);
assert_eq!(serde_json::from_str::<Replace>(replace_s).unwrap(), replace);
}
#[test]
fn test_replace_decode() {
let original = vec!["hello".to_string(), "_hello".to_string()];
let replace = Replace::new("_", " ").unwrap();
assert_eq!(
replace.decode_chain(original).unwrap(),
vec!["hello", " hello"]
);
}
}
| tokenizers/tokenizers/src/normalizers/replace.rs/0 | {
"file_path": "tokenizers/tokenizers/src/normalizers/replace.rs",
"repo_id": "tokenizers",
"token_count": 2049
} |
use regex::Regex;
use crate::tokenizer::{
pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior,
};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Whitespace;
impl Default for Whitespace {
fn default() -> Self {
Self
}
}
impl PreTokenizer for Whitespace {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
lazy_static! {
static ref RE: Regex = Regex::new(r"\w+|[^\w\s]+").unwrap();
}
let re_ref: &Regex = &RE;
pretokenized.split(|_, normalized| {
normalized.split(Invert(re_ref), SplitDelimiterBehavior::Removed)
})
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct WhitespaceSplit;
impl PreTokenizer for WhitespaceSplit {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, normalized| {
normalized.split(char::is_whitespace, SplitDelimiterBehavior::Removed)
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType, PreTokenizer};
#[test]
fn basic() {
let tests = vec![
(
"Hey man!",
vec![("Hey", (0, 3)), ("man", (4, 7)), ("!", (7, 8))],
),
(
"How are you doing?",
vec![
("How", (0, 3)),
("are", (4, 7)),
("you", (8, 11)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
("\n", vec![]),
];
let pretok = Whitespace {};
for (s, res) in tests {
let mut pretokenized = PreTokenizedString::from(s);
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
res
);
}
}
#[test]
fn whitespace_split() {
let tests = vec![
("Hey man!", vec![("Hey", (0, 3)), ("man!", (4, 8))]),
(
"Hey, man, Good?",
vec![("Hey,", (0, 4)), ("man,", (5, 9)), ("Good?", (10, 15))],
),
];
let pretok = WhitespaceSplit;
for (s, res) in tests {
let mut pretokenized = PreTokenizedString::from(s);
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
res
);
}
}
}
| tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs",
"repo_id": "tokenizers",
"token_count": 1660
} |
//! This comes from the Rust libcore and is duplicated here because it is not exported
//! (cf <https://github.com/rust-lang/rust/blob/25091ed9b7739e12466fb2490baa1e8a2815121c/src/libcore/iter/adapters/mod.rs#L2664>)
//! We are now using the version from <https://stackoverflow.com/questions/44544323/how-to-unzip-a-sequence-of-resulta-b-e-to-a-veca-vecb-and-stop-on-f>
//! because the one from libcore seems to cause stack overflows in some cases.
//! It also contains a `lines_with_ending` helper that mirrors `std::io::BufRead::lines` but keeps line endings.
use std::io::BufRead;
pub struct ResultShunt<I, E> {
iter: I,
error: Option<E>,
}
impl<I, T, E> ResultShunt<I, E>
where
I: Iterator<Item = Result<T, E>>,
{
/// Process the given iterator as if it yielded a `T` instead of a
/// `Result<T, _>`. Any errors will stop the inner iterator and
/// the overall result will be an error.
pub fn process<F, U>(iter: I, mut f: F) -> Result<U, E>
where
F: FnMut(&mut Self) -> U,
{
let mut shunt = ResultShunt::new(iter);
let value = f(shunt.by_ref());
shunt.reconstruct(value)
}
fn new(iter: I) -> Self {
ResultShunt { iter, error: None }
}
/// Consume the adapter and rebuild a `Result` value. This should
/// *always* be called, otherwise any potential error would be
/// lost.
fn reconstruct<U>(self, val: U) -> Result<U, E> {
match self.error {
None => Ok(val),
Some(e) => Err(e),
}
}
}
impl<I, T, E> Iterator for ResultShunt<I, E>
where
I: Iterator<Item = Result<T, E>>,
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
match self.iter.next() {
Some(Ok(v)) => Some(v),
Some(Err(e)) => {
self.error = Some(e);
None
}
None => None,
}
}
}
/// Copied from std::io::BufRead but keep newline characters.
#[derive(Debug)]
pub struct Lines<B> {
buf: B,
}
pub trait LinesWithEnding<B> {
fn lines_with_ending(self) -> Lines<B>;
}
impl<B> LinesWithEnding<B> for B
where
B: BufRead,
{
fn lines_with_ending(self) -> Lines<B> {
Lines::<B> { buf: self }
}
}
impl<B: BufRead> Iterator for Lines<B> {
type Item = std::io::Result<String>;
fn next(&mut self) -> Option<Self::Item> {
let mut buf = String::new();
match self.buf.read_line(&mut buf) {
Ok(0) => None,
Ok(_n) => {
// if buf.ends_with('\n') {
// buf.pop();
// if buf.ends_with('\r') {
// buf.pop();
// }
// }
Some(Ok(buf))
}
Err(e) => Some(Err(e)),
}
}
}
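// Illustrative usage sketch (not part of the original file): exercises both helpers defined above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn result_shunt_stops_on_first_error() {
        let items: Vec<Result<u32, &str>> = vec![Ok(1), Ok(2), Err("boom"), Ok(3)];
        // The closure only ever sees the values preceding the error; the error is then surfaced.
        let summed = ResultShunt::process(items.into_iter(), |it| it.sum::<u32>());
        assert_eq!(summed, Err("boom"));
    }

    #[test]
    fn lines_with_ending_keeps_newlines() {
        let lines: Vec<String> = std::io::Cursor::new("a\nb\r\nc")
            .lines_with_ending()
            .map(|line| line.unwrap())
            .collect();
        assert_eq!(lines, vec!["a\n", "b\r\n", "c"]);
    }
}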
| tokenizers/tokenizers/src/utils/iter.rs/0 | {
"file_path": "tokenizers/tokenizers/src/utils/iter.rs",
"repo_id": "tokenizers",
"token_count": 1339
} |
import re
README_TEMPLATE = """
<p align="center">
<br/>
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/transformersjs-dark.svg" width="500" style="max-width: 100%;">
<source media="(prefers-color-scheme: light)" srcset="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/transformersjs-light.svg" width="500" style="max-width: 100%;">
<img alt="transformers.js javascript library logo" src="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/transformersjs-light.svg" width="500" style="max-width: 100%;">
</picture>
<br/>
</p>
<p align="center">
<a href="https://www.npmjs.com/package/@huggingface/transformers"><img alt="NPM" src="https://img.shields.io/npm/v/@huggingface/transformers"></a>
<a href="https://www.npmjs.com/package/@huggingface/transformers"><img alt="NPM Downloads" src="https://img.shields.io/npm/dw/@huggingface/transformers"></a>
<a href="https://www.jsdelivr.com/package/npm/@huggingface/transformers"><img alt="jsDelivr Hits" src="https://img.shields.io/jsdelivr/npm/hw/@huggingface/transformers"></a>
<a href="https://github.com/huggingface/transformers.js/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/github/license/huggingface/transformers.js?color=blue"></a>
<a href="https://huggingface.co/docs/transformers.js/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers.js/index.svg?down_color=red&down_message=offline&up_message=online"></a>
</p>
{intro}
## Installation
{installation}
## Quick tour
{quick_tour}
## Examples
{examples}
## Custom usage
{custom_usage}
## Supported tasks/models
Here is the list of all tasks and architectures currently supported by Transformers.js.
If you don't see your task/model listed here or it is not yet supported, feel free
to open up a feature request [here](https://github.com/huggingface/transformers.js/issues/new/choose).
To find compatible models on the Hub, select the "transformers.js" library tag in the filter menu (or visit [this link](https://huggingface.co/models?library=transformers.js)).
You can refine your search by selecting the task you're interested in (e.g., [text-classification](https://huggingface.co/models?pipeline_tag=text-classification&library=transformers.js)).
{tasks}
{models}
"""
FILES_TO_INCLUDE = dict(
intro='./docs/snippets/0_introduction.snippet',
quick_tour='./docs/snippets/1_quick-tour.snippet',
installation='./docs/snippets/2_installation.snippet',
examples='./docs/snippets/3_examples.snippet',
custom_usage='./docs/snippets/4_custom-usage.snippet',
tasks='./docs/snippets/5_supported-tasks.snippet',
models='./docs/snippets/6_supported-models.snippet',
)
DOCS_BASE_URL = 'https://huggingface.co/docs/transformers.js'
# Map of custom links to replace, typically used for links to other sections of the README.
CUSTOM_LINK_MAP = {
'/custom_usage#convert-your-models-to-onnx': '#convert-your-models-to-onnx',
'./api/env': DOCS_BASE_URL + '/api/env',
'./guides/webgpu': DOCS_BASE_URL + '/guides/webgpu',
'./guides/dtypes': DOCS_BASE_URL + '/guides/dtypes',
}
def main():
file_data = {}
for key, file_path in FILES_TO_INCLUDE.items():
with open(file_path, encoding='utf-8') as f:
file_data[key] = f.read()
# Fix links:
# NOTE: This regex does not match all markdown links, but works for the ones we need to replace.
LINK_RE = r'(?<=\])\((.+?)\)'
def replace_fn(match):
link = match.group(1)
if link in CUSTOM_LINK_MAP:
link = CUSTOM_LINK_MAP[link]
elif link.startswith('/'):
# Link to docs
link = DOCS_BASE_URL + link
elif link.startswith('./'):
# Relative link to file
pass
elif link.startswith('http'):
# Link to external site
pass
return f'({link})'
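    # Illustrative examples of the rewriting above (not part of the original file; the second
    # link path is hypothetical):
    #   "[env docs](./api/env)" -> "[env docs](https://huggingface.co/docs/transformers.js/api/env)"   (custom map entry)
    #   "[guide](/tutorials)"   -> "[guide](https://huggingface.co/docs/transformers.js/tutorials)"    (leading "/" -> docs link)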
result = README_TEMPLATE.format(**file_data)
result = re.sub(LINK_RE, replace_fn, result, 0, re.MULTILINE)
with open('README.md', 'w', encoding='utf-8') as f:
f.write(result)
if __name__ == '__main__':
main()
| transformers.js/docs/scripts/build_readme.py/0 | {
"file_path": "transformers.js/docs/scripts/build_readme.py",
"repo_id": "transformers.js",
"token_count": 1756
} |
# Transformers.js
<include>
{
"path": "../snippets/0_introduction.snippet"
}
</include>
## Quick tour
<include>
{
"path": "../snippets/1_quick-tour.snippet"
}
</include>
## Contents
The documentation is organized into 4 sections:
1. **GET STARTED** provides a quick tour of the library and installation instructions to get up and running.
2. **TUTORIALS** are a great place to start if you're a beginner! We also include sample applications for you to play around with!
3. **DEVELOPER GUIDES** show you how to use the library to achieve a specific goal.
4. **API REFERENCE** describes all classes and functions, as well as their available parameters and types.
## Examples
<include>
{
"path": "../snippets/3_examples.snippet"
}
</include>
## Supported tasks/models
Here is the list of all tasks and architectures currently supported by Transformers.js.
If you don't see your task/model listed here or it is not yet supported, feel free
to open up a feature request [here](https://github.com/huggingface/transformers.js/issues/new/choose).
To find compatible models on the Hub, select the "transformers.js" library tag in the filter menu (or visit [this link](https://huggingface.co/models?library=transformers.js)).
You can refine your search by selecting the task you're interested in (e.g., [text-classification](https://huggingface.co/models?pipeline_tag=text-classification&library=transformers.js)).
<include>
{
"path": "../snippets/5_supported-tasks.snippet"
}
</include>
<include>
{
"path": "../snippets/6_supported-models.snippet"
}
</include>
| transformers.js/docs/source/index.md/0 | {
"file_path": "transformers.js/docs/source/index.md",
"repo_id": "transformers.js",
"token_count": 495
} |
import { useState, useRef, useEffect, useCallback } from 'react'
import './App.css'
const PLACEHOLDER_TEXTS = [
"A panda is a large black-and-white bear native to China.",
"The typical life span of a panda is 20 years in the wild.",
"A panda's diet consists almost entirely of bamboo.",
"Ailuropoda melanoleuca is a bear species endemic to China.",
"I love pandas so much!",
"Bamboo is a fast-growing, woody grass.",
"My favorite movie is Kung Fu Panda.",
"I love the color blue.",
"Once upon a time, in a land far, far away...",
"Hello world.",
"This is an example sentence.",
].sort(() => Math.random() - 0.5);
function normalize(embedding) {
const magnitude = Math.sqrt(embedding.reduce((sum, val) => sum + val * val, 0));
return embedding.map((val) => val / magnitude);
}
function dot(a, b) {
return a.reduce((acc, val, i) => acc + val * b[i], 0);
}
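// Illustrative note (not part of the original file): because `normalize` scales a vector to unit
// length, `dot` of two normalized vectors equals their cosine similarity, e.g.
//   dot(normalize([3, 4]), normalize([3, 4]))  // ≈ 1 (same direction)
//   dot(normalize([1, 0]), normalize([0, 1]))  // = 0 (orthogonal)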
function App() {
const [status, setStatus] = useState('idle');
const [source, setSource] = useState('What is a panda?');
const [text, setText] = useState(PLACEHOLDER_TEXTS.join('\n'));
const [dimensions, setDimensions] = useState(768);
const [embeddings, setEmbeddings] = useState([]);
const [results, setResults] = useState([]);
// Create a reference to the worker object.
const worker = useRef(null);
// We use the `useEffect` hook to setup the worker as soon as the `App` component is mounted.
useEffect(() => {
if (!worker.current) {
// Create the worker if it does not yet exist.
worker.current = new Worker(new URL('./worker.js', import.meta.url), {
type: 'module'
});
}
// Create a callback function for messages from the worker thread.
const onMessageReceived = (e) => {
const status = e.data.status;
if (status === 'initiate') {
setStatus('loading');
} else if (status === 'ready') {
setStatus('ready');
} else if (status === 'complete') {
const embeddings = e.data.embeddings;
setDimensions(embeddings[0].length);
setEmbeddings(embeddings);
setStatus('idle');
}
};
// Attach the callback function as an event listener.
worker.current.addEventListener('message', onMessageReceived);
// Define a cleanup function for when the component is unmounted.
return () => worker.current.removeEventListener('message', onMessageReceived);
}, []);
const run = useCallback(() => {
setStatus('processing');
worker.current.postMessage({
source,
text,
});
}, [source, text])
useEffect(() => {
if (embeddings.length === 0) return;
const slicedEmbeddings = embeddings.map(x => normalize(x.slice(0, dimensions)));
const sourceEmbedding = slicedEmbeddings[0];
const sentenceEmbeddings = slicedEmbeddings.slice(1);
// Compute the cosine similarity between the source sentence and the other sentences.
// NOTE: Since vectors are normalized, we use the dot product.
const similarities = sentenceEmbeddings.map((embedding) => dot(sourceEmbedding, embedding));
setResults(text.trim().split('\n').map((sentence, i) => ({
sentence,
similarity: similarities[i]
})).sort((a, b) => b.similarity - a.similarity));
}, [text, embeddings, dimensions])
const busy = status !== 'idle';
return (
<div className='flex flex-col h-full'>
<h1 className='text-2xl md:text-4xl font-bold text-center mb-1'>Adaptive Retrieval w/ Matryoshka Embeddings</h1>
<p className='text-lg md:text-xl font-medium text-center mb-2'>Powered by <a href='https://huggingface.co/nomic-ai/nomic-embed-text-v1.5'>Nomic Embed v1.5</a> and <a href='http://huggingface.co/docs/transformers.js'>🤗 Transformers.js</a></p>
<div className='flex-grow flex flex-wrap p-4'>
<div className='flex flex-col items-center gap-y-1 w-full md:w-1/2'>
<label className='text-lg font-medium'>Query</label>
<textarea
placeholder='Enter source sentence.'
className='border w-full p-1 resize-none overflow-hidden h-10'
value={source}
onChange={e => {
setSource(e.target.value);
setResults([]);
setEmbeddings([]);
}}
></textarea>
<label className='text-lg font-medium mt-1'>Text</label>
<textarea
placeholder='Enter sentences to compare with the source sentence. One sentence per line.'
className='border w-full p-1 h-full resize-none'
value={text}
onChange={e => {
setText(e.target.value);
setResults([]);
setEmbeddings([]);
}}
></textarea>
<button
className='border py-1 px-2 bg-blue-400 rounded text-white text-lg font-medium disabled:opacity-50 disabled:cursor-not-allowed'
disabled={busy}
onClick={run}>{
!busy
? (embeddings.length === 0 ? 'Compute Embeddings' : 'Recompute Embeddings')
: status === 'loading'
? 'Model loading...'
: 'Processing'
}</button>
</div>
<div className='flex flex-col items-center w-full md:w-1/2 gap-y-1'>
{embeddings.length > 0 && (<>
<label className='text-lg font-medium'>Dimensions</label>
<input
type="range"
min="64"
max="768"
step="1"
value={dimensions}
onChange={e => {
setDimensions(e.target.value);
}}
className="w-[98%] h-[10px]"
/>
<p className="font-bold text-sm">{dimensions}</p>
<div className='w-full flex flex-col gap-y-1'>
<label className='text-lg font-medium text-center mt-1'>Results</label>
<div className='flex flex-col gap-y-1'>
{results.map((result, i) => (
<div key={i} className='flex gap-x-2 border mx-2 p-1'>
<span className='font-bold'>{result.similarity.toFixed(3)}</span>
<span>{result.sentence}</span>
</div>
))}
</div>
</div>
</>)
}
</div>
</div>
</div>
)
}
export default App
| transformers.js/examples/adaptive-retrieval/src/App.jsx/0 | {
"file_path": "transformers.js/examples/adaptive-retrieval/src/App.jsx",
"repo_id": "transformers.js",
"token_count": 2829
} |
@tailwind base;
@tailwind components;
@tailwind utilities;
:root {
font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif;
line-height: 1.5;
font-weight: 400;
color-scheme: light dark;
color: rgba(255, 255, 255, 0.87);
background-color: #242424;
font-synthesis: none;
text-rendering: optimizeLegibility;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
-webkit-text-size-adjust: 100%;
}
a {
font-weight: 500;
color: #646cff;
text-decoration: inherit;
}
a:hover {
color: #535bf2;
}
body {
margin: 0;
display: flex;
place-items: center;
}
h1 {
font-size: 3.2em;
line-height: 1.1;
}
button {
border-radius: 8px;
border: 1px solid transparent;
padding: 0.6em 1.2em;
font-size: 1em;
font-weight: 500;
font-family: inherit;
background-color: #1a1a1a;
cursor: pointer;
transition: border-color 0.25s;
}
button:hover {
border-color: #646cff;
}
button:focus,
button:focus-visible {
outline: 4px auto -webkit-focus-ring-color;
}
@media (prefers-color-scheme: light) {
:root {
color: #213547;
background-color: #ffffff;
}
a:hover {
color: #747bff;
}
button {
background-color: #f9f9f9;
}
}
| transformers.js/examples/code-completion/src/index.css/0 | {
"file_path": "transformers.js/examples/code-completion/src/index.css",
"repo_id": "transformers.js",
"token_count": 514
} |
{
"manifest_version": 3,
"name": "extension",
"description": "Transformers.js | Sample browser extension",
"version": "0.0.1",
"permissions": [
"activeTab",
"scripting",
"contextMenus",
"storage",
"unlimitedStorage"
],
"background": {
"service_worker": "background.js",
"type": "module"
},
"content_scripts": [
{
"matches": [
"<all_urls>"
],
"js": [
"content.js"
]
}
],
"minimum_chrome_version": "92",
"action": {
"default_icon": {
"16": "icons/icon.png",
"24": "icons/icon.png",
"32": "icons/icon.png"
},
"default_title": "Transformers.js",
"default_popup": "popup.html"
},
"content_security_policy": {
"extension_pages": "script-src 'self' 'wasm-unsafe-eval'"
},
"icons": {
"16": "icons/icon.png",
"48": "icons/icon.png",
"128": "icons/icon.png"
}
} | transformers.js/examples/extension/public/manifest.json/0 | {
"file_path": "transformers.js/examples/extension/public/manifest.json",
"repo_id": "transformers.js",
"token_count": 421
} |
@tailwind base;
@tailwind components;
@tailwind utilities;
@layer utilities {
.scrollbar-thin::-webkit-scrollbar {
@apply w-2;
}
.scrollbar-thin::-webkit-scrollbar-track {
@apply rounded-full bg-gray-100 dark:bg-gray-700;
}
.scrollbar-thin::-webkit-scrollbar-thumb {
@apply rounded-full bg-gray-300 dark:bg-gray-600;
}
.scrollbar-thin::-webkit-scrollbar-thumb:hover {
@apply bg-gray-500;
}
}
| transformers.js/examples/florence2-webgpu/src/index.css/0 | {
"file_path": "transformers.js/examples/florence2-webgpu/src/index.css",
"repo_id": "transformers.js",
"token_count": 173
} |
import { pipeline } from "@huggingface/transformers";
// Use the Singleton pattern to enable lazy construction of the pipeline.
class PipelineSingleton {
static task = 'text-classification';
static model = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english';
static instance = null;
static async getInstance(progress_callback = null) {
this.instance ??= pipeline(this.task, this.model, { progress_callback });
return this.instance;
}
}
// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
// Retrieve the classification pipeline. When called for the first time,
// this will load the pipeline and save it for future use.
const classifier = await PipelineSingleton.getInstance(x => {
// We also add a progress callback to the pipeline so that we can
// track model loading.
self.postMessage(x);
});
// Actually perform the classification
const output = await classifier(event.data.text);
// Send the output back to the main thread
self.postMessage({
status: 'complete',
output: output,
});
});
| transformers.js/examples/next-client/src/app/worker.js/0 | {
"file_path": "transformers.js/examples/next-client/src/app/worker.js",
"repo_id": "transformers.js",
"token_count": 369
} |
// The full list of languages in FLORES-200 is available here:
// https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200
const LANGUAGES = {
"Acehnese (Arabic script)": "ace_Arab",
"Acehnese (Latin script)": "ace_Latn",
"Afrikaans": "afr_Latn",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"Armenian": "hye_Armn",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Ayacucho Quechua": "quy_Latn",
"Balinese": "ban_Latn",
"Bambara": "bam_Latn",
"Banjar (Arabic script)": "bjn_Arab",
"Banjar (Latin script)": "bjn_Latn",
"Bashkir": "bak_Cyrl",
"Basque": "eus_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Burmese": "mya_Mymr",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Central Aymara": "ayr_Latn",
"Central Kanuri (Arabic script)": "knc_Arab",
"Central Kanuri (Latin script)": "knc_Latn",
"Central Kurdish": "ckb_Arab",
"Chhattisgarhi": "hne_Deva",
"Chinese (Simplified)": "zho_Hans",
"Chinese (Traditional)": "zho_Hant",
"Chokwe": "cjk_Latn",
"Crimean Tatar": "crh_Latn",
"Croatian": "hrv_Latn",
"Czech": "ces_Latn",
"Danish": "dan_Latn",
"Dari": "prs_Arab",
"Dutch": "nld_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Eastern Panjabi": "pan_Guru",
"Eastern Yiddish": "ydd_Hebr",
"Egyptian Arabic": "arz_Arab",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Galician": "glg_Latn",
"Ganda": "lug_Latn",
"Georgian": "kat_Geor",
"German": "deu_Latn",
"Greek": "ell_Grek",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Halh Mongolian": "khk_Cyrl",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Hungarian": "hun_Latn",
"Icelandic": "isl_Latn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Irish": "gle_Latn",
"Italian": "ita_Latn",
"Japanese": "jpn_Jpan",
"Javanese": "jav_Latn",
"Jingpho": "kac_Latn",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Kabyle": "kab_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri (Arabic script)": "kas_Arab",
"Kashmiri (Devanagari script)": "kas_Deva",
"Kazakh": "kaz_Cyrl",
"Khmer": "khm_Khmr",
"Kikongo": "kon_Latn",
"Kikuyu": "kik_Latn",
"Kimbundu": "kmb_Latn",
"Kinyarwanda": "kin_Latn",
"Korean": "kor_Hang",
"Kyrgyz": "kir_Cyrl",
"Lao": "lao_Laoo",
"Latgalian": "ltg_Latn",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Luba-Kasai": "lua_Latn",
"Luo": "luo_Latn",
"Luxembourgish": "ltz_Latn",
"Macedonian": "mkd_Cyrl",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Maltese": "mlt_Latn",
"Maori": "mri_Latn",
"Marathi": "mar_Deva",
"Meitei (Bengali script)": "mni_Beng",
"Mesopotamian Arabic": "acm_Arab",
"Minangkabau (Arabic script)": "min_Arab",
"Minangkabau (Latin script)": "min_Latn",
"Mizo": "lus_Latn",
"Modern Standard Arabic (Romanized)": "arb_Latn",
"Modern Standard Arabic": "arb_Arab",
"Moroccan Arabic": "ary_Arab",
"Mossi": "mos_Latn",
"Najdi Arabic": "ars_Arab",
"Nepali": "npi_Deva",
"Nigerian Fulfulde": "fuv_Latn",
"North Azerbaijani": "azj_Latn",
"North Levantine Arabic": "apc_Arab",
"Northern Kurdish": "kmr_Latn",
"Northern Sotho": "nso_Latn",
"Northern Uzbek": "uzn_Latn",
"Norwegian Bokmål": "nob_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Papiamento": "pap_Latn",
"Plateau Malagasy": "plt_Latn",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Samoan": "smo_Latn",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sardinian": "srd_Latn",
"Scottish Gaelic": "gla_Latn",
"Serbian": "srp_Cyrl",
"Shan": "shn_Mymr",
"Shona": "sna_Latn",
"Sicilian": "scn_Latn",
"Silesian": "szl_Latn",
"Sindhi": "snd_Arab",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Somali": "som_Latn",
"South Azerbaijani": "azb_Arab",
"South Levantine Arabic": "ajp_Arab",
"Southern Pashto": "pbt_Arab",
"Southern Sotho": "sot_Latn",
"Southwestern Dinka": "dik_Latn",
"Spanish": "spa_Latn",
"Standard Latvian": "lvs_Latn",
"Standard Malay": "zsm_Latn",
"Standard Tibetan": "bod_Tibt",
"Sundanese": "sun_Latn",
"Swahili": "swh_Latn",
"Swati": "ssw_Latn",
"Swedish": "swe_Latn",
"Tagalog": "tgl_Latn",
"Tajik": "tgk_Cyrl",
"Tamasheq (Latin script)": "taq_Latn",
"Tamasheq (Tifinagh script)": "taq_Tfng",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Ta’izzi-Adeni Arabic": "acq_Arab",
"Telugu": "tel_Telu",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tok Pisin": "tpi_Latn",
"Tosk Albanian": "als_Latn",
"Tsonga": "tso_Latn",
"Tswana": "tsn_Latn",
"Tumbuka": "tum_Latn",
"Tunisian Arabic": "aeb_Arab",
"Turkish": "tur_Latn",
"Turkmen": "tuk_Latn",
"Twi": "twi_Latn",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Uyghur": "uig_Arab",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Welsh": "cym_Latn",
"West Central Oromo": "gaz_Latn",
"Western Persian": "pes_Arab",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Zulu": "zul_Latn",
}
export default function LanguageSelector({ type, onChange, defaultLanguage }) {
return (
<div className='language-selector'>
<label>{type}: </label>
<select onChange={onChange} defaultValue={defaultLanguage}>
{Object.entries(LANGUAGES).map(([key, value]) => {
return <option key={key} value={value}>{key}</option>
})}
</select>
</div>
)
} | transformers.js/examples/react-translator/src/components/LanguageSelector.jsx/0 | {
"file_path": "transformers.js/examples/react-translator/src/components/LanguageSelector.jsx",
"repo_id": "transformers.js",
"token_count": 3102
} |
// Reference the elements we will use
const statusLabel = document.getElementById('status');
const fileUpload = document.getElementById('upload');
const imageContainer = document.getElementById('container');
const example = document.getElementById('example');
const maskCanvas = document.getElementById('mask-output');
const uploadButton = document.getElementById('upload-button');
const resetButton = document.getElementById('reset-image');
const clearButton = document.getElementById('clear-points');
const cutButton = document.getElementById('cut-mask');
// State variables
let lastPoints = null;
let isEncoded = false;
let isDecoding = false;
let isMultiMaskMode = false;
let modelReady = false;
let imageDataURI = null;
// Constants
const BASE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/';
const EXAMPLE_URL = BASE_URL + 'corgi.jpg';
// Create a web worker so that the main (UI) thread is not blocked during inference.
const worker = new Worker(
new URL('./worker.js', import.meta.url),
{ type: 'module' }
);
// Preload star and cross images to avoid lag on first click
const star = new Image();
star.src = BASE_URL + 'star-icon.png';
star.className = 'icon';
const cross = new Image();
cross.src = BASE_URL + 'cross-icon.png';
cross.className = 'icon';
// Set up message handler
worker.addEventListener('message', (e) => {
const { type, data } = e.data;
if (type === 'ready') {
modelReady = true;
statusLabel.textContent = 'Ready';
} else if (type === 'decode_result') {
isDecoding = false;
if (!isEncoded) {
return; // We are not ready to decode yet
}
if (!isMultiMaskMode && lastPoints) {
// Perform decoding with the last point
decode();
lastPoints = null;
}
const { mask, scores } = data;
// Update canvas dimensions (if different)
if (maskCanvas.width !== mask.width || maskCanvas.height !== mask.height) {
maskCanvas.width = mask.width;
maskCanvas.height = mask.height;
}
// Create context and allocate buffer for pixel data
const context = maskCanvas.getContext('2d');
const imageData = context.createImageData(maskCanvas.width, maskCanvas.height);
// Select best mask
const numMasks = scores.length; // 3
let bestIndex = 0;
for (let i = 1; i < numMasks; ++i) {
if (scores[i] > scores[bestIndex]) {
bestIndex = i;
}
}
statusLabel.textContent = `Segment score: ${scores[bestIndex].toFixed(2)}`;
// Fill mask with colour
const pixelData = imageData.data;
for (let i = 0; i < pixelData.length; ++i) {
if (mask.data[numMasks * i + bestIndex] === 1) {
const offset = 4 * i;
pixelData[offset] = 0; // red
pixelData[offset + 1] = 114; // green
pixelData[offset + 2] = 189; // blue
pixelData[offset + 3] = 255; // alpha
}
}
// Draw image data to context
context.putImageData(imageData, 0, 0);
} else if (type === 'segment_result') {
if (data === 'start') {
statusLabel.textContent = 'Extracting image embedding...';
} else {
statusLabel.textContent = 'Embedding extracted!';
isEncoded = true;
}
}
});
function decode() {
isDecoding = true;
worker.postMessage({ type: 'decode', data: lastPoints });
}
function clearPointsAndMask() {
// Reset state
isMultiMaskMode = false;
lastPoints = null;
// Remove points from previous mask (if any)
document.querySelectorAll('.icon').forEach(e => e.remove());
// Disable cut button
cutButton.disabled = true;
// Reset mask canvas
maskCanvas.getContext('2d').clearRect(0, 0, maskCanvas.width, maskCanvas.height);
}
clearButton.addEventListener('click', clearPointsAndMask);
resetButton.addEventListener('click', () => {
// Update state
isEncoded = false;
imageDataURI = null;
// Indicate to worker that we have reset the state
worker.postMessage({ type: 'reset' });
// Clear points and mask (if present)
clearPointsAndMask();
// Update UI
cutButton.disabled = true;
imageContainer.style.backgroundImage = 'none';
uploadButton.style.display = 'flex';
statusLabel.textContent = 'Ready';
});
function segment(data) {
// Update state
isEncoded = false;
if (!modelReady) {
statusLabel.textContent = 'Loading model...';
}
imageDataURI = data;
// Update UI
imageContainer.style.backgroundImage = `url(${data})`;
uploadButton.style.display = 'none';
cutButton.disabled = true;
// Instruct worker to segment the image
worker.postMessage({ type: 'segment', data });
}
// Handle file selection
fileUpload.addEventListener('change', function (e) {
const file = e.target.files[0];
if (!file) {
return;
}
const reader = new FileReader();
// Set up a callback when the file is loaded
reader.onload = e2 => segment(e2.target.result);
reader.readAsDataURL(file);
});
example.addEventListener('click', (e) => {
e.preventDefault();
segment(EXAMPLE_URL);
});
function addIcon({ point, label }) {
const icon = (label === 1 ? star : cross).cloneNode();
icon.style.left = `${point[0] * 100}%`;
icon.style.top = `${point[1] * 100}%`;
imageContainer.appendChild(icon);
}
// Attach hover event to image container
imageContainer.addEventListener('mousedown', e => {
if (e.button !== 0 && e.button !== 2) {
return; // Ignore other buttons
}
if (!isEncoded) {
return; // Ignore if not encoded yet
}
if (!isMultiMaskMode) {
lastPoints = [];
isMultiMaskMode = true;
cutButton.disabled = false;
}
const point = getPoint(e);
lastPoints.push(point);
// add icon
addIcon(point);
decode();
});
// Clamp a value inside a range [min, max]
function clamp(x, min = 0, max = 1) {
return Math.max(Math.min(x, max), min)
}
function getPoint(e) {
// Get bounding box
const bb = imageContainer.getBoundingClientRect();
// Get the mouse coordinates relative to the container
const mouseX = clamp((e.clientX - bb.left) / bb.width);
const mouseY = clamp((e.clientY - bb.top) / bb.height);
return {
point: [mouseX, mouseY],
label: e.button === 2 // right click
? 0 // negative prompt
: 1, // positive prompt
}
}
// Do not show context menu on right click
imageContainer.addEventListener('contextmenu', e => {
e.preventDefault();
});
// Attach hover event to image container
imageContainer.addEventListener('mousemove', e => {
if (!isEncoded || isMultiMaskMode) {
// Ignore mousemove events if the image is not encoded yet,
// or we are in multi-mask mode
return;
}
lastPoints = [getPoint(e)];
if (!isDecoding) {
decode(); // Only decode if we are not already decoding
}
});
// Handle cut button click
cutButton.addEventListener('click', () => {
const [w, h] = [maskCanvas.width, maskCanvas.height];
// Get the mask pixel data
const maskContext = maskCanvas.getContext('2d');
const maskPixelData = maskContext.getImageData(0, 0, w, h);
// Load the image
const image = new Image();
image.crossOrigin = 'anonymous';
image.onload = async () => {
// Create a new canvas to hold the image
const imageCanvas = new OffscreenCanvas(w, h);
const imageContext = imageCanvas.getContext('2d');
imageContext.drawImage(image, 0, 0, w, h);
const imagePixelData = imageContext.getImageData(0, 0, w, h);
// Create a new canvas to hold the cut-out
const cutCanvas = new OffscreenCanvas(w, h);
const cutContext = cutCanvas.getContext('2d');
const cutPixelData = cutContext.getImageData(0, 0, w, h);
// Copy the image pixel data to the cut canvas
for (let i = 3; i < maskPixelData.data.length; i += 4) {
if (maskPixelData.data[i] > 0) {
for (let j = 0; j < 4; ++j) {
const offset = i - j;
cutPixelData.data[offset] = imagePixelData.data[offset];
}
}
}
cutContext.putImageData(cutPixelData, 0, 0);
// Download image
const link = document.createElement('a');
link.download = 'image.png';
link.href = URL.createObjectURL(await cutCanvas.convertToBlob());
link.click();
link.remove();
}
image.src = imageDataURI;
});
| transformers.js/examples/segment-anything-client/index.js/0 | {
"file_path": "transformers.js/examples/segment-anything-client/index.js",
"repo_id": "transformers.js",
"token_count": 3452
} |
export default function Progress({ text, percentage }) {
percentage ??= 0;
return (
<div className="relative text-black bg-white rounded-lg text-left overflow-hidden">
<div className='px-2 w-[1%] h-full bg-blue-500 whitespace-nowrap' style={{ width: `${percentage}%` }}>
{text} ({`${percentage.toFixed(2)}%`})
</div>
</div>
);
}
| transformers.js/examples/text-to-speech-client/src/components/Progress.jsx/0 | {
"file_path": "transformers.js/examples/text-to-speech-client/src/components/Progress.jsx",
"repo_id": "transformers.js",
"token_count": 144
} |
import { useCallback, useEffect, useRef, useState } from 'react'
import { Token } from './components/Token'
import './App.css'
// Define list of tokenizers and their corresponding human-readable names
const TOKENIZER_OPTIONS = Object.freeze({
'Xenova/gpt-4': 'gpt-4 / gpt-3.5-turbo / text-embedding-ada-002',
'Xenova/text-davinci-003': 'text-davinci-003 / text-davinci-002',
'Xenova/gpt-3': 'gpt-3',
'Xenova/grok-1-tokenizer': 'Grok-1',
'Xenova/claude-tokenizer': 'Claude',
'Xenova/mistral-tokenizer-v3': 'Mistral v3',
'Xenova/mistral-tokenizer-v1': 'Mistral v1',
'Xenova/gemma-tokenizer': 'Gemma',
'Xenova/llama-3-tokenizer': 'Llama 3',
'Xenova/llama-tokenizer': 'LLaMA / Llama 2',
'Xenova/c4ai-command-r-v01-tokenizer': 'Cohere Command-R',
'Xenova/t5-small': 'T5',
'Xenova/bert-base-cased': 'bert-base-cased',
'': 'Custom',
})
function App() {
// Allow user to set tokenizer and text via URL query parameters
const urlParams = new URLSearchParams(window.location.search);
const tokenizerParam = urlParams.get('tokenizer');
const textParam = urlParams.get('text');
const [tokenIds, setTokenIds] = useState([]);
const [decodedTokens, setDecodedTokens] = useState([]);
const [margins, setMargins] = useState([]);
const [outputOption, setOutputOption] = useState('text');
const [tokenizer, setTokenizer] = useState(tokenizerParam ?? 'Xenova/gpt-4');
const [customTokenizer, setCustomTokenizer] = useState('');
const textareaRef = useRef(null);
const outputRef = useRef(null);
// Create a reference to the worker object.
const worker = useRef(null);
// We use the `useEffect` hook to set up the worker as soon as the `App` component is mounted.
useEffect(() => {
if (!worker.current) {
// Create the worker if it does not yet exist.
worker.current = new Worker(new URL('./worker.js', import.meta.url), {
type: 'module'
});
}
// Create a callback function for messages from the worker thread.
const onMessageReceived = (e) => {
setTokenIds(e.data.token_ids);
setDecodedTokens(e.data.decoded);
setMargins(e.data.margins);
};
// Attach the callback function as an event listener.
worker.current.addEventListener('message', onMessageReceived);
// Define a cleanup function for when the component is unmounted.
return () => worker.current.removeEventListener('message', onMessageReceived);
}, []);
const resetOutput = useCallback(() => {
setOutputOption('text');
setTokenIds([]);
setDecodedTokens([]);
setMargins([]);
}, []);
const onInputChange = useCallback((e) => {
const model_id = tokenizer;
const text = e.target.value;
if (text.length > 10000) {
setOutputOption(null);
console.log('User most likely pasted in a large body of text (> 10k chars), so we hide the output (until specifically requested by the user).');
}
worker.current.postMessage({ model_id, text });
}, [tokenizer]);
useEffect(() => {
if (textParam) {
onInputChange({ target: { value: textParam } });
}
}, [onInputChange, textParam]);
const onTokenizerChange = useCallback((e) => {
const model_id = e.target.value;
setTokenizer(model_id);
if (!model_id) return;
worker.current.postMessage({ model_id, text: textareaRef.current.value });
}, []);
return (
<div className='w-full max-w-[720px] flex flex-col gap-4 items-center'>
<div>
<h1 className='text-5xl font-bold mb-2'>The Tokenizer Playground</h1>
<h2 className='text-lg font-normal'>Experiment with different tokenizers (running <a className="text-gray-900 underline" href="https://github.com/huggingface/transformers.js">locally</a> in your browser).</h2>
</div>
<div>
<select value={(tokenizer in TOKENIZER_OPTIONS && !customTokenizer) ? tokenizer : ''} onChange={(e) => {
resetOutput();
setCustomTokenizer('');
onTokenizerChange(e);
}} className="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2">
{Object.entries(TOKENIZER_OPTIONS).map(([value, label]) => (
<option key={value} value={value}>{label}</option>
))}
</select>
{(!(tokenizer in TOKENIZER_OPTIONS) || customTokenizer || tokenizer === '') && (
<input
type="text"
placeholder="Custom tokenizer"
defaultValue={customTokenizer || tokenizer}
onChange={(e) => {
setCustomTokenizer(e.target.value);
onTokenizerChange(e);
}}
className="bg-white border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full py-1 px-2 mt-1"
/>
)}
</div>
<textarea
ref={textareaRef}
onChange={onInputChange}
rows="8"
className="font-mono text-lg block w-full p-2.5 text-gray-900 bg-gray-50 rounded-lg border border-gray-200"
placeholder="Enter some text"
defaultValue={textParam ?? textareaRef.current?.value ?? ''}
></textarea>
<div className='flex justify-center gap-5'>
<div className='flex flex-col'>
<h2 className='font-semibold uppercase leading-4'>Tokens</h2>
<h3 className='font-semibold text-3xl'>{tokenIds.length.toLocaleString()}</h3>
</div>
<div className='flex flex-col'>
<h2 className='font-semibold uppercase leading-4'>Characters</h2>
<h3 className='font-semibold text-3xl'>{(textareaRef.current?.value.length ?? 0).toLocaleString()}</h3>
</div>
</div>
<div ref={outputRef} className='font-mono text-lg p-2.5 w-full bg-gray-100 rounded-lg border border-gray-200 whitespace-pre-wrap text-left h-[200px] overflow-y-auto'>
{outputOption === 'text' ? (
decodedTokens.map(
(token, index) => <Token key={index} text={token} position={index} margin={margins[index]} />
)
) : outputOption === 'token_ids' ? (
`[${tokenIds.join(', ')}]`
) : null}
</div>
<div className="flex items-center gap-2 self-end">
<div className="flex items-center">
<input checked={outputOption === 'text'} onChange={() => setOutputOption('text')} id="output-radio-1" type="radio" value="" name="output-radio" className="w-4 h-4 text-blue-600 bg-gray-100 border-gray-300 focus:ring-blue-500" />
<label htmlFor="output-radio-1" className="ml-1 text-sm font-medium text-gray-900 dark:text-gray-300">Text</label>
</div>
<div className="flex items-center">
<input checked={outputOption === 'token_ids'} onChange={() => setOutputOption('token_ids')} id="output-radio-2" type="radio" value="" name="output-radio" className="w-4 h-4 text-blue-600 bg-gray-100 border-gray-300 focus:ring-blue-500" />
<label htmlFor="output-radio-2" className="ml-1 text-sm font-medium text-gray-900 dark:text-gray-300">Token IDs</label>
</div>
<div className="flex items-center">
<input checked={outputOption === null} onChange={() => setOutputOption(null)} id="output-radio-3" type="radio" value="" name="output-radio" className="w-4 h-4 text-blue-600 bg-gray-100 border-gray-300 focus:ring-blue-500" />
<label htmlFor="output-radio-3" className="ml-1 text-sm font-medium text-gray-900 dark:text-gray-300">Hide</label>
</div>
</div>
</div >
)
}
export default App
| transformers.js/examples/tokenizer-playground/src/App.jsx/0 | {
"file_path": "transformers.js/examples/tokenizer-playground/src/App.jsx",
"repo_id": "transformers.js",
"token_count": 3075
} |
* {
box-sizing: border-box;
padding: 0;
margin: 0;
font-family: sans-serif;
}
html,
body {
height: 100%;
}
body {
padding: 16px 32px;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
}
h1 {
text-align: center;
}
#status {
min-height: 16px;
margin: 8px 0;
text-align: center;
}
button {
transition: all .25s;
background: rgba(40, 44, 52, 0.05);
border: 1px solid transparent;
border-radius: 6px;
color: #3080d0;
text-decoration: none !important;
display: inline-block;
font-size: 14px;
font-weight: 500;
padding: 8px 16px;
cursor: pointer;
-webkit-user-select: none;
-moz-user-select: none;
user-select: none;
}
button:disabled {
background: rgba(40, 44, 52, 0.1);
color: #a0a0a0;
cursor: not-allowed;
}
button:hover {
background: rgba(40, 44, 52, 0.1);
}
p {
text-align: center;
font-size: 12px;
max-width: 600px;
padding: 8px;
}
#chart-container {
position: relative;
height: 60vh;
width: min(90vw, 800px);
padding-right: 50px;
margin-bottom: 10px;
}
details {
position: fixed;
background-color: white;
right: 0;
top: 0;
padding: 16px;
}
summary {
text-align: right;
}
hr {
margin: 8px 0;
}
| transformers.js/examples/webgpu-embedding-benchmark/style.css/0 | {
"file_path": "transformers.js/examples/webgpu-embedding-benchmark/style.css",
"repo_id": "transformers.js",
"token_count": 518
} |
import { useMemo } from "react";
const Chunk = ({ chunk, currentTime, onClick, ...props }) => {
const { text, timestamp } = chunk;
const [start, end] = timestamp;
const bolded = start <= currentTime && currentTime < end;
return (
<span {...props}>
{text.startsWith(' ') ? " " : ""}
<span
onClick={onClick}
className="text-md text-gray-600 cursor-pointer hover:text-red-600"
title={timestamp.map(x => x.toFixed(2)).join(' → ')}
style={{
textDecoration: bolded ? 'underline' : 'none',
textShadow: bolded ? '0 0 1px #000' : 'none',
}}
>{text.trim()}</span>
</span>
)
}
const Transcript = ({ transcript, currentTime, setCurrentTime, ...props }) => {
const jsonTranscript = useMemo(() => {
return JSON.stringify(transcript, null, 2)
// post-process the JSON to make it more readable
.replace(/( {4}"timestamp": )\[\s+(\S+)\s+(\S+)\s+\]/gm, "$1[$2 $3]");
}, [transcript]);
const downloadTranscript = () => {
const blob = new Blob([jsonTranscript], { type: 'application/json' });
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = 'transcript.json';
a.click();
URL.revokeObjectURL(url);
}
return (<>
<div {...props}>
{
transcript.chunks.map((chunk, i) => <Chunk key={i} chunk={chunk} currentTime={currentTime} onClick={e => {
setCurrentTime(chunk.timestamp[0]) // Set to start of chunk
}} />)
}
</div>
<div className="flex justify-center border-t text-sm text-gray-600 max-h-[150px] overflow-y-auto p-2 scrollbar-thin">
<button
className="flex items-center border px-2 py-1 rounded-lg bg-green-400 text-white hover:bg-green-500"
onClick={downloadTranscript}
>
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth={1.5} stroke="currentColor" className="size-6 mr-1">
<path strokeLinecap="round" strokeLinejoin="round" d="M3 16.5v2.25A2.25 2.25 0 0 0 5.25 21h13.5A2.25 2.25 0 0 0 21 18.75V16.5M16.5 12 12 16.5m0 0L7.5 12m4.5 4.5V3" />
</svg>
Download transcript
</button>
</div>
</>)
};
export default Transcript;
| transformers.js/examples/whisper-word-timestamps/src/components/Transcript.jsx/0 | {
"file_path": "transformers.js/examples/whisper-word-timestamps/src/components/Transcript.jsx",
"repo_id": "transformers.js",
"token_count": 1253
} |
import { env, pipeline } from '@xenova/transformers';
// Skip local model check since we are downloading the model from the Hugging Face Hub.
env.allowLocalModels = false;
class MyZeroShotClassificationPipeline {
static task = 'zero-shot-classification';
static model = 'MoritzLaurer/deberta-v3-xsmall-zeroshot-v1.1-all-33';
static instance = null;
static async getInstance(progress_callback = null) {
if (this.instance === null) {
this.instance = pipeline(this.task, this.model, {
quantized: true,
progress_callback,
});
}
return this.instance;
}
}
// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
// Retrieve the pipeline. When called for the first time,
// this will load the pipeline and save it for future use.
const classifier = await MyZeroShotClassificationPipeline.getInstance(x => {
// We also add a progress callback to the pipeline so that we can
// track model loading.
self.postMessage(x);
});
const { text, labels } = event.data;
const split = text.split('\n');
for (const line of split) {
const output = await classifier(line, labels, {
hypothesis_template: 'This text is about {}.',
multi_label: true,
});
// Send the output back to the main thread
self.postMessage({ status: 'output', output });
}
// Send the output back to the main thread
self.postMessage({ status: 'complete' });
});
| transformers.js/examples/zero-shot-classification/src/worker.js/0 | {
"file_path": "transformers.js/examples/zero-shot-classification/src/worker.js",
"repo_id": "transformers.js",
"token_count": 584
} |
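The worker above only covers the model side. A minimal sketch of the main-thread counterpart is shown below; the worker URL and the assumption that any message other than `output`/`complete` is a loading-progress update are illustrative and not part of the file above.

```js
// Hypothetical main-thread side for the worker above (not part of the original file).
// It sends newline-separated text plus candidate labels and logs results as they stream back.
const worker = new Worker(new URL('./worker.js', import.meta.url), { type: 'module' });

worker.addEventListener('message', (event) => {
    const data = event.data;
    if (data.status === 'output') {
        // One zero-shot result per input line, in order.
        console.log(data.output.labels, data.output.scores);
    } else if (data.status === 'complete') {
        console.log('All lines classified.');
    } else {
        // Everything else is a model-loading progress update forwarded by the pipeline.
        console.log('loading:', data);
    }
});

worker.postMessage({
    text: 'I love transformers!\nThe weather is nice today.',
    labels: ['technology', 'weather', 'sports'],
});
```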
from optimum.exporters.onnx.model_configs import WhisperOnnxConfig
from optimum.exporters.onnx.base import ConfigBehavior
from typing import Dict
# List of [layer, head] pairs that select the cross-attention heads that are highly correlated to word-level timing.
# Source: https://gist.github.com/hollance/42e32852f24243b748ae6bc1f985b13a
ALIGNMENT_HEADS_MAPPING = {
'whisper-tiny.en': [[1, 0], [2, 0], [2, 5], [3, 0], [3, 1], [3, 2], [3, 3], [3, 4]],
'whisper-tiny': [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]],
'whisper-base.en': [[3, 3], [4, 7], [5, 1], [5, 5], [5, 7]],
'whisper-base': [[3, 1], [4, 2], [4, 3], [4, 7], [5, 1], [5, 2], [5, 4], [5, 6]],
'whisper-small.en': [[6, 6], [7, 0], [7, 3], [7, 8], [8, 2], [8, 5], [8, 7], [9, 0], [9, 4], [9, 8], [9, 10], [10, 0], [10, 1], [10, 2], [10, 3], [10, 6], [10, 11], [11, 2], [11, 4]],
'whisper-small': [[5, 3], [5, 9], [8, 0], [8, 4], [8, 7], [8, 8], [9, 0], [9, 7], [9, 9], [10, 5]],
'whisper-medium.en': [[11, 4], [14, 1], [14, 12], [14, 14], [15, 4], [16, 0], [16, 4], [16, 9], [17, 12], [17, 14], [18, 7], [18, 10], [18, 15], [20, 0], [20, 3], [20, 9], [20, 14], [21, 12]],
'whisper-medium': [[13, 15], [15, 4], [15, 15], [16, 1], [20, 0], [23, 4]],
'whisper-large-v3-turbo': [[2, 4], [2, 11], [3, 3], [3, 6], [3, 11], [3, 14]],
'whisper-large-v2': [[10, 12], [13, 17], [16, 11], [16, 12], [16, 13], [17, 15], [17, 16], [18, 4], [18, 11], [18, 19], [19, 11], [21, 2], [21, 3], [22, 3], [22, 9], [22, 12], [23, 5], [23, 7], [23, 13], [25, 5], [26, 1], [26, 12], [27, 15]],
'whisper-large': [[9, 19], [11, 2], [11, 4], [11, 17], [22, 7], [22, 11], [22, 17], [23, 2], [23, 15]],
}
class CustomWhisperOnnxConfig(WhisperOnnxConfig):
"""
Custom ONNX config for Whisper models to output cross attentions.
Needed to compute token-level timestamps.
"""
@property
def outputs(self) -> Dict[str, Dict[int, str]]:
common_outputs = super().outputs
if self._behavior is ConfigBehavior.DECODER:
for i in range(self._config.decoder_layers):
common_outputs[f"cross_attentions.{i}"] = {
0: "batch_size",
2: "decoder_sequence_length",
3: "encoder_sequence_length_out"
}
return common_outputs
def get_main_export_kwargs(config, task):
# See https://github.com/huggingface/optimum/blob/a39b1f5637af9725c0c788b86ca1fdf71ad3dcc2/docs/source/exporters/onnx/usage_guides/export_a_model.mdx#L264
custom_config = CustomWhisperOnnxConfig(config=config, task=task)
custom_onnx_configs = dict(
encoder_model=custom_config.with_behavior("encoder"),
decoder_model=custom_config.with_behavior("decoder", use_past=True, use_past_in_inputs=False),
decoder_with_past_model=custom_config.with_behavior("decoder", use_past=True, use_past_in_inputs=True),
)
return dict(
model_kwargs={"output_attentions": True},
custom_onnx_configs=custom_onnx_configs,
)
def get_alignment_heads(config):
if getattr(config, '_name_or_path', None) is None:
raise ValueError(
"Unable to determine model type from config. Please specify `_name_or_path` in the config.")
for model_name, heads in ALIGNMENT_HEADS_MAPPING.items():
if model_name in config._name_or_path:
return heads
raise ValueError(
f"Unknown model type: {config._name_or_path}. Please add one of the following model types to `_name_or_path` in the config file: {list(ALIGNMENT_HEADS_MAPPING.keys())}")
| transformers.js/scripts/extra/whisper.py/0 | {
"file_path": "transformers.js/scripts/extra/whisper.py",
"repo_id": "transformers.js",
"token_count": 1700
} |
import { Processor } from "../../base/processing_utils.js";
import { AutoImageProcessor } from "../auto/image_processing_auto.js";
import { AutoTokenizer } from "../../tokenizers.js";
export class Florence2Processor extends Processor {
static tokenizer_class = AutoTokenizer
static image_processor_class = AutoImageProcessor
constructor(config, components) {
super(config, components);
const {
// @ts-expect-error TS2339
tasks_answer_post_processing_type,
// @ts-expect-error TS2339
task_prompts_without_inputs,
// @ts-expect-error TS2339
task_prompts_with_input,
} = this.image_processor.config;
/** @type {Map<string, string>} */
this.tasks_answer_post_processing_type = new Map(Object.entries(tasks_answer_post_processing_type ?? {}));
/** @type {Map<string, string>} */
this.task_prompts_without_inputs = new Map(Object.entries(task_prompts_without_inputs ?? {}));
/** @type {Map<string, string>} */
this.task_prompts_with_input = new Map(Object.entries(task_prompts_with_input ?? {}));
this.regexes = {
quad_boxes: /(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>/gm,
bboxes: /([^<]+)?<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>/gm,
}
this.size_per_bin = 1000;
}
/**
* Helper function to construct prompts from input texts
* @param {string|string[]} text
* @returns {string[]}
*/
construct_prompts(text) {
if (typeof text === 'string') {
text = [text];
}
const prompts = [];
for (const t of text) {
// 1. fixed task prompts without additional inputs
if (this.task_prompts_without_inputs.has(t)) {
prompts.push(this.task_prompts_without_inputs.get(t));
}
// 2. task prompts with additional inputs
else {
for (const [task, prompt] of this.task_prompts_with_input) {
if (t.includes(task)) {
prompts.push(prompt.replaceAll('{input}', t).replaceAll(task, ''));
break;
}
}
// 3. default prompt
if (prompts.length !== text.length) {
prompts.push(t);
}
}
}
return prompts;
}
/**
* Post-process the output of the model to each of the task outputs.
* @param {string} text The text to post-process.
* @param {string} task The task to post-process the text for.
* @param {[number, number]} image_size The size of the image. height x width.
*/
post_process_generation(text, task, image_size) {
const task_answer_post_processing_type = this.tasks_answer_post_processing_type.get(task) ?? 'pure_text';
// remove the special tokens
text = text.replaceAll('<s>', '').replaceAll('</s>', '');
let final_answer;
switch (task_answer_post_processing_type) {
case 'pure_text':
final_answer = text;
break;
case 'description_with_bboxes':
case 'bboxes':
case 'phrase_grounding':
case 'ocr':
const key = task_answer_post_processing_type === 'ocr' ? 'quad_boxes' : 'bboxes';
const matches = text.matchAll(this.regexes[key]);
const labels = [];
const items = [];
for (const [_, label, ...locations] of matches) {
// Push new label, or duplicate the last label
labels.push(label ? label.trim() : labels.at(-1) ?? '');
items.push(locations.map((x, i) =>
// NOTE: Add 0.5 to use the center position of the bin as the coordinate.
(Number(x) + 0.5) / this.size_per_bin * image_size[i % 2])
);
}
final_answer = { labels, [key]: items };
break;
default:
throw new Error(`Task "${task}" (of type "${task_answer_post_processing_type}") not yet implemented.`);
}
return { [task]: final_answer }
}
// NOTE: images and text are switched from the python version
// `images` is required, `text` is optional
async _call(images, text=null, kwargs = {}) {
if (!images && !text){
throw new Error('Either text or images must be provided');
}
const image_inputs = await this.image_processor(images, kwargs);
const text_inputs = text ? this.tokenizer(text, kwargs) : {};
return {
...image_inputs,
...text_inputs,
}
}
}
| transformers.js/src/models/florence2/processing_florence2.js/0 | {
"file_path": "transformers.js/src/models/florence2/processing_florence2.js",
"repo_id": "transformers.js",
"token_count": 2334
} |
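A rough usage sketch for the processor above follows. The checkpoint id and the hard-coded `generated_text` are placeholders; a real flow would run `model.generate` between the two processor calls.

```js
// Illustrative only: checkpoint id and generated_text are placeholders.
import { Florence2Processor } from '@huggingface/transformers';

const processor = await Florence2Processor.from_pretrained('onnx-community/Florence-2-base-ft'); // assumed id

// Task tokens are expanded into full prompts via the config-driven maps;
// these prompts (plus an image) would then be passed to `processor(image, prompts)`.
const prompts = processor.construct_prompts('<OD>');

// A Florence-2 model would produce something like this for object detection:
const generated_text = 'car<loc_52><loc_333><loc_932><loc_774>'; // placeholder model output

// Location bins are mapped back to pixel coordinates using the image size.
const result = processor.post_process_generation(generated_text, '<OD>', [512, 512]);
console.log(result); // { '<OD>': { labels: ['car'], bboxes: [[...]] } }
```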
import { Processor } from '../../base/processing_utils.js';
import { PyAnnoteFeatureExtractor } from './feature_extraction_pyannote.js';
export class PyAnnoteProcessor extends Processor {
static feature_extractor_class = PyAnnoteFeatureExtractor
/**
* Calls the feature_extractor function with the given audio input.
* @param {any} audio The audio input to extract features from.
* @returns {Promise<any>} A Promise that resolves with the extracted features.
*/
async _call(audio) {
return await this.feature_extractor(audio)
}
/** @type {PyAnnoteFeatureExtractor['post_process_speaker_diarization']} */
post_process_speaker_diarization(...args) {
return /** @type {PyAnnoteFeatureExtractor} */(this.feature_extractor).post_process_speaker_diarization(...args);
}
get sampling_rate() {
return this.feature_extractor.config.sampling_rate;
}
}
| transformers.js/src/models/pyannote/processing_pyannote.js/0 | {
"file_path": "transformers.js/src/models/pyannote/processing_pyannote.js",
"repo_id": "transformers.js",
"token_count": 316
} |
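For context, a minimal diarization sketch built on the processor above might look like the following. The checkpoint id and the audio path are assumptions; only the processor calls mirror the class defined in this file.

```js
// Sketch only: checkpoint id and audio path are assumptions.
import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@huggingface/transformers';

const model_id = 'onnx-community/pyannote-segmentation-3.0'; // assumed checkpoint
const processor = await AutoProcessor.from_pretrained(model_id);
const model = await AutoModelForAudioFrameClassification.from_pretrained(model_id);

// `processor.sampling_rate` is forwarded from the feature extractor's config (see getter above).
const audio = await read_audio('audio.wav', processor.sampling_rate);

const inputs = await processor(audio);
const { logits } = await model(inputs);

// Delegates to PyAnnoteFeatureExtractor.post_process_speaker_diarization.
const [segments] = processor.post_process_speaker_diarization(logits, audio.length);
console.log(segments); // e.g. [{ id, start, end, confidence }, ...]
```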
import { FeatureExtractor, validate_audio_inputs } from "../../base/feature_extraction_utils.js";
import { Tensor } from "../../utils/tensor.js";
export class Wav2Vec2FeatureExtractor extends FeatureExtractor {
/**
* @param {Float32Array} input_values
* @returns {Float32Array}
*/
_zero_mean_unit_var_norm(input_values) {
// TODO support batch?
const sum = input_values.reduce((a, b) => a + b, 0);
const mean = sum / input_values.length;
const variance = input_values.reduce((a, b) => a + (b - mean) ** 2, 0) / input_values.length;
return input_values.map(x => (x - mean) / Math.sqrt(variance + 1e-7));
}
/**
* Asynchronously extracts features from a given audio using the provided configuration.
* @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array.
* @returns {Promise<{ input_values: Tensor; attention_mask: Tensor }>} A Promise resolving to an object containing the extracted input features and attention mask as Tensors.
*/
async _call(audio) {
validate_audio_inputs(audio, 'Wav2Vec2FeatureExtractor');
if (audio instanceof Float64Array) {
audio = new Float32Array(audio);
}
let input_values = audio;
// zero-mean and unit-variance normalization
if (this.config.do_normalize) {
input_values = this._zero_mean_unit_var_norm(input_values);
}
// TODO: allow user to pass in attention mask
const shape = [1, input_values.length];
return {
input_values: new Tensor('float32', input_values, shape),
attention_mask: new Tensor('int64', new BigInt64Array(input_values.length).fill(1n), shape)
};
}
}
| transformers.js/src/models/wav2vec2/feature_extraction_wav2vec2.js/0 | {
"file_path": "transformers.js/src/models/wav2vec2/feature_extraction_wav2vec2.js",
"repo_id": "transformers.js",
"token_count": 700
} |
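A short sanity-check sketch for the extractor above: with `do_normalize` enabled in the checkpoint's config, the returned `input_values` should have approximately zero mean. The checkpoint id is an assumption, and a synthetic waveform stands in for real audio.

```js
// Sketch only: checkpoint id is an assumption.
import { AutoFeatureExtractor } from '@huggingface/transformers';

const feature_extractor = await AutoFeatureExtractor.from_pretrained('Xenova/wav2vec2-base-960h');

// A short synthetic waveform in place of real audio.
const audio = Float32Array.from({ length: 16000 }, (_, i) => Math.sin(i / 100));

const { input_values, attention_mask } = await feature_extractor(audio);
console.log(input_values.dims);    // [1, 16000]
console.log(attention_mask.dims);  // [1, 16000]

// If do_normalize is set, values are zero-mean/unit-variance (up to the 1e-7 epsilon above).
console.log(input_values.mean().item()); // ~0
```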
/**
* @file Custom data structures.
*
* These are only used internally, meaning an end-user shouldn't
* need to access anything here.
*
* @module utils/data-structures
*/
/**
* Efficient Heap-based Implementation of a Priority Queue.
* It uses an array-based binary heap, where the root is at index `0`, and the
* children of node `i` are located at indices `2i + 1` and `2i + 2`, respectively.
*
* Adapted from the following sources:
* - https://stackoverflow.com/a/42919752/13989043 (original)
* - https://github.com/belladoreai/llama-tokenizer-js (minor improvements)
*/
export class PriorityQueue {
/**
* Create a new PriorityQueue.
     * @param {function(any, any): boolean} comparator Comparator function to determine priority. Defaults to a MaxHeap.
     * @param {number} maxSize The maximum number of elements to keep in the queue. Defaults to Infinity.
     */
constructor(comparator = (a, b) => a > b, maxSize = Infinity) {
this._heap = [];
this._comparator = comparator;
this._maxSize = maxSize;
}
/**
* The size of the queue
*/
get size() {
return this._heap.length;
}
/**
* Check if the queue is empty.
* @returns {boolean} `true` if the queue is empty, `false` otherwise.
*/
isEmpty() {
return this.size === 0;
}
/**
* Return the element with the highest priority in the queue.
* @returns {any} The highest priority element in the queue.
*/
peek() {
return this._heap[0];
}
/**
* Add one or more elements to the queue.
* @param {...any} values The values to push into the queue.
* @returns {number} The new size of the queue.
*/
push(...values) {
return this.extend(values);
}
/**
* Add multiple elements to the queue.
* @param {any[]} values The values to push into the queue.
* @returns {number} The new size of the queue.
*/
extend(values) {
for (const value of values) {
if (this.size < this._maxSize) {
this._heap.push(value);
this._siftUp();
} else {
// Get index of value with the lowest priority
const smallest = this._smallest();
// If the new value has higher priority than the smallest value in the heap
// then replace the smallest value with the new value and update the heap
if (this._comparator(value, this._heap[smallest])) {
this._heap[smallest] = value;
this._siftUpFrom(smallest);
}
}
}
return this.size;
}
/**
* Remove and return the element with the highest priority in the queue.
* @returns {any} The element with the highest priority in the queue.
*/
pop() {
const poppedValue = this.peek();
const bottom = this.size - 1;
if (bottom > 0) {
this._swap(0, bottom);
}
this._heap.pop();
this._siftDown();
return poppedValue;
}
/**
* Replace the element with the highest priority in the queue with a new value.
* @param {*} value The new value.
* @returns {*} The replaced value.
*/
replace(value) {
const replacedValue = this.peek();
this._heap[0] = value;
this._siftDown();
return replacedValue;
}
/**
* Compute the index for the parent of the node at index `i`.
* @param {number} i The index of the node to get the parent of.
* @returns {number} The index of the parent node.
* @private
*/
_parent(i) {
return ((i + 1) >>> 1) - 1;
}
/**
* Compute the index for the left child of the node at index `i`.
* @param {number} i The index of the node to get the left child of.
* @returns {number} The index of the left child.
* @private
*/
_left(i) {
return (i << 1) + 1;
}
/**
* Compute the index for the right child of the node at index `i`.
* @param {number} i The index of the node to get the right child of.
* @returns {number} The index of the right child.
* @private
*/
_right(i) {
return (i + 1) << 1;
}
/**
* Check if the element at index `i` is greater than the element at index `j`.
* @param {number} i The index of the first element to compare.
* @param {number} j The index of the second element to compare.
* @returns {boolean} `true` if the element at index `i` is greater than the element at index `j`, `false` otherwise.
* @private
*/
_greater(i, j) {
return this._comparator(this._heap[i], this._heap[j]);
}
/**
* Swap the elements at indices `i` and `j`.
* @param {number} i The index of the first element to swap.
* @param {number} j The index of the second element to swap.
* @private
*/
_swap(i, j) {
const temp = this._heap[i];
this._heap[i] = this._heap[j];
this._heap[j] = temp;
}
/**
* Maintain the heap property by updating positions in the heap,
* starting at the last element and moving up the heap.
* @private
*/
_siftUp() {
this._siftUpFrom(this.size - 1);
}
/**
* Helper function to sift up from a given node.
* @param {number} node The index of the node to start sifting up from.
*/
_siftUpFrom(node) {
while (node > 0 && this._greater(node, this._parent(node))) {
this._swap(node, this._parent(node));
node = this._parent(node);
}
}
/**
* Maintain the heap property by updating positions in the heap,
* starting at the first element and moving down the heap.
* @private
*/
_siftDown() {
let node = 0;
while (
(this._left(node) < this.size && this._greater(this._left(node), node)) ||
(this._right(node) < this.size && this._greater(this._right(node), node))
) {
const maxChild = (this._right(node) < this.size && this._greater(this._right(node), this._left(node)))
? this._right(node)
: this._left(node);
this._swap(node, maxChild);
node = maxChild;
}
}
/**
     * Get the index used as the "smallest" element in the heap. Since we use an array-based heap,
     * the index can be computed without traversing the heap: it is the first index of the bottom
     * level, a cheap approximation of (not necessarily exactly) the lowest-priority element.
* @private
*/
_smallest() {
return (2 ** (Math.floor(Math.log2(this.size))) - 1);
}
}
/**
* A trie structure to efficiently store and search for strings.
*/
export class CharTrie {
constructor() {
this.root = CharTrieNode.default();
}
/**
* Adds one or more `texts` to the trie.
* @param {string[]} texts The strings to add to the trie.
*/
extend(texts) {
for (const text of texts) {
this.push(text);
}
}
/**
* Adds text to the trie.
* @param {string} text The string to add to the trie.
*/
push(text) {
let node = this.root;
for (const ch of text) {
let child = node.children.get(ch);
if (child === undefined) {
child = CharTrieNode.default();
node.children.set(ch, child);
}
node = child;
}
node.isLeaf = true;
}
/**
     * Searches the trie for all stored strings that are prefixes of `text`.
     * @param {string} text The text whose prefixes to search for.
     * @yields {string} Each string in the trie that is a prefix of `text`.
*/
*commonPrefixSearch(text) {
let node = this.root;
if (node === undefined) return;
let prefix = "";
for (const ch of text) {
prefix += ch;
node = node.children.get(ch);
if (node === undefined) return;
if (node.isLeaf) {
yield prefix;
}
}
}
}
/**
* Represents a node in a character trie.
*/
class CharTrieNode {
/**
* Create a new CharTrieNode.
* @param {boolean} isLeaf Whether the node is a leaf node or not.
* @param {Map<string, CharTrieNode>} children A map containing the node's children, where the key is a character and the value is a `CharTrieNode`.
*/
constructor(isLeaf, children) {
this.isLeaf = isLeaf;
this.children = children;
}
/**
* Returns a new `CharTrieNode` instance with default values.
* @returns {CharTrieNode} A new `CharTrieNode` instance with `isLeaf` set to `false` and an empty `children` map.
*/
static default() {
return new CharTrieNode(false, new Map());
}
}
/**
* A lattice data structure to be used for tokenization.
*/
export class TokenLattice {
/**
* Creates a new TokenLattice instance.
*
* @param {string} sentence The input sentence to be tokenized.
* @param {number} bosTokenId The beginning-of-sequence token ID.
* @param {number} eosTokenId The end-of-sequence token ID.
*/
constructor(sentence, bosTokenId, eosTokenId) {
this.chars = Array.from(sentence);
this.len = this.chars.length;
this.bosTokenId = bosTokenId;
this.eosTokenId = eosTokenId;
this.nodes = [];
this.beginNodes = Array.from({ length: this.len + 1 }, () => []);
this.endNodes = Array.from({ length: this.len + 1 }, () => []);
const bos = new TokenLatticeNode(this.bosTokenId, 0, 0, 0, 0.0);
const eos = new TokenLatticeNode(this.eosTokenId, 1, this.len, 0, 0.0);
this.nodes.push(bos.clone());
this.nodes.push(eos.clone());
this.beginNodes[this.len].push(eos);
this.endNodes[0].push(bos);
}
/**
* Inserts a new token node into the token lattice.
*
* @param {number} pos The starting position of the token.
* @param {number} length The length of the token.
* @param {number} score The score of the token.
* @param {number} tokenId The token ID of the token.
*/
insert(pos, length, score, tokenId) {
const nodeId = this.nodes.length;
const node = new TokenLatticeNode(tokenId, nodeId, pos, length, score);
this.beginNodes[pos].push(node);
this.endNodes[pos + length].push(node);
this.nodes.push(node);
}
/**
* Implements the Viterbi algorithm to compute the most likely sequence of tokens.
*
* @returns {TokenLatticeNode[]} The most likely sequence of tokens.
*/
viterbi() {
const len = this.len;
let pos = 0;
while (pos <= len) {
if (this.beginNodes[pos].length == 0) {
return [];
}
for (let rnode of this.beginNodes[pos]) {
rnode.prev = null;
let bestScore = 0.0;
let bestNode = null;
for (let lnode of this.endNodes[pos]) {
const score = lnode.backtraceScore + rnode.score;
if (bestNode === null || score > bestScore) {
bestNode = lnode.clone();
bestScore = score;
}
}
if (bestNode !== null) {
rnode.prev = bestNode;
rnode.backtraceScore = bestScore;
} else {
return [];
}
}
++pos;
}
const results = [];
const root = this.beginNodes[len][0];
const prev = root.prev;
if (prev === null) {
return [];
}
let node = prev.clone();
while (node.prev !== null) {
results.push(node.clone());
const n = node.clone();
node = n.prev.clone();
}
results.reverse();
return results;
}
/**
* @param {TokenLatticeNode} node
     * @returns {string} The substring of the sentence covered by the given node.
*/
piece(node) {
return this.chars.slice(node.pos, node.pos + node.length).join('');
}
/**
* @returns {string[]} The most likely sequence of tokens.
*/
tokens() {
const nodes = this.viterbi();
return nodes.map(x => this.piece(x));
}
/**
* @returns {number[]} The most likely sequence of token ids.
*/
tokenIds() {
const nodes = this.viterbi();
return nodes.map(x => x.tokenId);
}
}
class TokenLatticeNode {
/**
* Represents a node in a token lattice for a given sentence.
* @param {number} tokenId The ID of the token associated with this node.
* @param {number} nodeId The ID of this node.
* @param {number} pos The starting position of the token in the sentence.
* @param {number} length The length of the token.
* @param {number} score The score associated with the token.
*/
constructor(tokenId, nodeId, pos, length, score) {
this.tokenId = tokenId;
this.nodeId = nodeId;
this.pos = pos;
this.length = length;
this.score = score;
this.prev = null;
this.backtraceScore = 0.0;
}
/**
* Returns a clone of this node.
* @returns {TokenLatticeNode} A clone of this node.
*/
clone() {
const n = new TokenLatticeNode(this.tokenId, this.nodeId, this.pos, this.length, this.score);
n.prev = this.prev;
n.backtraceScore = this.backtraceScore;
return n;
}
}
| transformers.js/src/utils/data-structures.js/0 | {
"file_path": "transformers.js/src/utils/data-structures.js",
"repo_id": "transformers.js",
"token_count": 5931
} |
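These structures are internal, but a small sketch of the two simpler ones makes their behaviour concrete; the relative import path below is illustrative only.

```js
// Illustrative only: these classes are internal, so the import path is hypothetical.
import { PriorityQueue, CharTrie } from './src/utils/data-structures.js';

// Max-heap by default; with maxSize set, the lowest-priority element is evicted on overflow.
const pq = new PriorityQueue((a, b) => a.score > b.score, /* maxSize */ 3);
pq.extend([{ id: 'a', score: 1 }, { id: 'b', score: 5 }, { id: 'c', score: 3 }, { id: 'd', score: 4 }]);
console.log(pq.pop().id); // 'b' (score 5); 'a' (score 1) was evicted to stay within maxSize

// CharTrie.commonPrefixSearch yields every stored string that is a prefix of the query.
const trie = new CharTrie();
trie.extend(['a', 'ab', 'abc', 'b']);
console.log([...trie.commonPrefixSearch('abcd')]); // ['a', 'ab', 'abc']
```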
import { AutoFeatureExtractor, ASTFeatureExtractor } from "../../../src/transformers.js";
import { load_cached_audio } from "../../asset_cache.js";
import { MAX_FEATURE_EXTRACTOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js";
export default () => {
// ASTFeatureExtractor
describe("ASTFeatureExtractor", () => {
const model_id = "Xenova/ast-finetuned-audioset-10-10-0.4593";
/** @type {ASTFeatureExtractor} */
let feature_extractor;
beforeAll(async () => {
feature_extractor = await AutoFeatureExtractor.from_pretrained(model_id);
}, MAX_FEATURE_EXTRACTOR_LOAD_TIME);
it(
"truncation",
async () => {
const audio = await load_cached_audio("mlk");
const { input_values } = await feature_extractor(audio);
expect(input_values.dims).toEqual([1, 1024, 128]);
expect(input_values.mean().item()).toBeCloseTo(-0.04054912979309085);
expect(input_values.data[0]).toBeCloseTo(-0.5662586092948914);
expect(input_values.data[1]).toBeCloseTo(-1.0300861597061157);
expect(input_values.data[129]).toBeCloseTo(-1.084834098815918);
expect(input_values.data[1025]).toBeCloseTo(-1.1204065084457397);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"padding",
async () => {
const audio = await load_cached_audio("mlk");
const { input_values } = await feature_extractor(audio.slice(0, 1000));
expect(input_values.dims).toEqual([1, 1024, 128]); // [1, 4, 128] -> (padded to) -> [1, 1024, 128]
expect(input_values.mean().item()).toBeCloseTo(0.4647964835166931);
expect(input_values.data[0]).toBeCloseTo(-0.5662586092948914);
expect(input_values.data[1]).toBeCloseTo(-1.0300861597061157);
expect(input_values.data[129]).toBeCloseTo(-1.084834098815918);
// padded values
expect(input_values.data[1025]).toBeCloseTo(0.46703237295150757);
expect(input_values.data[2049]).toBeCloseTo(0.46703237295150757);
expect(input_values.data[10000]).toBeCloseTo(0.46703237295150757);
},
MAX_TEST_EXECUTION_TIME,
);
});
};
| transformers.js/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.js/0 | {
"file_path": "transformers.js/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.js",
"repo_id": "transformers.js",
"token_count": 926
} |
import { GPT2Tokenizer } from "../../../src/tokenizers.js";
import { BASE_TEST_STRINGS, SENTENCEPIECE_TEST_STRINGS } from "../test_strings.js";
export const TOKENIZER_CLASS = GPT2Tokenizer;
export const TEST_CONFIG = {
// - clean_up_tokenization_spaces=true
// - default pretokenization regex
"Xenova/gpt2": {
SIMPLE: {
text: BASE_TEST_STRINGS.SIMPLE,
tokens: ["How", "\u0120are", "\u0120you", "\u0120doing", "?"],
ids: [2437, 389, 345, 1804, 30],
decoded: "How are you doing?",
},
SIMPLE_WITH_PUNCTUATION: {
text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION,
tokens: ["You", "\u0120should", "'ve", "\u0120done", "\u0120this"],
ids: [1639, 815, 1053, 1760, 428],
decoded: "You should've done this",
},
NUMBERS: {
text: BASE_TEST_STRINGS.NUMBERS,
tokens: ["01", "23", "45", "67", "89", "Ġ0", "Ġ1", "Ġ2", "Ġ3", "Ġ4", "Ġ5", "Ġ6", "Ġ7", "Ġ8", "Ġ9", "Ġ10", "Ġ100", "Ġ1000"],
ids: [486, 1954, 2231, 3134, 4531, 657, 352, 362, 513, 604, 642, 718, 767, 807, 860, 838, 1802, 8576],
decoded: "0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000",
},
TEXT_WITH_NUMBERS: {
text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS,
tokens: ["The", "\u0120company", "\u0120was", "\u0120founded", "\u0120in", "\u01202016", "."],
ids: [464, 1664, 373, 9393, 287, 1584, 13],
decoded: "The company was founded in 2016.",
},
PUNCTUATION: {
text: BASE_TEST_STRINGS.PUNCTUATION,
tokens: ["A", "\u010a", "'ll", "\u0120!!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can", "'t", "."],
ids: [32, 198, 1183, 37867, 1462, 8348, 67, 7061, 67, 286, 11, 460, 470, 13],
decoded: "A\n'll!!to?'d''d of, can't.",
},
PYTHON_CODE: {
text: BASE_TEST_STRINGS.PYTHON_CODE,
tokens: ["def", "\u0120main", "():", "\u010a", "\u0109", "pass"],
ids: [4299, 1388, 33529, 198, 197, 6603],
decoded: "def main():\n\tpass",
},
JAVASCRIPT_CODE: {
text: BASE_TEST_STRINGS.JAVASCRIPT_CODE,
tokens: ["let", "\u0120a", "\u0120=", "\u0120obj", ".", "to", "String", "();", "\u010a", "to", "String", "();"],
ids: [1616, 257, 796, 26181, 13, 1462, 10100, 9783, 198, 1462, 10100, 9783],
decoded: "let a = obj.toString();\ntoString();",
},
NEWLINES: {
text: BASE_TEST_STRINGS.NEWLINES,
tokens: ["This", "\u010a", "\u010a", "is", "\u010a", "a", "\u010a", "test", "."],
ids: [1212, 198, 198, 271, 198, 64, 198, 9288, 13],
decoded: "This\n\nis\na\ntest.",
},
BASIC: {
text: BASE_TEST_STRINGS.BASIC,
tokens: ["UN", "want", "\u00c3\u00a9", "d", ",", "running"],
ids: [4944, 42949, 2634, 67, 11, 20270],
decoded: "UNwant\u00e9d,running",
},
CONTROL_TOKENS: {
text: BASE_TEST_STRINGS.CONTROL_TOKENS,
tokens: ["1", "\u0100", "2", "\u00ef\u00bf\u00bd", "3"],
ids: [16, 188, 17, 4210, 18],
decoded: "1\u00002\ufffd3",
},
HELLO_WORLD_TITLECASE: {
text: BASE_TEST_STRINGS.HELLO_WORLD_TITLECASE,
tokens: ["Hello", "\u0120World"],
ids: [15496, 2159],
decoded: "Hello World",
},
HELLO_WORLD_LOWERCASE: {
text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE,
tokens: ["hello", "\u0120world"],
ids: [31373, 995],
decoded: "hello world",
},
CHINESE_ONLY: {
text: BASE_TEST_STRINGS.CHINESE_ONLY,
tokens: ["\u00e7\u0136\u0141", "\u00e6", "\u00b4", "\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e", "\u0141", "\u00e8", "\u00b0", "\u013d", "\u00e6\u013a\u00af"],
ids: [37955, 162, 112, 119, 21410, 40367, 253, 164, 108, 249, 42468],
decoded: "\u751f\u6d3b\u7684\u771f\u8c1b\u662f",
},
LEADING_SPACE: {
text: BASE_TEST_STRINGS.LEADING_SPACE,
tokens: ["\u0120", "\u0120", "\u0120leading", "\u0120space"],
ids: [220, 220, 3756, 2272],
decoded: " leading space",
},
TRAILING_SPACE: {
text: BASE_TEST_STRINGS.TRAILING_SPACE,
tokens: ["tra", "iling", "\u0120space", "\u0120", "\u0120", "\u0120"],
ids: [9535, 4386, 2272, 220, 220, 220],
decoded: "trailing space ",
},
DOUBLE_SPACE: {
text: BASE_TEST_STRINGS.DOUBLE_SPACE,
tokens: ["Hi", "\u0120", "\u0120Hello"],
ids: [17250, 220, 18435],
decoded: "Hi Hello",
},
CURRENCY: {
text: BASE_TEST_STRINGS.CURRENCY,
tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120test"],
ids: [9288, 720, 16, 371, 17, 1303, 18, 10432, 19, 4248, 20, 38221, 21, 2343, 224, 96, 22, 2343, 224, 117, 23, 2343, 224, 109, 24, 1332],
decoded: "test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test",
},
CURRENCY_WITH_DECIMALS: {
text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS,
tokens: ["I", "\u0120bought", "\u0120an", "\u0120apple", "\u0120for", "\u0120$", "1", ".", "00", "\u0120at", "\u0120the", "\u0120store", "."],
ids: [40, 5839, 281, 17180, 329, 720, 16, 13, 405, 379, 262, 3650, 13],
decoded: "I bought an apple for $1.00 at the store.",
},
ELLIPSIS: {
text: BASE_TEST_STRINGS.ELLIPSIS,
tokens: ["you", "\u00e2\u0122\u00a6", "\u0120", "\u0120"],
ids: [5832, 1399, 220, 220],
decoded: "you\u2026 ",
},
TEXT_WITH_ESCAPE_CHARACTERS: {
text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS,
tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2\u0142\u00c2\u0142"],
ids: [5832, 1399, 4603],
decoded: "you\u2026\u00a0\u00a0",
},
TEXT_WITH_ESCAPE_CHARACTERS_2: {
text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2,
tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2\u0142", "\u00c2\u0142", "you", "\u00e2\u0122\u00a6", "\u00c2\u0142\u00c2\u0142"],
ids: [5832, 1399, 1849, 1849, 5832, 1399, 4603],
decoded: "you\u2026\u00a0\u00a0you\u2026\u00a0\u00a0",
},
TILDE_NORMALIZATION: {
text: BASE_TEST_STRINGS.TILDE_NORMALIZATION,
tokens: ["we", "ird", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120edge", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120case"],
ids: [732, 1447, 27332, 121, 252, 5743, 27332, 121, 252, 1339],
decoded: "weird \uff5e edge \uff5e case",
},
SPIECE_UNDERSCORE: {
text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE,
tokens: ["\u00e2\u0138", "\u0123", "This", "\u0120\u00e2\u0138", "\u0123", "is", "\u0120\u00e2\u0138", "\u0123", "a", "\u0120\u00e2\u0138", "\u0123", "test", "\u0120\u00e2\u0138", "\u0123", "."],
ids: [5008, 223, 1212, 11019, 223, 271, 11019, 223, 64, 11019, 223, 9288, 11019, 223, 13],
decoded: "\u2581This \u2581is \u2581a \u2581test \u2581.",
},
SPECIAL_WITH_TRAILING_WHITESPACE: {
text: SENTENCEPIECE_TEST_STRINGS.SPECIAL_WITH_TRAILING_WHITESPACE,
tokens: ["<", "s", ">", "\u010a"],
ids: [27, 82, 29, 198],
decoded: "<s>\n",
},
SPECIAL_SURROUNDED_BY_WHITESPACE: {
text: SENTENCEPIECE_TEST_STRINGS.SPECIAL_SURROUNDED_BY_WHITESPACE,
tokens: ["\u0120</", "s", ">", "\u0120test", "\u0120</", "s", ">", "\u0120"],
ids: [7359, 82, 29, 1332, 7359, 82, 29, 220],
decoded: " </s> test </s> ",
},
SPECIAL_NO_WHITESPACE: {
text: SENTENCEPIECE_TEST_STRINGS.SPECIAL_NO_WHITESPACE,
tokens: ["</", "s", ">", "test", "</", "s", ">"],
ids: [3556, 82, 29, 9288, 3556, 82, 29],
decoded: "</s>test</s>",
},
},
// - clean_up_tokenization_spaces=false
// - custom pretokenization regex
"Xenova/gpt-4": {
PUNCTUATION: {
text: BASE_TEST_STRINGS.PUNCTUATION,
tokens: ["A", "\u010a", "'ll", "\u0120!!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can", "'t", "."],
ids: [32, 198, 3358, 11261, 998, 20837, 67, 4708, 67, 315, 11, 649, 956, 13],
decoded: "A\n'll !!to?'d''d of, can't.",
},
JAVASCRIPT_CODE: {
text: BASE_TEST_STRINGS.JAVASCRIPT_CODE,
tokens: ["let", "\u0120a", "\u0120=", "\u0120obj", ".toString", "();\u010a", "toString", "();"],
ids: [1169, 264, 284, 2909, 5180, 545, 6712, 2178],
decoded: "let a = obj.toString();\ntoString();",
},
CURRENCY: {
text: BASE_TEST_STRINGS.CURRENCY,
tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2\u0124\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120test"],
ids: [1985, 400, 16, 432, 17, 674, 18, 13281, 19, 7083, 20, 72588, 21, 2928, 224, 96, 22, 90891, 23, 2928, 224, 109, 24, 1296],
decoded: "test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test",
},
TILDE_NORMALIZATION: {
text: BASE_TEST_STRINGS.TILDE_NORMALIZATION,
tokens: ["we", "ird", "\u0120", "\u00ef\u00bd\u0140", "\u0120edge", "\u0120", "\u00ef\u00bd\u0140", "\u0120case"],
ids: [906, 2668, 220, 21909, 6964, 220, 21909, 1162],
decoded: "weird \uff5e edge \uff5e case",
},
},
"Xenova/gpt-4o": {
NUMBERS: {
text: BASE_TEST_STRINGS.NUMBERS,
tokens: ["012", "345", "678", "9", "Ġ", "0", "Ġ", "1", "Ġ", "2", "Ġ", "3", "Ġ", "4", "Ġ", "5", "Ġ", "6", "Ġ", "7", "Ġ", "8", "Ġ", "9", "Ġ", "10", "Ġ", "100", "Ġ", "100", "0"],
ids: [19267, 22901, 30833, 24, 220, 15, 220, 16, 220, 17, 220, 18, 220, 19, 220, 20, 220, 21, 220, 22, 220, 23, 220, 24, 220, 702, 220, 1353, 220, 1353, 15],
decoded: "0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000",
},
TEXT_WITH_NUMBERS: {
text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS,
tokens: ["The", "\u0120company", "\u0120was", "\u0120founded", "\u0120in", "\u0120", "201", "6", "."],
ids: [976, 3175, 673, 24303, 306, 220, 667, 21, 13],
decoded: "The company was founded in 2016.",
},
PUNCTUATION: {
text: BASE_TEST_STRINGS.PUNCTUATION,
tokens: ["A", "\u010a", "'ll", "\u0120!!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can't", "."],
ids: [32, 198, 6090, 17131, 935, 48511, 67, 5830, 67, 328, 11, 8535, 13],
decoded: "A\n'll !!to?'d''d of, can't.",
},
PYTHON_CODE: {
text: BASE_TEST_STRINGS.PYTHON_CODE,
tokens: ["def", "\u0120main", "():\u010a", "\u0109pass"],
ids: [1314, 2758, 8595, 100653],
decoded: "def main():\n\tpass",
},
JAVASCRIPT_CODE: {
text: BASE_TEST_STRINGS.JAVASCRIPT_CODE,
tokens: ["let", "\u0120a", "\u0120=", "\u0120obj", ".to", "String", "();\u010a", "to", "String", "();"],
ids: [1347, 261, 314, 4099, 3552, 916, 740, 935, 916, 4177],
decoded: "let a = obj.toString();\ntoString();",
},
NEWLINES: {
text: BASE_TEST_STRINGS.NEWLINES,
tokens: ["This", "\u010a\u010a", "is", "\u010a", "a", "\u010a", "test", "."],
ids: [2500, 279, 276, 198, 64, 198, 3190, 13],
decoded: "This\n\nis\na\ntest.",
},
BASIC: {
text: BASE_TEST_STRINGS.BASIC,
tokens: ["UN", "want", "\u00c3\u00a9d", ",r", "unning"],
ids: [2926, 72517, 6383, 33654, 11244],
decoded: "UNwant\u00e9d,running",
},
CHINESE_ONLY: {
text: BASE_TEST_STRINGS.CHINESE_ONLY,
tokens: ["\u00e7\u0136\u0141\u00e6\u00b4\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"],
ids: [32479, 1616, 7910, 7856, 249, 3221],
decoded: "\u751f\u6d3b\u7684\u771f\u8c1b\u662f",
},
LEADING_SPACE: {
text: BASE_TEST_STRINGS.LEADING_SPACE,
tokens: ["\u0120\u0120", "\u0120leading", "\u0120space"],
ids: [256, 8117, 4918],
decoded: " leading space",
},
TRAILING_SPACE: {
text: BASE_TEST_STRINGS.TRAILING_SPACE,
tokens: ["tr", "ailing", "\u0120space", "\u0120\u0120\u0120"],
ids: [371, 24408, 4918, 271],
decoded: "trailing space ",
},
CURRENCY: {
text: BASE_TEST_STRINGS.CURRENCY,
tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2\u00a5", "6", "\u0120\u00e2\u0124", "\u00a3", "7", "\u0120\u00e2\u0124\u00b9", "8", "\u0120\u00e2\u0124", "\u00b1", "9", "\u0120test"],
ids: [3190, 548, 16, 460, 17, 1069, 18, 7950, 19, 8989, 20, 123814, 21, 59790, 96, 22, 73406, 23, 59790, 109, 24, 1746],
decoded: "test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test",
},
ELLIPSIS: {
text: BASE_TEST_STRINGS.ELLIPSIS,
tokens: ["you", "\u00e2\u0122\u00a6", "\u0120\u0120"],
ids: [13320, 1131, 256],
decoded: "you\u2026 ",
},
TILDE_NORMALIZATION: {
text: BASE_TEST_STRINGS.TILDE_NORMALIZATION,
tokens: ["we", "ird", "\u0120\u00ef\u00bd\u0140", "\u0120edge", "\u0120\u00ef\u00bd\u0140", "\u0120case"],
ids: [854, 2716, 105665, 11165, 105665, 1890],
decoded: "weird \uff5e edge \uff5e case",
},
SPIECE_UNDERSCORE: {
text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE,
tokens: ["\u00e2\u0138", "\u0123", "This", "\u0120\u00e2\u0138\u0123", "is", "\u0120\u00e2\u0138\u0123", "a", "\u0120\u00e2\u0138\u0123", "test", "\u0120\u00e2\u0138\u0123", "."],
ids: [6762, 223, 2500, 39960, 276, 39960, 64, 39960, 3190, 39960, 13],
decoded: "\u2581This \u2581is \u2581a \u2581test \u2581.",
},
SPECIAL_WITH_TRAILING_WHITESPACE: {
text: SENTENCEPIECE_TEST_STRINGS.SPECIAL_WITH_TRAILING_WHITESPACE,
tokens: ["<s", ">\u010a"],
ids: [101950, 523],
decoded: "<s>\n",
},
},
"Xenova/claude-tokenizer": {
JAVASCRIPT_CODE: {
text: BASE_TEST_STRINGS.JAVASCRIPT_CODE,
tokens: ["let", "\u0120a", "\u0120=", "\u0120obj", ".", "toString", "();", "\u010a", "toString", "();"],
ids: [1785, 269, 284, 2652, 18, 26492, 4370, 203, 26492, 4370],
decoded: "let a = obj.toString();\ntoString();",
},
BASIC: {
text: BASE_TEST_STRINGS.BASIC,
tokens: ["UN", "want", "\u00c3\u00a9d", ",", "running"],
ids: [2359, 17571, 37911, 16, 7889],
decoded: "UNwant\u00e9d,running",
},
CHINESE_ONLY: {
text: BASE_TEST_STRINGS.CHINESE_ONLY,
tokens: ["\u00e7\u0136\u0141", "\u00e6\u00b4\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"],
ids: [14706, 37675, 2471, 56904, 15959, 254, 5977],
decoded: "\u751f\u6d3b\u7684\u771f\u8c1b\u662f",
},
TRAILING_SPACE: {
text: BASE_TEST_STRINGS.TRAILING_SPACE,
tokens: ["trailing", "\u0120space", "\u0120\u0120\u0120"],
ids: [40110, 3384, 264],
decoded: "trailing space ",
},
CURRENCY: {
text: BASE_TEST_STRINGS.CURRENCY,
tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2", "\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120test"],
ids: [765, 734, 21, 487, 22, 379, 23, 36714, 24, 13206, 25, 2455, 103, 26, 4937, 229, 101, 27, 4937, 229, 122, 28, 4937, 229, 114, 29, 722],
decoded: "test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test",
},
ELLIPSIS: {
text: BASE_TEST_STRINGS.ELLIPSIS,
tokens: ["you", "...", "\u0120\u0120"],
ids: [6773, 1174, 261],
decoded: "you... ",
},
TEXT_WITH_ESCAPE_CHARACTERS: {
text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS,
tokens: ["you", "...", "\u0120\u0120"],
ids: [6773, 1174, 261],
decoded: "you... ",
},
TEXT_WITH_ESCAPE_CHARACTERS_2: {
text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2,
tokens: ["you", "...", "\u0120", "\u0120you", "...", "\u0120\u0120"],
ids: [6773, 1174, 225, 583, 1174, 261],
decoded: "you... you... ",
},
TILDE_NORMALIZATION: {
text: BASE_TEST_STRINGS.TILDE_NORMALIZATION,
tokens: ["we", "ird", "\u0120~", "\u0120edge", "\u0120~", "\u0120case"],
ids: [798, 2650, 6217, 4915, 6217, 1544],
decoded: "weird ~ edge ~ case",
},
},
"bigcode/santacoder": {
NUMBERS: {
text: BASE_TEST_STRINGS.NUMBERS,
tokens: ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "Ġ", "0", "Ġ", "1", "Ġ", "2", "Ġ", "3", "Ġ", "4", "Ġ", "5", "Ġ", "6", "Ġ", "7", "Ġ", "8", "Ġ", "9", "Ġ", "1", "0", "Ġ", "1", "0", "0", "Ġ", "1", "0", "0", "0"],
ids: [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 207, 15, 207, 16, 207, 17, 207, 18, 207, 19, 207, 20, 207, 21, 207, 22, 207, 23, 207, 24, 207, 16, 15, 207, 16, 15, 15, 207, 16, 15, 15, 15],
decoded: "0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000",
},
TEXT_WITH_NUMBERS: {
text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS,
tokens: ["The", "\u0120company", "\u0120was", "\u0120fo", "unded", "\u0120in", "\u0120", "2", "0", "1", "6", "."],
ids: [2111, 10107, 2501, 17436, 7584, 319, 207, 17, 15, 16, 21, 13],
decoded: "The company was founded in 2016.",
},
CHINESE_ONLY: {
text: BASE_TEST_STRINGS.CHINESE_ONLY,
tokens: ["\u00e7\u0136\u0141", "\u00e6\u00b4\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"],
ids: [8715, 24543, 1825, 34717, 37452, 236, 4343],
decoded: "\u751f\u6d3b\u7684\u771f\u8c1b\u662f",
},
CURRENCY_WITH_DECIMALS: {
text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS,
tokens: ["I", "\u0120bo", "ught", "\u0120an", "\u0120apple", "\u0120for", "\u0120$", "1", ".", "0", "0", "\u0120at", "\u0120the", "\u0120store", "."],
ids: [40, 12307, 10310, 743, 29806, 408, 763, 16, 13, 15, 15, 869, 331, 2823, 13],
decoded: "I bought an apple for $1.00 at the store.",
},
TILDE_NORMALIZATION: {
text: BASE_TEST_STRINGS.TILDE_NORMALIZATION,
tokens: ["we", "ird", "\u0120", "\u00ef\u00bd", "\u0140", "\u0120edge", "\u0120", "\u00ef\u00bd", "\u0140", "\u0120case"],
ids: [1850, 4427, 207, 29217, 239, 4959, 207, 29217, 239, 1210],
decoded: "weird \uff5e edge \uff5e case",
},
SPIECE_UNDERSCORE: {
text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE,
tokens: ["\u00e2\u0138", "\u0123", "This", "\u0120", "\u00e2\u0138", "\u0123", "is", "\u0120", "\u00e2\u0138", "\u0123", "a", "\u0120", "\u00e2\u0138", "\u0123", "test", "\u0120", "\u00e2\u0138", "\u0123", "."],
ids: [3718, 210, 3456, 207, 3718, 210, 280, 207, 3718, 210, 64, 207, 3718, 210, 706, 207, 3718, 210, 13],
decoded: "\u2581This \u2581is \u2581a \u2581test \u2581.",
},
},
"Xenova/CodeGPT-tokenizer": {
CHINESE_ONLY: {
text: BASE_TEST_STRINGS.CHINESE_ONLY,
tokens: ["\u00e7\u0136\u0141", "\u00e6", "\u00b4", "\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e", "\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"],
ids: [25506, 165, 115, 122, 5137, 43415, 256, 20679, 252, 13283],
decoded: "\u751f\u6d3b\u7684\u771f\u8c1b\u662f",
},
TRAILING_SPACE: {
text: BASE_TEST_STRINGS.TRAILING_SPACE,
tokens: ["trailing", "\u0120space", "\u0120", "\u0120", "\u0120"],
ids: [15584, 3497, 223, 223, 223],
decoded: "trailing space ",
},
TEXT_WITH_ESCAPE_CHARACTERS: {
text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS,
tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"],
ids: [13953, 29502, 129, 257, 129, 257],
decoded: "you\u2026\u00a0\u00a0",
},
TEXT_WITH_ESCAPE_CHARACTERS_2: {
text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2,
tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142", "you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"],
ids: [13953, 29502, 129, 257, 129, 257, 13953, 29502, 129, 257, 129, 257],
decoded: "you\u2026\u00a0\u00a0you\u2026\u00a0\u00a0",
},
},
"huggingface-course/codeparrot-ds": {
NUMBERS: {
text: BASE_TEST_STRINGS.NUMBERS,
tokens: ["0123456789", "Ġ0", "Ġ1", "Ġ2", "Ġ3", "Ġ4", "Ġ5", "Ġ6", "Ġ7", "Ġ8", "Ġ9", "Ġ10", "Ġ100", "Ġ1000"],
ids: [25218, 443, 396, 554, 869, 1163, 1462, 1911, 2624, 2070, 2837, 2009, 3038, 4764],
decoded: "0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000",
},
TEXT_WITH_NUMBERS: {
text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS,
tokens: ["The", "\u0120company", "\u0120was", "\u0120fo", "unded", "\u0120in", "\u01202016", "."],
ids: [2096, 16502, 1442, 11689, 7865, 253, 8780, 14],
decoded: "The company was founded in 2016.",
},
PUNCTUATION: {
text: BASE_TEST_STRINGS.PUNCTUATION,
tokens: ["A", "\u010a", "'ll", "\u0120!", "!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can", "'t", "."],
ids: [33, 173, 6402, 905, 1, 403, 15227, 68, 589, 68, 311, 12, 796, 1059, 14],
decoded: "A\n'll!!to?'d''d of, can't.",
},
JAVASCRIPT_CODE: {
text: BASE_TEST_STRINGS.JAVASCRIPT_CODE,
tokens: ["let", "\u0120a", "\u0120=", "\u0120obj", ".", "toString", "();", "\u010a", "toString", "();"],
ids: [2047, 231, 233, 1300, 14, 30494, 16248, 173, 30494, 16248],
decoded: "let a = obj.toString();\ntoString();",
},
CHINESE_ONLY: {
text: BASE_TEST_STRINGS.CHINESE_ONLY,
tokens: ["\u00e7\u0136\u0141", "\u00e6\u00b4", "\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e", "\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"],
ids: [20185, 43799, 120, 3994, 37782, 211, 15933, 207, 11130],
decoded: "\u751f\u6d3b\u7684\u771f\u8c1b\u662f",
},
TRAILING_SPACE: {
text: BASE_TEST_STRINGS.TRAILING_SPACE,
tokens: ["trailing", "\u0120space", "\u0120\u0120\u0120"],
ids: [17031, 3000, 216],
decoded: "trailing space ",
},
CURRENCY: {
text: BASE_TEST_STRINGS.CURRENCY,
tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2", "\u0124\u00ac", "4", "\u0120\u00c2", "\u00a3", "5", "\u0120\u00c2", "\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120test"],
ids: [1824, 3748, 17, 683, 18, 294, 19, 5161, 28898, 20, 23446, 97, 21, 23446, 99, 22, 5161, 182, 97, 23, 5161, 182, 118, 24, 5161, 182, 110, 25, 1737],
decoded: "test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test",
},
CURRENCY_WITH_DECIMALS: {
text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS,
tokens: ["I", "\u0120bo", "ught", "\u0120an", "\u0120app", "le", "\u0120for", "\u0120$", "1", ".", "00", "\u0120at", "\u0120the", "\u0120store", "."],
ids: [41, 772, 8272, 309, 870, 239, 296, 3748, 17, 14, 543, 815, 256, 2689, 14],
decoded: "I bought an apple for $1.00 at the store.",
},
TILDE_NORMALIZATION: {
text: BASE_TEST_STRINGS.TILDE_NORMALIZATION,
tokens: ["we", "ird", "\u0120", "\u00ef", "\u00bd", "\u0140", "\u0120edge", "\u0120", "\u00ef", "\u00bd", "\u0140", "\u0120case"],
ids: [955, 6075, 179, 166, 122, 210, 2703, 179, 166, 122, 210, 1539],
decoded: "weird \uff5e edge \uff5e case",
},
},
};
| transformers.js/tests/models/gpt2/test_tokenization_gpt2.js/0 | {
"file_path": "transformers.js/tests/models/gpt2/test_tokenization_gpt2.js",
"repo_id": "transformers.js",
"token_count": 12344
} |
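The `SIMPLE` case for `Xenova/gpt2` above can be reproduced directly with the tokenizer; the sketch below uses the published package import rather than the test suite's relative path.

```js
// Reproduces the "Xenova/gpt2" SIMPLE case above (package import path assumed).
import { GPT2Tokenizer } from '@huggingface/transformers';

const tokenizer = await GPT2Tokenizer.from_pretrained('Xenova/gpt2');

const ids = tokenizer.encode('How are you doing?');
console.log(ids);                   // [2437, 389, 345, 1804, 30]
console.log(tokenizer.decode(ids)); // 'How are you doing?'
```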
import { SamProcessor, SamModel } from "../../../src/transformers.js";
import { load_cached_image } from "../../asset_cache.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";
export default () => {
describe("SamModel", () => {
const model_id = "Xenova/slimsam-77-uniform";
/** @type {SamModel} */
let model;
/** @type {SamProcessor} */
let processor;
beforeAll(async () => {
model = await SamModel.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
processor = await SamProcessor.from_pretrained(model_id);
}, MAX_MODEL_LOAD_TIME);
it(
"w/ input_points",
async () => {
// Prepare image and input points
const raw_image = await load_cached_image("corgi");
const input_points = [[[340, 250]]];
// Process inputs and perform mask generation
const inputs = await processor(raw_image, { input_points });
const { pred_masks, iou_scores } = await model(inputs);
expect(pred_masks.dims).toEqual([1, 1, 3, 256, 256]);
expect(pred_masks.mean().item()).toBeCloseTo(-5.76981782913208, 5);
expect(iou_scores.dims).toEqual([1, 1, 3]);
expect(iou_scores.tolist()).toBeCloseToNested([[[0.8583833575248718, 0.9773167967796326, 0.8511142730712891]]]);
// Post-process masks
const masks = await processor.post_process_masks(pred_masks, inputs.original_sizes, inputs.reshaped_input_sizes);
expect(masks).toHaveLength(1);
expect(masks[0].dims).toEqual([1, 3, 410, 614]);
expect(masks[0].type).toEqual("bool");
},
MAX_TEST_EXECUTION_TIME,
);
afterAll(async () => {
await model?.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/models/sam/test_modeling_sam.js/0 | {
"file_path": "transformers.js/tests/models/sam/test_modeling_sam.js",
"repo_id": "transformers.js",
"token_count": 770
} |