# Note: We subclass str so that serialization is straightforward
# https://stackoverflow.com/questions/24481852/serialising-an-enum-member-to-json
from dataclasses import dataclass
from enum import Enum
from typing import Any, Protocol
class FeatureType(str, Enum):
STATE = "STATE"
VISUAL = "VISUAL"
ENV = "ENV"
ACTION = "ACTION"
class NormalizationMode(str, Enum):
MIN_MAX = "MIN_MAX"
MEAN_STD = "MEAN_STD"
IDENTITY = "IDENTITY"
class DictLike(Protocol):
def __getitem__(self, key: Any) -> Any: ...
@dataclass
class PolicyFeature:
type: FeatureType
shape: tuple
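# A hedged illustration (not part of the original file): because FeatureType and
# NormalizationMode subclass `str`, their members serialize directly with the stdlib
# `json` module, e.g.:
#   import json
#   json.dumps({"type": FeatureType.STATE, "norm": NormalizationMode.MIN_MAX})
#   # -> '{"type": "STATE", "norm": "MIN_MAX"}'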
| lerobot/lerobot/configs/types.py/0 | {
"file_path": "lerobot/lerobot/configs/types.py",
"repo_id": "lerobot",
"token_count": 224
} |
"""
Tests for physical robots and their mocked versions.
If the physical robots are not connected to the computer, or not working,
the test will be skipped.
Example of running a specific test:
```bash
pytest -sx tests/test_control_robot.py::test_teleoperate
```
Example of running test on real robots connected to the computer:
```bash
pytest -sx 'tests/test_control_robot.py::test_teleoperate[koch-False]'
pytest -sx 'tests/test_control_robot.py::test_teleoperate[koch_bimanual-False]'
pytest -sx 'tests/test_control_robot.py::test_teleoperate[aloha-False]'
```
Example of running test on a mocked version of robots:
```bash
pytest -sx 'tests/test_control_robot.py::test_teleoperate[koch-True]'
pytest -sx 'tests/test_control_robot.py::test_teleoperate[koch_bimanual-True]'
pytest -sx 'tests/test_control_robot.py::test_teleoperate[aloha-True]'
```
"""
import multiprocessing
from pathlib import Path
from unittest.mock import patch
import pytest
from lerobot.common.logger import Logger
from lerobot.common.policies.act.configuration_act import ACTConfig
from lerobot.common.policies.factory import make_policy
from lerobot.common.robot_devices.control_configs import (
CalibrateControlConfig,
RecordControlConfig,
ReplayControlConfig,
TeleoperateControlConfig,
)
from lerobot.configs.default import DatasetConfig
from lerobot.configs.policies import PreTrainedConfig
from lerobot.configs.train import TrainPipelineConfig
from lerobot.scripts.control_robot import calibrate, record, replay, teleoperate
from tests.test_robots import make_robot
from tests.utils import DEVICE, TEST_ROBOT_TYPES, mock_calibration_dir, require_robot
@pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES)
@require_robot
def test_teleoperate(tmpdir, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock and robot_type != "aloha":
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
tmpdir = Path(tmpdir)
calibration_dir = tmpdir / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
robot = make_robot(**robot_kwargs)
teleoperate(robot, TeleoperateControlConfig(teleop_time_s=1))
teleoperate(robot, TeleoperateControlConfig(fps=30, teleop_time_s=1))
teleoperate(robot, TeleoperateControlConfig(fps=60, teleop_time_s=1))
del robot
@pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES)
@require_robot
def test_calibrate(tmpdir, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock:
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
tmpdir = Path(tmpdir)
calibration_dir = tmpdir / robot_type
robot_kwargs["calibration_dir"] = calibration_dir
robot = make_robot(**robot_kwargs)
calib_cfg = CalibrateControlConfig(arms=robot.available_arms)
calibrate(robot, calib_cfg)
del robot
@pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES)
@require_robot
def test_record_without_cameras(tmpdir, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
# Avoid using cameras
robot_kwargs["cameras"] = {}
if mock and robot_type != "aloha":
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = Path(tmpdir) / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
repo_id = "lerobot/debug"
root = Path(tmpdir) / "data" / repo_id
single_task = "Do something."
robot = make_robot(**robot_kwargs)
rec_cfg = RecordControlConfig(
repo_id=repo_id,
single_task=single_task,
root=root,
fps=30,
warmup_time_s=0.1,
episode_time_s=1,
reset_time_s=0.1,
num_episodes=2,
run_compute_stats=False,
push_to_hub=False,
video=False,
play_sounds=False,
)
record(robot, rec_cfg)
@pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES)
@require_robot
def test_record_and_replay_and_policy(tmpdir, request, robot_type, mock):
tmpdir = Path(tmpdir)
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock and robot_type != "aloha":
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmpdir / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
repo_id = "lerobot_test/debug"
root = tmpdir / "data" / repo_id
single_task = "Do something."
robot = make_robot(**robot_kwargs)
rec_cfg = RecordControlConfig(
repo_id=repo_id,
single_task=single_task,
root=root,
fps=1,
warmup_time_s=0.1,
episode_time_s=1,
reset_time_s=0.1,
num_episodes=2,
push_to_hub=False,
# TODO(rcadene, aliberts): test video=True
video=False,
# TODO(rcadene): display cameras through cv2 sometimes crashes on mac
display_cameras=False,
play_sounds=False,
)
dataset = record(robot, rec_cfg)
assert dataset.meta.total_episodes == 2
assert len(dataset) == 2
replay_cfg = ReplayControlConfig(
episode=0, fps=1, root=root, repo_id=repo_id, play_sounds=False, local_files_only=True
)
replay(robot, replay_cfg)
policy_cfg = ACTConfig()
policy = make_policy(policy_cfg, ds_meta=dataset.meta, device=DEVICE)
out_dir = tmpdir / "logger"
ds_cfg = DatasetConfig(repo_id, local_files_only=True)
train_cfg = TrainPipelineConfig(
dataset=ds_cfg,
policy=policy_cfg,
output_dir=out_dir,
device=DEVICE,
)
logger = Logger(train_cfg)
logger.save_checkpoint(
train_step=0,
identifier=0,
policy=policy,
)
pretrained_policy_path = out_dir / "checkpoints/last/pretrained_model"
# In `examples/9_use_aloha.md`, we advise using `num_image_writer_processes=1`
    # during inference, to reach a consistent fps, so we test this here.
if robot_type == "aloha":
num_image_writer_processes = 1
# `multiprocessing.set_start_method("spawn", force=True)` avoids a hanging issue
# before exiting pytest. However, it outputs the following error in the log:
# Traceback (most recent call last):
# File "<string>", line 1, in <module>
# File "/Users/rcadene/miniconda3/envs/lerobot/lib/python3.10/multiprocessing/spawn.py", line 116, in spawn_main
# exitcode = _main(fd, parent_sentinel)
# File "/Users/rcadene/miniconda3/envs/lerobot/lib/python3.10/multiprocessing/spawn.py", line 126, in _main
# self = reduction.pickle.load(from_parent)
# File "/Users/rcadene/miniconda3/envs/lerobot/lib/python3.10/multiprocessing/synchronize.py", line 110, in __setstate__
# self._semlock = _multiprocessing.SemLock._rebuild(*state)
# FileNotFoundError: [Errno 2] No such file or directory
# TODO(rcadene, aliberts): fix FileNotFoundError in multiprocessing
multiprocessing.set_start_method("spawn", force=True)
else:
num_image_writer_processes = 0
eval_repo_id = "lerobot/eval_debug"
eval_root = tmpdir / "data" / eval_repo_id
rec_eval_cfg = RecordControlConfig(
repo_id=eval_repo_id,
root=eval_root,
single_task=single_task,
fps=1,
warmup_time_s=0.1,
episode_time_s=1,
reset_time_s=0.1,
num_episodes=2,
run_compute_stats=False,
push_to_hub=False,
video=False,
display_cameras=False,
play_sounds=False,
num_image_writer_processes=num_image_writer_processes,
device=DEVICE,
use_amp=False,
)
rec_eval_cfg.policy = PreTrainedConfig.from_pretrained(pretrained_policy_path)
rec_eval_cfg.policy.pretrained_path = pretrained_policy_path
dataset = record(robot, rec_eval_cfg)
assert dataset.num_episodes == 2
assert len(dataset) == 2
del robot
@pytest.mark.parametrize("robot_type, mock", [("koch", True)])
@require_robot
def test_resume_record(tmpdir, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock and robot_type != "aloha":
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmpdir / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
robot = make_robot(**robot_kwargs)
repo_id = "lerobot/debug"
root = Path(tmpdir) / "data" / repo_id
single_task = "Do something."
rec_cfg = RecordControlConfig(
repo_id=repo_id,
root=root,
single_task=single_task,
fps=1,
warmup_time_s=0,
episode_time_s=1,
push_to_hub=False,
video=False,
display_cameras=False,
play_sounds=False,
run_compute_stats=False,
local_files_only=True,
num_episodes=1,
)
dataset = record(robot, rec_cfg)
assert len(dataset) == 1, f"`dataset` should contain 1 frame, not {len(dataset)}"
with pytest.raises(FileExistsError):
# Dataset already exists, but resume=False by default
record(robot, rec_cfg)
rec_cfg.resume = True
dataset = record(robot, rec_cfg)
assert len(dataset) == 2, f"`dataset` should contain 2 frames, not {len(dataset)}"
@pytest.mark.parametrize("robot_type, mock", [("koch", True)])
@require_robot
def test_record_with_event_rerecord_episode(tmpdir, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock and robot_type != "aloha":
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmpdir / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
robot = make_robot(**robot_kwargs)
with patch("lerobot.scripts.control_robot.init_keyboard_listener") as mock_listener:
mock_events = {}
mock_events["exit_early"] = True
mock_events["rerecord_episode"] = True
mock_events["stop_recording"] = False
mock_listener.return_value = (None, mock_events)
repo_id = "lerobot/debug"
root = Path(tmpdir) / "data" / repo_id
single_task = "Do something."
rec_cfg = RecordControlConfig(
repo_id=repo_id,
root=root,
single_task=single_task,
fps=1,
warmup_time_s=0,
episode_time_s=1,
num_episodes=1,
push_to_hub=False,
video=False,
display_cameras=False,
play_sounds=False,
run_compute_stats=False,
)
dataset = record(robot, rec_cfg)
assert not mock_events["rerecord_episode"], "`rerecord_episode` wasn't properly reset to False"
assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False"
assert len(dataset) == 1, "`dataset` should contain only 1 frame"
@pytest.mark.parametrize("robot_type, mock", [("koch", True)])
@require_robot
def test_record_with_event_exit_early(tmpdir, request, robot_type, mock):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock:
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmpdir / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
robot = make_robot(**robot_kwargs)
with patch("lerobot.scripts.control_robot.init_keyboard_listener") as mock_listener:
mock_events = {}
mock_events["exit_early"] = True
mock_events["rerecord_episode"] = False
mock_events["stop_recording"] = False
mock_listener.return_value = (None, mock_events)
repo_id = "lerobot/debug"
root = Path(tmpdir) / "data" / repo_id
single_task = "Do something."
rec_cfg = RecordControlConfig(
repo_id=repo_id,
root=root,
single_task=single_task,
fps=2,
warmup_time_s=0,
episode_time_s=1,
num_episodes=1,
push_to_hub=False,
video=False,
display_cameras=False,
play_sounds=False,
run_compute_stats=False,
)
dataset = record(robot, rec_cfg)
assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False"
assert len(dataset) == 1, "`dataset` should contain only 1 frame"
@pytest.mark.parametrize(
"robot_type, mock, num_image_writer_processes", [("koch", True, 0), ("koch", True, 1)]
)
@require_robot
def test_record_with_event_stop_recording(tmpdir, request, robot_type, mock, num_image_writer_processes):
robot_kwargs = {"robot_type": robot_type, "mock": mock}
if mock:
request.getfixturevalue("patch_builtins_input")
# Create an empty calibration directory to trigger manual calibration
# and avoid writing calibration files in user .cache/calibration folder
calibration_dir = tmpdir / robot_type
mock_calibration_dir(calibration_dir)
robot_kwargs["calibration_dir"] = calibration_dir
else:
# Use the default .cache/calibration folder when mock=False
pass
robot = make_robot(**robot_kwargs)
with patch("lerobot.scripts.control_robot.init_keyboard_listener") as mock_listener:
mock_events = {}
mock_events["exit_early"] = True
mock_events["rerecord_episode"] = False
mock_events["stop_recording"] = True
mock_listener.return_value = (None, mock_events)
repo_id = "lerobot/debug"
root = Path(tmpdir) / "data" / repo_id
single_task = "Do something."
rec_cfg = RecordControlConfig(
repo_id=repo_id,
root=root,
single_task=single_task,
fps=1,
warmup_time_s=0,
episode_time_s=1,
reset_time_s=0.1,
num_episodes=2,
push_to_hub=False,
video=False,
display_cameras=False,
play_sounds=False,
run_compute_stats=False,
num_image_writer_processes=num_image_writer_processes,
)
dataset = record(robot, rec_cfg)
assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False"
assert len(dataset) == 1, "`dataset` should contain only 1 frame"
| lerobot/tests/test_control_robot.py/0 | {
"file_path": "lerobot/tests/test_control_robot.py",
"repo_id": "lerobot",
"token_count": 6882
} |
# Open R1
*A fully open reproduction of DeepSeek-R1. This repo is a work in progress, let's build it together!*
**Table of Contents**
1. [Overview](#overview)
2. [Plan of attack](#plan-of-attack)
3. [Installation](#installation)
4. [Training models](#training-models)
- [SFT](#sft)
- [GRPO](#grpo)
5. [Evaluating models](#evaluating-models)
6. [Reproducing DeepSeek's evaluation results](#reproducing-deepseeks-evaluation-results)
7. [Data generation](#data-generation)
- [Generate data from a smol distilled R1 model](#generate-data-from-a-smol-distilled-r1-model)
- [Generate data from DeepSeek-R1](#generate-data-from-deepseek-r1)
8. [Contributing](#contributing)
## Overview
The goal of this repo is to build the missing pieces of the R1 pipeline such that everybody can reproduce and build on top of it. The project is simple by design and mostly consists of:
- `src/open_r1`: contains the scripts to train and evaluate models as well as generate synthetic data:
- `grpo.py`: trains a model with GRPO on a given dataset.
- `sft.py`: performs a simple SFT of a model on a dataset.
- `evaluate.py`: evaluates a model on the R1 benchmarks.
- `generate.py`: generates synthetic data from a model using [Distilabel](https://github.com/argilla-io/distilabel).
- `Makefile`: contains easy-to-run commands for each step in the R1 pipeline leveraging the scripts above.
### Plan of attack
We will use the DeepSeek-R1 [tech report](https://github.com/deepseek-ai/DeepSeek-R1) as a guide, which can roughly be broken down into three main steps:
* Step 1: replicate the R1-Distill models by distilling a high-quality corpus from DeepSeek-R1.
* Step 2: replicate the pure RL pipeline that DeepSeek used to create R1-Zero. This will likely involve curating new, large-scale datasets for math, reasoning, and code.
* Step 3: show we can go from base model to RL-tuned via multi-stage training.
<center>
<img src="assets/plan-of-attack.png" width="500">
</center>
## Installation
> [!CAUTION]
> Libraries rely on CUDA 12.4. If you see errors related to segmentation faults, double check the version your system is running with `nvcc --version`.
To run the code in this project, first, create a Python virtual environment using e.g. `uv`.
To install `uv`, follow the [UV Installation Guide](https://docs.astral.sh/uv/getting-started/installation/).
```shell
uv venv openr1 --python 3.11 && source openr1/bin/activate && uv pip install --upgrade pip --link-mode=copy
```
Next, install vLLM:
```shell
uv pip install vllm==0.7.1 --link-mode=copy
```
This will also install PyTorch `v2.5.1` and it is **very important** to use this version since the vLLM binaries are compiled for it. You can then install the remaining dependencies for your specific use case via `pip install -e .[LIST OF MODES]`. For most contributors, we recommend:
```shell
GIT_LFS_SKIP_SMUDGE=1 uv pip install -e ".[dev]" --link-mode=copy
```
Next, log into your Hugging Face and Weights and Biases accounts as follows:
```shell
huggingface-cli login
wandb login
```
Finally, check whether your system has Git LFS installed so that you can load and push models/datasets to the Hugging Face Hub:
```shell
git-lfs --version
```
If it isn't installed, run:
```shell
sudo apt-get install git-lfs
```
## Training models
We support training models with either DDP or DeepSpeed (ZeRO-2 and ZeRO-3). For example, to run SFT on a dataset distilled from DeepSeek-R1 with reasoning traces such as [Bespoke-Stratos-17k](https://huggingface.co/datasets/bespokelabs/Bespoke-Stratos-17k), run:
```shell
# Train via command line
accelerate launch --config_file=recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \
--model_name_or_path Qwen/Qwen2.5-1.5B-Instruct \
--dataset_name HuggingFaceH4/Bespoke-Stratos-17k \
--learning_rate 2.0e-5 \
--num_train_epochs 1 \
--packing \
--max_seq_length 4096 \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 8 \
--gradient_checkpointing \
--bf16 \
--output_dir data/Qwen2.5-1.5B-Open-R1-Distill
# Train via YAML config
accelerate launch --config_file recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \
--config recipes/Qwen2.5-1.5B-Instruct/sft/config_demo.yaml
```
Currently, the following tasks are supported:
* Supervised Fine-Tuning `sft`
* Group Relative Policy Optimization `grpo`
> [!TIP]
> If you scale up or down the number of GPUs, we recommend also adjusting the per-device batch size or the number of gradient accumulation steps to keep the global batch size constant (global batch size = number of GPUs × per-device batch size × gradient accumulation steps, e.g. 8 × 2 × 8 = 128).
By default, these scripts will push each model to your Hugging Face Hub username, i.e. `{username}/{model_name}-{task}`. You can override the parameters in each YAML config by appending them to the command as follows:
```shell
# Change batch size, number of epochs etc
accelerate launch --config_file recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \
    --config recipes/Qwen2.5-1.5B-Instruct/sft/config_demo.yaml \
--per_device_train_batch_size=1 --num_train_epochs=5
```
> [!NOTE]
> The training commands below are configured for a node of 8 x H100s (80GB). For different hardware and topologies, you may need to tune the batch size and number of gradient accumulation steps.
### SFT
To run SFT on a dataset distilled from DeepSeek-R1 with reasoning traces such as [Bespoke-Stratos-17k](https://huggingface.co/datasets/bespokelabs/Bespoke-Stratos-17k), run:
```shell
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/zero3.yaml \
src/open_r1/sft.py \
--config recipes/Qwen2.5-1.5B-Instruct/sft/config_demo.yaml
```
### GRPO
To train via the GRPO trainer, we use one GPU to run vLLM for faster generation and the remaining GPUs for training. For example, on a node with 8 GPUs, use the `recipes/accelerate_configs/zero3.yaml` config and then override `num_processes` to run on 7 devices:
```shell
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/zero3.yaml \
--num_processes=7 src/open_r1/grpo.py \
--config recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo.yaml
```
We provide a minimal reproducible experiment using GRPO for mathematical reasoning, referencing the approach from [SimpleRL-Reason](https://hkust-nlp.notion.site/simplerl-reason), which uses a 7B model trained on 8K examples. Running this on 8× H100 80GB GPUs takes about 3 hours:
```shell
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/zero2.yaml \
--num_processes=7 src/open_r1/grpo.py \
--config recipes/Qwen2.5-Math-7B/grpo/config_simple_rl.yaml
```
Our final [model](https://huggingface.co/Dongwei/Qwen-2.5-7B_Base_Math_smalllr), although trained with different learning rates, loss functions, and reward structures, achieves 69.4% accuracy on MATH-500, a 17%+ improvement over the base model.
### Launching jobs on a Slurm cluster
If you have access to a Slurm cluster, we provide a `slurm/train.slurm` script that will automatically queue training jobs for you. Here's how you can use it:
```shell
sbatch --job-name=open_r1 --nodes=1 slurm/train.slurm {model_name} {task} {config_suffix} {accelerator}
```
Here `{model_name}` and `{task}` are defined as above, while `{config_suffix}` refers to the specific config and `{accelerator}` refers to the choice of 🤗 Accelerate config in `recipes/accelerate_configs`. If you wish to override the default config parameters, you can provide them by appending a space-separated string like `'--arg1=value1 --arg2=value2'`. Here's a concrete example to run SFT on 1 node of 8 GPUs:
```shell
# Launch on Slurm and override default hyperparameters
sbatch --job-name=open_r1 --nodes=1 slurm/train.slurm Qwen2.5-1.5B-Instruct sft demo zero3 '--per_device_train_batch_size=1 --num_train_epochs=5'
```
You can scale the number of nodes by increasing the `--nodes` flag.
> [!NOTE]
> The configuration in `slurm/train.slurm` is optimised for the Hugging Face Compute Cluster and may require tweaking to be adapted to your own compute nodes.
## Evaluating models
We use `lighteval` to evaluate models, with custom tasks defined in `src/open_r1/evaluate.py`. For models which fit on a single GPU, run:
```shell
MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
MODEL_ARGS="pretrained=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilisation=0.8"
OUTPUT_DIR=data/evals/$MODEL
# AIME 2024
TASK=aime24
lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
--custom-tasks src/open_r1/evaluate.py \
--use-chat-template \
--output-dir $OUTPUT_DIR
# MATH-500
TASK=math_500
lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
--custom-tasks src/open_r1/evaluate.py \
--use-chat-template \
--output-dir $OUTPUT_DIR
# GPQA Diamond
TASK=gpqa:diamond
lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
--custom-tasks src/open_r1/evaluate.py \
--use-chat-template \
--output-dir $OUTPUT_DIR
```
> [!IMPORTANT]
> You must set `max_model_length=32768` in the `vllm` command to align with the `generation_size` we define per eval. Without this, `lighteval` will throw an error.
To increase throughput across multiple GPUs, use _data parallel_ as follows:
```shell
NUM_GPUS=8
MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
MODEL_ARGS="pretrained=$MODEL,dtype=bfloat16,data_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilisation=0.8"
TASK=aime24
OUTPUT_DIR=data/evals/$MODEL
lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
--custom-tasks src/open_r1/evaluate.py \
--use-chat-template \
--output-dir $OUTPUT_DIR
```
For large models which require sharding across GPUs, use _tensor parallel_ and run:
```shell
NUM_GPUS=8
MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
MODEL_ARGS="pretrained=$MODEL,dtype=bfloat16,tensor_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilisation=0.8"
TASK=aime24
OUTPUT_DIR=data/evals/$MODEL
export VLLM_WORKER_MULTIPROC_METHOD=spawn
lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
--custom-tasks src/open_r1/evaluate.py \
--use-chat-template \
--output-dir $OUTPUT_DIR
```
You can also launch an evaluation with `make evaluate`, specifying the model, task, and optionally the parallelism technique and number of GPUs.
To evaluate on a single GPU:
```shell
make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24
```
To use Data Parallelism:
```shell
make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=data NUM_GPUS=8
```
To use Tensor Parallelism:
```shell
make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=tensor NUM_GPUS=8
```
## Reproducing DeepSeek's evaluation results
> [!NOTE]
> The DeepSeek-R1 paper uses sampling with a temperature of 0.6, a top-p value of 0.95, and 64 responses per query to estimate `pass@1`. Below, we report the results from greedy decoding, which likely explains the small 1-3σ discrepancies between our results and theirs.
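For reference, here is a hedged sketch (the function and variable names are illustrative, not taken from the DeepSeek or open-r1 code) of how `pass@1` can be estimated from multiple sampled responses per query:

```python
def estimate_pass_at_1(correct_flags: list[list[bool]]) -> float:
    """Average the per-query success rate over the sampled responses."""
    per_query = [sum(flags) / len(flags) for flags in correct_flags]
    return sum(per_query) / len(per_query)


# Two queries with four samples each: (3/4 + 1/4) / 2 = 0.5
print(estimate_pass_at_1([[True, False, True, True], [False, False, True, False]]))
```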
### MATH-500
We are able to reproduce DeepSeek's reported results on the MATH-500 benchmark within ~1-3 standard deviations:
| Model | MATH-500 (🤗 LightEval) | MATH-500 (DeepSeek Reported) |
|:------------------------------|:-----------------------:|:----------------------------:|
| DeepSeek-R1-Distill-Qwen-1.5B | 81.2 | 83.9 |
| DeepSeek-R1-Distill-Qwen-7B | 91.8 | 92.8 |
| DeepSeek-R1-Distill-Qwen-14B | 94.2 | 93.9 |
| DeepSeek-R1-Distill-Qwen-32B | 95.0 | 94.3 |
| DeepSeek-R1-Distill-Llama-8B | 85.4 | 89.1 |
| DeepSeek-R1-Distill-Llama-70B | 93.4 | 94.5 |
To reproduce these results, use the following command:
```shell
NUM_GPUS=1 # Set to 8 for 32B and 70B models
MODEL=deepseek-ai/{model_name}
MODEL_ARGS="pretrained=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilisation=0.8,tensor_parallel_size=$NUM_GPUS"
OUTPUT_DIR=data/evals/$MODEL
lighteval vllm $MODEL_ARGS "custom|math_500|0|0" \
--custom-tasks src/open_r1/evaluate.py \
--use-chat-template \
--output-dir $OUTPUT_DIR
```
Alternatively, you can launch Slurm jobs as follows:
```shell
python scripts/run_benchmarks.py --model-id={model_id} --benchmarks math_500
```
### GPQA Diamond
We are able to reproduce DeepSeek's reported results on the GPQA Diamond benchmark within ~1-3 standard deviations:
| Model | GPQA Diamond (🤗 LightEval) | GPQA Diamond (DeepSeek Reported) |
|:------------------------------|:---------------------------:|:--------------------------------:|
| DeepSeek-R1-Distill-Qwen-1.5B | 33.3 | 33.8 |
| DeepSeek-R1-Distill-Qwen-7B | 48.4 | 49.1 |
| DeepSeek-R1-Distill-Qwen-14B | 55.6 | 59.1 |
| DeepSeek-R1-Distill-Qwen-32B | 58.6 | 62.1 |
| DeepSeek-R1-Distill-Llama-8B | 51.0 | 49.0 |
| DeepSeek-R1-Distill-Llama-70B | 65.2 | 65.2 |
To reproduce these results, use the following command:
```shell
NUM_GPUS=1 # Set to 8 for 32B and 70B models
MODEL=deepseek-ai/{model_name}
MODEL_ARGS="pretrained=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilisation=0.8,tensor_parallel_size=$NUM_GPUS"
OUTPUT_DIR=data/evals/$MODEL
lighteval vllm $MODEL_ARGS "custom|gpqa:diamond|0|0" \
--custom-tasks src/open_r1/evaluate.py \
--use-chat-template \
--output-dir $OUTPUT_DIR
```
Alternatively, you can launch Slurm jobs as follows:
```shell
python scripts/run_benchmarks.py --model-id={model_id} --benchmarks gpqa
```
## Data generation
### Generate data from a smol distilled R1 model
The following example can be run on a single H100.
First, install the following dependencies:
```shell
uv pip install "distilabel[vllm]>=1.5.2"
```
Now save the following snippet into a file named `pipeline.py` and run it with `python pipeline.py`. It will generate 4 outputs for each of the 10 examples (change the repository username to your own org or username):
```python
from datasets import load_dataset
from distilabel.models import vLLM
from distilabel.pipeline import Pipeline
from distilabel.steps.tasks import TextGeneration
prompt_template = """\
You will be given a problem. Please reason step by step, and put your final answer within \\boxed{}:
{{ instruction }}"""
dataset = load_dataset("AI-MO/NuminaMath-TIR", split="train").select(range(10))
model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B" # Exchange with another smol distilled r1
with Pipeline(
name="distill-qwen-7b-r1",
description="A pipeline to generate data from a distilled r1 model",
) as pipeline:
llm = vLLM(
model=model_id,
tokenizer=model_id,
extra_kwargs={
"tensor_parallel_size": 1,
"max_model_len": 8192,
},
generation_kwargs={
"temperature": 0.6,
"max_new_tokens": 8192,
},
)
prompt_column = "problem"
text_generation = TextGeneration(
llm=llm,
template=prompt_template,
num_generations=4,
input_mappings={"instruction": prompt_column} if prompt_column is not None else {}
)
if __name__ == "__main__":
distiset = pipeline.run(dataset=dataset)
distiset.push_to_hub(repo_id="username/numina-deepseek-r1-qwen-7b")
```
Take a look at the sample dataset at [HuggingFaceH4/numina-deepseek-r1-qwen-7b](https://huggingface.co/datasets/HuggingFaceH4/numina-deepseek-r1-qwen-7b).
### Generate data from DeepSeek-R1
To run the larger DeepSeek-R1 model, we used 2 nodes, each with 8×H100 GPUs, using the Slurm file present in this repo at `slurm/generate.slurm`. First, install the dependencies
(for now, we need to install a vLLM dev wheel that [fixes the R1 CUDA graph capture](https://github.com/vllm-project/vllm/commits/221d388cc5a836fa189305785ed7e887cea8b510/csrc/moe/moe_align_sum_kernels.cu)):
```shell
pip install https://wheels.vllm.ai/221d388cc5a836fa189305785ed7e887cea8b510/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl --extra-index-url https://download.pytorch.org/whl/cu121
uv pip install "distilabel[vllm,ray,openai]>=1.5.2"
```
And then run the following command:
```shell
sbatch slurm/generate.slurm \
--hf-dataset AI-MO/NuminaMath-TIR \
--temperature 0.6 \
--prompt-column problem \
--model deepseek-ai/DeepSeek-R1 \
--hf-output-dataset username/r1-dataset
```
> [!NOTE]
> While the job is running, you can set up an SSH tunnel through the cluster login node to access the Ray dashboard from your computer: run `ssh -L 8265:ray_ip_head_node:8265 <login_node>`, then browse to `http://localhost:8265`.
## Contributing
Contributions are welcome. Please refer to https://github.com/huggingface/open-r1/issues/23.
| open-r1/README.md/0 | {
"file_path": "open-r1/README.md",
"repo_id": "open-r1",
"token_count": 6695
} |
#!/bin/bash
#SBATCH --job-name=open-r1-sft
#SBATCH --ntasks-per-node=1
#SBATCH --exclusive
#SBATCH --gres=gpu:8
#SBATCH --partition=hopper-prod # Adjust this for your cluster
#SBATCH --output=./logs/%x-%j.out
#SBATCH --err=./logs/%x-%j.err
#SBATCH --requeue
# Specific configuration optimized for the Hugging Face Compute Cluster
# Be ye warned this may not work on other clusters!
module load cuda/12.4
set -x -e
source ~/.bashrc
source openr1/bin/activate
echo "START TIME: $(date)"
MODEL=$1
TASK=$2
CONFIG_SUFFIX=$3
ACCELERATOR=$4
OPTIONAL_ARGS=$5
# Training setup
NUM_NODES=$SLURM_NNODES
GPUS_PER_NODE=8
WORLD_SIZE=$(($NUM_NODES*$GPUS_PER_NODE))
# Due to conflicts between Accelerate's DeepSpeed configs and Transformers' TrainingArguments, we need to parse the gradient accumulation steps from the config file to ensure they match
CONFIG_FILE=recipes/$MODEL/$TASK/config_$CONFIG_SUFFIX.yaml
GRAD_ACC_STEPS=$(grep 'gradient_accumulation_steps' $CONFIG_FILE | awk '{print $2}')
# Split the string into individual arguments
IFS=' ' read -ra ARGS <<< "$OPTIONAL_ARGS"
# Loop through the arguments and find the one with "--gradient_accumulation_steps"
for arg in "${ARGS[@]}"; do
if [[ "$arg" == "--gradient_accumulation_steps="* ]]; then
# Extract the value after the equals sign
GRAD_ACC_STEPS="${arg#*=}"
break # Exit the loop once we find the desired argument
fi
done
echo "Gradient accumulation steps: $GRAD_ACC_STEPS"
# so processes know who to talk to
MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
MASTER_PORT=6000
export CMD=" \
src/open_r1/$TASK.py --config $CONFIG_FILE $OPTIONAL_ARGS
"
export LAUNCHER="HF_HUB_ENABLE_HF_TRANSFER=1 ACCELERATE_LOG_LEVEL=info TRANSFORMERS_VERBOSITY=info accelerate launch \
--config_file recipes/accelerate_configs/$ACCELERATOR.yaml \
--gradient_accumulation_steps $GRAD_ACC_STEPS \
--num_machines $NUM_NODES \
--num_processes $WORLD_SIZE \
--main_process_ip $MASTER_ADDR \
--main_process_port $MASTER_PORT \
--machine_rank \$SLURM_PROCID \
--rdzv_conf "rdzv_backend=c10d,rdzv_endpoint=$MASTER_ADDR:$MASTER_PORT" \
--max_restarts 1 \
--role \$(hostname -s): \
--tee 3 \
"
# force crashing on nccl issues like hanging broadcast
export NCCL_ASYNC_ERROR_HANDLING=1
# export NCCL_DEBUG=INFO
# export NCCL_DEBUG_SUBSYS=COLL
# export NCCL_SOCKET_NTHREADS=1
# export NCCL_NSOCKS_PERTHREAD=1
# export CUDA_LAUNCH_BLOCKING=1
# srun error handling:
# --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks
# --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code
SRUN_ARGS=" \
--wait=60 \
--kill-on-bad-exit=1 \
"
clear; srun $SRUN_ARGS --jobid $SLURM_JOB_ID bash -c "$LAUNCHER --role \$SLURMD_NODENAME: $CMD" 2>&1
echo "END TIME: $(date)" | open-r1/slurm/train.slurm/0 | {
"file_path": "open-r1/slurm/train.slurm",
"repo_id": "open-r1",
"token_count": 1146
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# PEFT checkpoint format
This document describes how PEFT's checkpoint files are structured and how to convert between the PEFT format and other formats.
## PEFT files
PEFT (parameter-efficient fine-tuning) methods only update a small subset of a model's parameters rather than all of them. This is nice because checkpoint files can generally be much smaller than the original model files and are easier to store and share. However, this also means that to load a PEFT model, you need to have the original model available as well.
When you call [`~PeftModel.save_pretrained`] on a PEFT model, it saves three files, described below (a minimal usage sketch follows the list):
1. `adapter_model.safetensors` or `adapter_model.bin`
By default, the model is saved in the `safetensors` format, a secure alternative to the `bin` format, which is known to be susceptible to [security vulnerabilities](https://huggingface.co/docs/hub/security-pickle) because it uses the pickle utility under the hood. Both formats store the same `state_dict` though, and are interchangeable.
The `state_dict` only contains the parameters of the adapter module, not the base model. To illustrate the difference in size, a normal BERT model requires ~420MB of disk space, whereas an IA³ adapter on top of this BERT model only requires ~260KB.
2. `adapter_config.json`
The `adapter_config.json` file contains the configuration of the adapter module, which is necessary to load the model. Below is an example of an `adapter_config.json` for an IA³ adapter with standard settings applied to a BERT model:
```json
{
"auto_mapping": {
"base_model_class": "BertModel",
"parent_library": "transformers.models.bert.modeling_bert"
},
"base_model_name_or_path": "bert-base-uncased",
"fan_in_fan_out": false,
"feedforward_modules": [
"output.dense"
],
"inference_mode": true,
"init_ia3_weights": true,
"modules_to_save": null,
"peft_type": "IA3",
"revision": null,
"target_modules": [
"key",
"value",
"output.dense"
],
"task_type": null
}
```
The configuration file contains:
- the adapter module type stored, `"peft_type": "IA3"`
- information about the base model like `"base_model_name_or_path": "bert-base-uncased"`
- the revision of the model (if any), `"revision": null`
If the base model is not a pretrained Transformers model, the latter two entries will be `null`. Other than that, the settings are all related to the specific IA³ adapter that was used to fine-tune the model.
3. `README.md`
The generated `README.md` is the model card of a PEFT model and contains a few pre-filled entries. The intent of this is to make it easier to share the model with others and to provide some basic information about the model. This file is not needed to load the model.
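As a minimal sketch of how these files are produced (assuming a LoRA adapter on `bert-base-uncased` with default settings; the exact directory contents depend on your PEFT version):

```python
from transformers import AutoModel
from peft import LoraConfig, get_peft_model

base_model = AutoModel.from_pretrained("bert-base-uncased")
peft_model = get_peft_model(base_model, LoraConfig())  # default LoRA settings
peft_model.save_pretrained("my-lora-adapter")
# my-lora-adapter/ now contains adapter_model.safetensors, adapter_config.json and README.md
```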
## Convert to PEFT format
When converting from another format to the PEFT format, we require both the `adapter_model.safetensors` (or `adapter_model.bin`) file and the `adapter_config.json` file.
### adapter_model
For the model weights, it is important to use the correct mapping from parameter name to value for PEFT to load the file. Getting this mapping right is an exercise in checking the implementation details, as there is no generally agreed upon format for PEFT adapters.
Fortunately, figuring out this mapping is not overly complicated for common base cases. Let's look at a concrete example, the [`LoraLayer`](https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/layer.py):
```python
# showing only part of the code
class LoraLayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B")
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout")
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.r = {}
self.lora_alpha = {}
self.scaling = {}
self.lora_dropout = nn.ModuleDict({})
self.lora_A = nn.ModuleDict({})
self.lora_B = nn.ModuleDict({})
# For Embedding layer
self.lora_embedding_A = nn.ParameterDict({})
self.lora_embedding_B = nn.ParameterDict({})
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.use_dora: dict[str, bool] = {}
self.lora_magnitude_vector: Optional[torch.nn.ParameterDict] = None # for DoRA
self._caches: dict[str, Any] = {}
self.kwargs = kwargs
```
In the `__init__` code used by all `LoraLayer` classes in PEFT, there are a bunch of parameters used to initialize the model, but only a few are relevant for the checkpoint file: `lora_A`, `lora_B`, `lora_embedding_A`, and `lora_embedding_B`. These parameters are listed in the class attribute `adapter_layer_names` and contain the learnable parameters, so they must be included in the checkpoint file. All the other parameters, like the rank `r`, are derived from the `adapter_config.json` and must be included there (unless the default value is used).
Let's check the `state_dict` of a PEFT LoRA model applied to BERT. When printing the first five keys using the default LoRA settings (the remaining keys are the same, just with different layer numbers), we get:
- `base_model.model.encoder.layer.0.attention.self.query.lora_A.weight`
- `base_model.model.encoder.layer.0.attention.self.query.lora_B.weight`
- `base_model.model.encoder.layer.0.attention.self.value.lora_A.weight`
- `base_model.model.encoder.layer.0.attention.self.value.lora_B.weight`
- `base_model.model.encoder.layer.1.attention.self.query.lora_A.weight`
- etc.
Let's break this down:
- By default, for BERT models, LoRA is applied to the `query` and `value` layers of the attention module. This is why you see `attention.self.query` and `attention.self.value` in the key names for each layer.
- LoRA decomposes the weights into two low-rank matrices, `lora_A` and `lora_B`. This is where `lora_A` and `lora_B` come from in the key names.
- These LoRA matrices are implemented as `nn.Linear` layers, so the parameters are stored in the `.weight` attribute (`lora_A.weight`, `lora_B.weight`).
- By default, LoRA isn't applied to BERT's embedding layer, so there are _no entries_ for `lora_A_embedding` and `lora_B_embedding`.
- The keys of the `state_dict` always start with `"base_model.model."`. The reason is that, in PEFT, we wrap the base model inside a tuner-specific model (`LoraModel` in this case), which itself is wrapped in a general PEFT model (`PeftModel`). For this reason, these two prefixes are added to the keys. When converting to the PEFT format, it is required to add these prefixes.
<Tip>
This last point is not true for prefix tuning techniques like prompt tuning. There, the extra embeddings are directly stored in the `state_dict` without any prefixes added to the keys.
</Tip>
When inspecting the parameter names in the loaded model, you might be surprised to find that they look a bit different, e.g. `base_model.model.encoder.layer.0.attention.self.query.lora_A.default.weight`. The difference is the *`.default`* part in the second to last segment. This part exists because PEFT generally allows the addition of multiple adapters at once (using an `nn.ModuleDict` or `nn.ParameterDict` to store them). For example, if you add another adapter called "other", the key for that adapter would be `base_model.model.encoder.layer.0.attention.self.query.lora_A.other.weight`.
When you call [`~PeftModel.save_pretrained`], the adapter name is stripped from the keys. The reason is that the adapter name is not an important part of the model architecture; it is just an arbitrary name. When loading the adapter, you could choose a totally different name, and the model would still work the same way. This is why the adapter name is not stored in the checkpoint file.
<Tip>
If you call `save_pretrained("some/path")` and the adapter name is not `"default"`, the adapter is stored in a sub-directory with the same name as the adapter. So if the name is "other", it would be stored inside of `some/path/other`.
</Tip>
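To make the naming scheme above concrete, here is a hedged sketch (assuming `bert-base-uncased` with default LoRA settings) that contrasts the parameter names in a live PEFT model with the keys stored in the checkpoint file:

```python
from safetensors.torch import load_file
from transformers import AutoModel
from peft import LoraConfig, get_peft_model

model = get_peft_model(AutoModel.from_pretrained("bert-base-uncased"), LoraConfig())

# In the live model, the adapter name ("default") is part of the parameter names, e.g.
# base_model.model.encoder.layer.0.attention.self.query.lora_A.default.weight
print([name for name, _ in model.named_parameters() if "lora_" in name][:2])

model.save_pretrained("my-lora-adapter")

# In the checkpoint file, the adapter name is stripped, e.g.
# base_model.model.encoder.layer.0.attention.self.query.lora_A.weight
print(list(load_file("my-lora-adapter/adapter_model.safetensors").keys())[:2])
```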
In some circumstances, deciding which values to add to the checkpoint file can become a bit more complicated. For example, in PEFT, DoRA is implemented as a special case of LoRA. If you want to convert a DoRA model to PEFT, you should create a LoRA checkpoint with extra entries for DoRA. You can see this in the `__init__` of the previous `LoraLayer` code:
```python
self.lora_magnitude_vector: Optional[torch.nn.ParameterDict] = None # for DoRA
```
This indicates that there is an optional extra parameter per layer for DoRA.
### adapter_config
All the other information needed to load a PEFT model is contained in the `adapter_config.json` file. Let's check this file for a LoRA model applied to BERT:
```json
{
"alpha_pattern": {},
"auto_mapping": {
"base_model_class": "BertModel",
"parent_library": "transformers.models.bert.modeling_bert"
},
"base_model_name_or_path": "bert-base-uncased",
"bias": "none",
"fan_in_fan_out": false,
"inference_mode": true,
"init_lora_weights": true,
"layer_replication": null,
"layers_pattern": null,
"layers_to_transform": null,
"loftq_config": {},
"lora_alpha": 8,
"lora_dropout": 0.0,
"megatron_config": null,
"megatron_core": "megatron.core",
"modules_to_save": null,
"peft_type": "LORA",
"r": 8,
"rank_pattern": {},
"revision": null,
"target_modules": [
"query",
"value"
],
"task_type": null,
"use_dora": false,
"use_rslora": false
}
```
This contains a lot of entries, and at first glance, it could feel overwhelming to figure out all the right values to put in there. However, most of the entries are not necessary to load the model. This is either because they use the default values and don't need to be added or because they only affect the initialization of the LoRA weights, which is irrelevant when it comes to loading the model. If you find that you don't know what a specific parameter does, e.g. `"use_rslora"`, don't add it, and you should be fine. Also note that as more options are added, this file will get more entries in the future, but it should be backward compatible.
At the minimum, you should include the following entries:
```json
{
"target_modules": ["query", "value"],
"peft_type": "LORA"
}
```
However, adding as many entries as possible, like the rank `r` or the `base_model_name_or_path` (if it's a Transformers model) is recommended. This information can help others understand the model better and share it more easily. To check which keys and values are expected, check out the [config.py](https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/config.py) file (as an example, this is the config file for LoRA) in the PEFT source code.
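As a hedged sketch of one way to sanity-check a hand-written configuration (the field names are taken from the example above), you can round-trip it through `LoraConfig`, which fills in the remaining defaults:

```python
from peft import LoraConfig

config = LoraConfig(target_modules=["query", "value"], r=8, lora_alpha=8)
config.save_pretrained("my-adapter")  # writes my-adapter/adapter_config.json
print(LoraConfig.from_pretrained("my-adapter"))
```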
## Model storage
In some circumstances, you might want to store the whole PEFT model, including the base weights. This can be necessary if, for instance, the base model is not available to the users trying to load the PEFT model. You can either merge the adapter weights into the base weights first, or convert the PEFT model into a Transformers model.
### Merge the weights
The most straightforward way to store the whole PEFT model is to merge the adapter weights into the base weights:
```python
merged_model = model.merge_and_unload()
merged_model.save_pretrained(...)
```
There are some disadvantages to this approach, though:
- Once [`~LoraModel.merge_and_unload`] is called, you get a basic model without any PEFT-specific functionality. This means you can't use any of the PEFT-specific methods anymore.
- You cannot unmerge the weights, load multiple adapters at once, disable the adapter, etc.
- Not all PEFT methods support merging weights.
- Some PEFT methods may generally allow merging, but not with specific settings (e.g. when using certain quantization techniques).
- The whole model will be much larger than the PEFT model, as it will contain all the base weights as well.
But inference with a merged model should be a bit faster.
### Convert to a Transformers model
Another way to save the whole model, assuming the base model is a Transformers model, is this somewhat hacky approach: directly insert the PEFT weights into the base model and save it, which only works if you "trick" Transformers into believing the PEFT model is not a PEFT model. This only works with LoRA because other adapters are not implemented in Transformers.
```python
model = ... # the PEFT model
...
# after you finish training the model, save it in a temporary location
model.save_pretrained(<temp_location>)
# now load this model directly into a transformers model, without the PEFT wrapper
# the PEFT weights are directly injected into the base model
model_loaded = AutoModel.from_pretrained(<temp_location>)
# now make the loaded model believe that it is _not_ a PEFT model
model_loaded._hf_peft_config_loaded = False
# now when we save it, it will save the whole model
model_loaded.save_pretrained(<final_location>)
# or upload to Hugging Face Hub
model_loaded.push_to_hub(<final_location>)
```
| peft/docs/source/developer_guides/checkpoint.md/0 | {
"file_path": "peft/docs/source/developer_guides/checkpoint.md",
"repo_id": "peft",
"token_count": 4146
} |
import gc
import os
import sys
import threading
import psutil
import torch
from accelerate import Accelerator
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
default_data_collator,
get_linear_schedule_with_warmup,
set_seed,
)
from peft import LoraConfig, TaskType, get_peft_model
def levenshtein_distance(str1, str2):
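    """Compute the Levenshtein (edit) distance between two strings using O(len(str2)) extra space."""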
# TC: O(N^2)
# SC: O(N)
if str1 == str2:
return 0
num_rows = len(str1) + 1
num_cols = len(str2) + 1
dp_matrix = list(range(num_cols))
for i in range(1, num_rows):
prev = dp_matrix[0]
dp_matrix[0] = i
for j in range(1, num_cols):
temp = dp_matrix[j]
if str1[i - 1] == str2[j - 1]:
dp_matrix[j] = prev
else:
dp_matrix[j] = min(prev, dp_matrix[j], dp_matrix[j - 1]) + 1
prev = temp
return dp_matrix[num_cols - 1]
def get_closest_label(eval_pred, classes):
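    """Map a free-form prediction to the closest class label by Levenshtein distance."""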
min_id = sys.maxsize
min_edit_distance = sys.maxsize
for i, class_label in enumerate(classes):
edit_distance = levenshtein_distance(eval_pred.strip(), class_label)
if edit_distance < min_edit_distance:
min_id = i
min_edit_distance = edit_distance
return classes[min_id]
# Converting Bytes to Megabytes
def b2mb(x):
return int(x / 2**20)
# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
def __enter__(self):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
self.begin = torch.cuda.memory_allocated()
self.process = psutil.Process()
self.cpu_begin = self.cpu_mem_used()
self.peak_monitoring = True
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start()
return self
def cpu_mem_used(self):
"""get resident set size memory for the current process"""
return self.process.memory_info().rss
def peak_monitor_func(self):
self.cpu_peak = -1
while True:
self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
# time.sleep(0.001) # 1msec
if not self.peak_monitoring:
break
def __exit__(self, *exc):
self.peak_monitoring = False
gc.collect()
torch.cuda.empty_cache()
self.end = torch.cuda.memory_allocated()
self.peak = torch.cuda.max_memory_allocated()
self.used = b2mb(self.end - self.begin)
self.peaked = b2mb(self.peak - self.begin)
self.cpu_end = self.cpu_mem_used()
self.cpu_used = b2mb(self.cpu_end - self.cpu_begin)
self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def main():
accelerator = Accelerator()
model_name_or_path = "bigscience/bloomz-7b1"
dataset_name = "twitter_complaints"
peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1)
text_column = "Tweet text"
label_column = "text_label"
lr = 3e-3
num_epochs = 20
batch_size = 8
seed = 42
max_length = 64
do_test = False
set_seed(seed)
dataset = load_dataset("ought/raft", dataset_name)
classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names]
dataset = dataset.map(
lambda x: {"text_label": [classes[label] for label in x["Label"]]},
batched=True,
num_proc=1,
)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
def preprocess_function(examples):
batch_size = len(examples[text_column])
inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]]
targets = [str(x) for x in examples[label_column]]
model_inputs = tokenizer(inputs)
labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs
for i in range(batch_size):
sample_input_ids = model_inputs["input_ids"][i]
label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id]
model_inputs["input_ids"][i] = sample_input_ids + label_input_ids
labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids
model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i])
for i in range(batch_size):
sample_input_ids = model_inputs["input_ids"][i]
label_input_ids = labels["input_ids"][i]
model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (
max_length - len(sample_input_ids)
) + sample_input_ids
model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[
"attention_mask"
][i]
labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids
model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length])
model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length])
labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length])
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def test_preprocess_function(examples):
batch_size = len(examples[text_column])
inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]]
model_inputs = tokenizer(inputs)
# print(model_inputs)
for i in range(batch_size):
sample_input_ids = model_inputs["input_ids"][i]
model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (
max_length - len(sample_input_ids)
) + sample_input_ids
model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[
"attention_mask"
][i]
model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length])
model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length])
return model_inputs
with accelerator.main_process_first():
processed_datasets = dataset.map(
preprocess_function,
batched=True,
num_proc=1,
remove_columns=dataset["train"].column_names,
load_from_cache_file=True,
desc="Running tokenizer on dataset",
)
accelerator.wait_for_everyone()
train_dataset = processed_datasets["train"]
with accelerator.main_process_first():
processed_datasets = dataset.map(
test_preprocess_function,
batched=True,
num_proc=1,
remove_columns=dataset["train"].column_names,
load_from_cache_file=False,
desc="Running tokenizer on dataset",
)
eval_dataset = processed_datasets["train"]
test_dataset = processed_datasets["test"]
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
test_dataloader = DataLoader(
test_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
print(next(iter(train_dataloader)))
# creating model
model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
# optimizer
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
# lr scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=(len(train_dataloader) * num_epochs),
)
model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler = accelerator.prepare(
model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler
)
accelerator.print(model)
is_ds_zero_3 = False
if getattr(accelerator.state, "deepspeed_plugin", None):
is_ds_zero_3 = accelerator.state.deepspeed_plugin.zero_stage == 3
for epoch in range(num_epochs):
with TorchTracemalloc() as tracemalloc:
model.train()
total_loss = 0
for step, batch in enumerate(tqdm(train_dataloader)):
outputs = model(**batch)
loss = outputs.loss
total_loss += loss.detach().float()
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print(f"GPU Memory before entering the train : {b2mb(tracemalloc.begin)}")
accelerator.print(f"GPU Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
accelerator.print(f"GPU Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
accelerator.print(
f"GPU Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
)
accelerator.print(f"CPU Memory before entering the train : {b2mb(tracemalloc.cpu_begin)}")
accelerator.print(f"CPU Memory consumed at the end of the train (end-begin): {tracemalloc.cpu_used}")
accelerator.print(f"CPU Peak Memory consumed during the train (max-begin): {tracemalloc.cpu_peaked}")
accelerator.print(
f"CPU Total Peak Memory consumed during the train (max): {tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)}"
)
train_epoch_loss = total_loss / len(train_dataloader)
train_ppl = torch.exp(train_epoch_loss)
accelerator.print(f"{epoch=}: {train_ppl=} {train_epoch_loss=}")
model.eval()
eval_preds = []
with TorchTracemalloc() as tracemalloc:
for _, batch in enumerate(tqdm(eval_dataloader)):
batch = {k: v for k, v in batch.items() if k != "labels"}
with torch.no_grad():
outputs = accelerator.unwrap_model(model).generate(
**batch, synced_gpus=is_ds_zero_3, max_new_tokens=10
) # synced_gpus=True for DS-stage 3
outputs = accelerator.pad_across_processes(outputs, dim=1, pad_index=tokenizer.pad_token_id)
preds = accelerator.gather_for_metrics(outputs)
preds = preds[:, max_length:].detach().cpu().numpy()
eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True))
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print(f"GPU Memory before entering the eval : {b2mb(tracemalloc.begin)}")
accelerator.print(f"GPU Memory consumed at the end of the eval (end-begin): {tracemalloc.used}")
accelerator.print(f"GPU Peak Memory consumed during the eval (max-begin): {tracemalloc.peaked}")
accelerator.print(
f"GPU Total Peak Memory consumed during the eval (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
)
accelerator.print(f"CPU Memory before entering the eval : {b2mb(tracemalloc.cpu_begin)}")
accelerator.print(f"CPU Memory consumed at the end of the eval (end-begin): {tracemalloc.cpu_used}")
accelerator.print(f"CPU Peak Memory consumed during the eval (max-begin): {tracemalloc.cpu_peaked}")
accelerator.print(
f"CPU Total Peak Memory consumed during the eval (max): {tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)}"
)
correct = 0
total = 0
assert len(eval_preds) == len(dataset["train"][label_column]), (
f"{len(eval_preds)} != {len(dataset['train'][label_column])}"
)
for pred, true in zip(eval_preds, dataset["train"][label_column]):
if pred.strip() == true.strip():
correct += 1
total += 1
accuracy = correct / total * 100
accelerator.print(f"{accuracy=}")
accelerator.print(f"{eval_preds[:10]=}")
accelerator.print(f"{dataset['train'][label_column][:10]=}")
if do_test:
model.eval()
test_preds = []
for _, batch in enumerate(tqdm(test_dataloader)):
batch = {k: v for k, v in batch.items() if k != "labels"}
with torch.no_grad():
outputs = accelerator.unwrap_model(model).generate(
**batch, synced_gpus=is_ds_zero_3, max_new_tokens=10
) # synced_gpus=True for DS-stage 3
outputs = accelerator.pad_across_processes(outputs, dim=1, pad_index=tokenizer.pad_token_id)
preds = accelerator.gather(outputs)
preds = preds[:, max_length:].detach().cpu().numpy()
test_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True))
test_preds_cleaned = []
for _, pred in enumerate(test_preds):
test_preds_cleaned.append(get_closest_label(pred, classes))
test_df = dataset["test"].to_pandas()
assert len(test_preds_cleaned) == len(test_df), f"{len(test_preds_cleaned)} != {len(test_df)}"
test_df[label_column] = test_preds_cleaned
test_df["text_labels_orig"] = test_preds
accelerator.print(test_df[[text_column, label_column]].sample(20))
pred_df = test_df[["ID", label_column]]
pred_df.columns = ["ID", "Label"]
os.makedirs(f"data/{dataset_name}", exist_ok=True)
pred_df.to_csv(f"data/{dataset_name}/predictions.csv", index=False)
accelerator.wait_for_everyone()
# Option1: Pushing the model to Hugging Face Hub
# model.push_to_hub(
# f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"),
# token = "hf_..."
# )
# token (`bool` or `str`, *optional*):
# `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated
# when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`
# is not specified.
# Or you can get your token from https://huggingface.co/settings/token
# Option2: Saving the model locally
peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace(
"/", "_"
)
model.save_pretrained(peft_model_id)
accelerator.wait_for_everyone()
if __name__ == "__main__":
main()
| peft/examples/causal_language_modeling/peft_lora_clm_accelerate_ds_zero3_offload.py/0 | {
"file_path": "peft/examples/causal_language_modeling/peft_lora_clm_accelerate_ds_zero3_offload.py",
"repo_id": "peft",
"token_count": 6862
} |
<jupyter_start><jupyter_text>Peft model evaluation using [lm-eval-harness](https://github.com/EleutherAI/lm-evaluation-harness)In this notebook, we are going to learn how to evaluate the fine-tuned LoRA model on the HellaSwag task using the lm-eval-harness toolkit.<jupyter_code># Install LM-Eval

!pip install -q datasets evaluate lm_eval<jupyter_output>[notice] A new release of pip is available: 24.0 -> 24.3.1
[notice] To update, run: python.exe -m pip install --upgrade pip<jupyter_text>First, we will check the accuracy score on the HellaSwag task for the base BERT model without fine-tuning.<jupyter_code>import lm_eval
output = lm_eval.simple_evaluate(model = 'hf',
model_args = {
'pretrained' : 'bert-base-cased',
'dtype' : 'bfloat16'},
tasks = 'hellaswag',
device = 'cuda:0',
batch_size = 128,
log_samples = False)
output["results"]<jupyter_output>2024-11-01:20:45:03,210 INFO [evaluator.py:164] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 | Setting fewshot manual seed to 1234
2024-11-01:20:45:03,211 INFO [evaluator.py:188] Initializing hf model, with arguments: {'pretrained': 'bert-base-cased', 'dtype': 'bfloat16'}
2024-11-01:20:45:03,213 INFO [huggingface.py:129] Using device 'cuda:0'
2024-11-01:20:45:03,450 INFO [huggingface.py:481] Using model type 'default'
2024-11-01:20:45:03,741 INFO [huggingface.py:365] Model parallel was set to False, max memory was not set, and device map was set to {'': 'cuda:0'}
If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`
2024-11-01:20:45:15,862 INFO [task.py:415] Building contexts for hellaswag on rank 0...
100%|██████████| 10042/10042 [00:02<00:00, 4477.77it/s]
2024-11-01:20:45:18,875 INFO [evaluator.py:489] Running loglikelihood requests
Running loglikelihood requests: 100%|██████████| 40168/40[...]<jupyter_text>Now let's try to fine-tune BERT on the IMDB dataset (this is only for demonstration; fine-tuning on IMDB may not increase the scores on the HellaSwag task).<jupyter_code># Import necessary libraries
import evaluate
import numpy as np
from datasets import load_dataset
from transformers import AutoTokenizer, BertForSequenceClassification, Trainer, TrainingArguments
from peft import LoraConfig, TaskType, get_peft_model
# Configure LoRA for Sequence Classification
lora_config = LoraConfig(
task_type=TaskType.SEQ_CLS, # Set task type to sequence classification
target_modules=["query", "key"] # Specify target modules for LoRA tuning
)
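# note: with task_type=TaskType.SEQ_CLS, peft also keeps the newly initialized sequence-classification head
# trainable (it is treated like modules_to_save), so the head is fine-tuned alongside the LoRA matrices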
# Initialize the BERT model for sequence classification
model = BertForSequenceClassification.from_pretrained(
'bert-base-cased',
num_labels = 2
)
# Wrap the model with LoRA configuration
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
# load the dataset
dataset = load_dataset("imdb")
def tokenize_function(row):
return tokenizer(row["text"], padding="max_length", truncation = True)
tokenized_datasets = dataset.map(tokenize_function, batched = True)
train_dataset = tokenized_datasets["train"]
eval_dataset = tokenized_datasets["test"]
# Define a function to compute evaluation metrics
def compute_metrics(eval_pred):
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
metric = evaluate.load("accuracy")
return metric.compute(predictions = predictions, references = labels)
# Configure training arguments
training_args = TrainingArguments("bert-lora-imdb",
eval_strategy="epoch",
per_device_train_batch_size=32, # decrease this for OOM error
per_device_eval_batch_size=64,
save_strategy="epoch",
learning_rate=2e-3,
num_train_epochs=5,
weight_decay=0.01,
load_best_model_at_end=True,
do_eval=True,
do_predict=True,
metric_for_best_model="accuracy",
report_to="none")
# Initialize the Trainer for the model training loop
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
)
#start training
trainer.train()<jupyter_output>13%|█▎        | 500/3910 [08:16<56:48, 1.00it/s]<jupyter_text>Now take the fine-tuned LoRA checkpoint and check the accuracy score on the HellaSwag task.<jupyter_code># use the path of your checkpoint here
output = lm_eval.simple_evaluate(model = 'hf',
model_args = {
'pretrained' : 'bert-base-cased',
'peft' : './bert-lora-imdb/checkpoint-3910',
'dtype' : 'bfloat16'},
tasks = 'hellaswag',
device = 'cuda:0',
batch_size = 128,
log_samples = False)
output["results"]<jupyter_output>2024-11-01:23:37:57,640 INFO [evaluator.py:164] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 | Setting fewshot manual seed to 1234
2024-11-01:23:37:57,641 INFO [evaluator.py:188] Initializing hf model, with arguments: {'pretrained': 'bert-base-cased', 'peft': './bert-lora-imdb/checkpoint-3910', 'dtype': 'bfloat16'}
2024-11-01:23:37:57,643 INFO [huggingface.py:129] Using device 'cuda:0'
2024-11-01:23:37:57,891 INFO [huggingface.py:481] Using model type 'default'
2024-11-01:23:37:58,161 INFO [huggingface.py:365] Model parallel was set to False, max memory was not set, and device map was set to {'': 'cuda:0'}
If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`
2024-11-01:23:38:10,295 INFO [task.py:415] Building contexts for hellaswag on rank 0...
100%|██████████| 10042/10042 [00:02<00:00, 4453.89it/s]
2024-11-01:23:38:13,313 INFO [evaluator.py:489] Running loglikelihood requests
Running logli[...] | peft/examples/evaluation/lora-lm-eval.ipynb/0 | {
"file_path": "peft/examples/evaluation/lora-lm-eval.ipynb",
"repo_id": "peft",
"token_count": 2454
} |
<jupyter_start><jupyter_text>IntroductionIn this notebook, we will learn how to use [LoRA](https://arxiv.org/abs/2106.09685) from 🤗 PEFT to fine-tune an image classification model by ONLY using **0.77%** of the original trainable parameters of the model. LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are _merged_ with the original model parameters. For more details, check out the [original LoRA paper](https://arxiv.org/abs/2106.09685). Let's get started by installing the dependencies. __*Note that this notebook builds on top the [official image classification example notebook](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb).*__ Install dependenciesHere we're installing `peft` from source to ensure we have access to all the bleeding edge features of `peft`.<jupyter_code>!pip install transformers accelerate evaluate datasets git+https://github.com/huggingface/peft -q<jupyter_output>Installing build dependencies ... [?25l[?25hdone
Getting requirements to build wheel ... [?25l[?25hdone
Preparing metadata (pyproject.toml) ... [?25l[?25hdone
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m6.3/6.3 MB[0m [31m53.1 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m199.7/199.7 KB[0m [31m24.5 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m81.4/81.4 KB[0m [31m11.3 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m462.8/462.8 KB[0m [31m46.9 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m190.3/190.3 KB[0m [31m23.1 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m7.6/7.6 MB[0m [31m102.9 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m213.0/213.0 KB[0m [31m25.4 MB/s[0m eta [[...]<jupyter_text>AuthenticationWe will share our fine-tuned model at the end of training. So, to do that we just authenticate using our 🤗 token. This token is available from [here](https://huggingface.co/settings/tokens). If you don't have a 🤗 account already, we highly encourage you to do so; it's free!<jupyter_code>from huggingface_hub import notebook_login
notebook_login()<jupyter_output>Token is valid.
Your token has been saved in your configured git credential helpers (store).
Your token has been saved to /root/.cache/huggingface/token
Login successful<jupyter_text>Check the library versions<jupyter_code>import transformers
import accelerate
import peft
print(f"Transformers version: {transformers.__version__}")
print(f"Accelerate version: {accelerate.__version__}")
print(f"PEFT version: {peft.__version__}")<jupyter_output>Transformers version: 4.26.0
Accelerate version: 0.16.0
PEFT version: 0.1.0.dev0<jupyter_text>Select a model checkpoint to fine-tune<jupyter_code>model_checkpoint = "google/vit-base-patch16-224-in21k" # pre-trained model from which to fine-tune<jupyter_output><empty_output><jupyter_text>Load a datasetWe're only loading the first 5000 instances from the training set of the [Food-101 dataset](https://huggingface.co/datasets/food101) to keep this example runtime short.<jupyter_code>from datasets import load_dataset
dataset = load_dataset("food101", split="train[:5000]")<jupyter_output><empty_output><jupyter_text>Prepare datasets for training and evaluation 1. Prepare `label2id` and `id2label` dictionaries. This will come in handy when performing inference and for metadata information.<jupyter_code>labels = dataset.features["label"].names
label2id, id2label = dict(), dict()
for i, label in enumerate(labels):
label2id[label] = i
id2label[i] = label
id2label[2]<jupyter_output><empty_output><jupyter_text>2. We load the image processor of the model we're fine-tuning.<jupyter_code>from transformers import AutoImageProcessor
image_processor = AutoImageProcessor.from_pretrained(model_checkpoint)
image_processor<jupyter_output><empty_output><jupyter_text>As one might notice, the `image_processor` has useful information on which size the training and evaluation images should be resized, stats that should be used to normalize the pixel values, etc. 3. Using the image processor we prepare transformation functions for the datasets. These functions will include augmentation and pixel scaling.<jupyter_code>from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
train_transforms = Compose(
[
RandomResizedCrop(image_processor.size["height"]),
RandomHorizontalFlip(),
ToTensor(),
normalize,
]
)
val_transforms = Compose(
[
Resize(image_processor.size["height"]),
CenterCrop(image_processor.size["height"]),
ToTensor(),
normalize,
]
)
def preprocess_train(example_batch):
"""Apply train_transforms across a batch."""
example_batch["pixel_values"] = [train_transforms(image.convert("RGB")) for image in example_batch["image"]]
return example_batch
def preprocess_val(example_batch):
"""Apply val_transforms across a batch."""
example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]]
return example_batch<jupyter_output><empty_output><jupyter_text>4. We split our mini dataset into training and validation.<jupyter_code># split up training into training + validation
splits = dataset.train_test_split(test_size=0.1)
train_ds = splits["train"]
val_ds = splits["test"]<jupyter_output><empty_output><jupyter_text>5. We set the transformation functions to the datasets accordingly.<jupyter_code>train_ds.set_transform(preprocess_train)
val_ds.set_transform(preprocess_val)<jupyter_output><empty_output><jupyter_text>Load and prepare a model In this section, we first load the model we want to fine-tune.<jupyter_code>def print_trainable_parameters(model):
"""
Prints the number of trainable parameters in the model.
"""
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
all_param += param.numel()
if param.requires_grad:
trainable_params += param.numel()
print(
f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}"
    )<jupyter_output><empty_output><jupyter_text>The `get_peft_model()` method that we will use in a moment wraps the original model to be fine-tuned as a `PeftModel`. So, it's important for us to initialize the original model correctly. As such, we initialize it by specifying the `label2id` and `id2label` so that `AutoModelForImageClassification` can append a newly initialized classification head to the underlying model, adapted for our dataset. We can confirm this from the warning below:```Some weights of ViTForImageClassification were not initialized from the model checkpoint at google/vit-base-patch16-224-in21k and are newly initialized: ['classifier.weight', 'classifier.bias']```<jupyter_code>from transformers import AutoModelForImageClassification, TrainingArguments, Trainer
model = AutoModelForImageClassification.from_pretrained(
model_checkpoint,
label2id=label2id,
id2label=id2label,
ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint
)
print_trainable_parameters(model)<jupyter_output><empty_output><jupyter_text>Also, take note of the number of total trainable parameters of `model`: it's 100%! We'll compare this number to that of the LoRA model.We now use the `PeftModel` to wrap `model` so that the "update" matrices are added to the respective places.<jupyter_code>from peft import LoraConfig, get_peft_model
config = LoraConfig(
r=16,
lora_alpha=16,
target_modules=["query", "value"],
lora_dropout=0.1,
bias="none",
modules_to_save=["classifier"],
)
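# a rough sketch of what this does: for every targeted nn.Linear with weight W (out_features x in_features),
# LoRA learns A (r x in_features) and B (out_features x r) and computes W @ x + (lora_alpha / r) * (B @ (A @ x)),
# so only r * (in_features + out_features) extra parameters are trained per targeted layer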
lora_model = get_peft_model(model, config)
print_trainable_parameters(lora_model)<jupyter_output>trainable params: 667493 || all params: 86466149 || trainable%: 0.77<jupyter_text>Let's unpack what's going on here. In order for LoRA to take effect, we need to specify the target modules to `LoraConfig` so that `get_peft_model()` knows which modules inside our model need to be amended with LoRA matrices. In this case, we're only interested in targeting the query and value matrices of the attention blocks of the base model. Since the parameters corresponding to these matrices are "named" with `query` and `value` respectively, we specify them accordingly in the `target_modules` argument of `LoraConfig`. We also specify `modules_to_save`. After we wrap our base model `model` with `get_peft_model()` along with the `config`, we get a new model where only the LoRA parameters are trainable (the so-called "update matrices") while the pre-trained parameters are kept frozen. These frozen parameters include the randomly initialized classifier parameters too. This is NOT what we want when fine-tuning the base model on our custom dataset. To ensure that the classifier parameters are also trained, we specify `modules_to_save`. This also ensures that these modules are serialized alongside the LoRA trainable parameters when using utilities like `save_pretrained()` and `push_to_hub()`. Regarding the other parameters:* `r`: The dimension used by the LoRA update matrices.* `alpha`: Scaling factor.* `bias`: Specifies whether the `bias` parameters should be trained. `"none"` denotes that none of the `bias` parameters will be trained. `r` and `alpha` together control the total number of final trainable parameters when using LoRA, giving us the flexibility to balance a trade-off between end performance and compute efficiency. We can also see how many parameters we're actually training. Since we're interested in performing **parameter-efficient fine-tuning**, we should expect to see fewer trainable parameters in the `lora_model` than in the original `model`, which is indeed the case here. Training argumentsWe will leverage [🤗 Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) for fine-tuning. It accepts several arguments which we wrap using [`TrainingArguments`](https://huggingface.co/docs/transformers/main_classes/trainertransformers.TrainingArguments).<jupyter_code>from transformers import TrainingArguments, Trainer
model_name = model_checkpoint.split("/")[-1]
batch_size = 128
args = TrainingArguments(
f"{model_name}-finetuned-lora-food101",
remove_unused_columns=False,
eval_strategy="epoch",
save_strategy="epoch",
learning_rate=5e-3,
per_device_train_batch_size=batch_size,
gradient_accumulation_steps=4,
per_device_eval_batch_size=batch_size,
fp16=True,
num_train_epochs=5,
logging_steps=10,
load_best_model_at_end=True,
metric_for_best_model="accuracy",
push_to_hub=True,
label_names=["labels"],
)<jupyter_output><empty_output><jupyter_text>Some things to note here:* We're using a larger batch size since there is only a handful of parameters to train. * Larger learning rate than the normal (1e-5 for example). All of these things are a byproduct of the fact that we're training only a small number of parameters. This can potentially also reduce the need to conduct expensive hyperparameter tuning experiments. Prepare evaluation metric<jupyter_code>import numpy as np
import evaluate
metric = evaluate.load("accuracy")
# the compute_metrics function takes a Named Tuple as input:
# predictions, which are the logits of the model as Numpy arrays,
# and label_ids, which are the ground-truth labels as Numpy arrays.
def compute_metrics(eval_pred):
"""Computes accuracy on a batch of predictions"""
predictions = np.argmax(eval_pred.predictions, axis=1)
return metric.compute(predictions=predictions, references=eval_pred.label_ids)<jupyter_output><empty_output><jupyter_text>Collation functionThis is used by `Trainer` to gather a batch of training and evaluation examples and prepare them in a format that is acceptable by the underlying model.<jupyter_code>import torch
def collate_fn(examples):
pixel_values = torch.stack([example["pixel_values"] for example in examples])
labels = torch.tensor([example["label"] for example in examples])
return {"pixel_values": pixel_values, "labels": labels}<jupyter_output><empty_output><jupyter_text>Train and evaluate<jupyter_code>trainer = Trainer(
lora_model,
args,
train_dataset=train_ds,
eval_dataset=val_ds,
tokenizer=image_processor,
compute_metrics=compute_metrics,
data_collator=collate_fn,
)
train_results = trainer.train()<jupyter_output>Cloning https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101 into local empty directory.
WARNING:huggingface_hub.repository:Cloning https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101 into local empty directory.<jupyter_text>In just a few minutes, we have a fine-tuned model with 96% validation accuracy. Also, note that we used a very small subset of the training dataset which is definitely impacting the results.<jupyter_code>trainer.evaluate(val_ds)<jupyter_output>***** Running Evaluation *****
Num examples = 500
Batch size = 128<jupyter_text>Sharing your model and inference Once the fine-tuning is done, we can share the LoRA parameters with the community like so:<jupyter_code>repo_name = f"sayakpaul/{model_name}-finetuned-lora-food101"
lora_model.push_to_hub(repo_name)<jupyter_output>Uploading the following files to sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101: adapter_config.json,adapter_model.bin<jupyter_text>When we call `push_to_hub()` on the `lora_model`, only the LoRA parameters along with any modules specified in `modules_to_save` are saved. If we take a look at the [trained LoRA parameters](https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101/blob/main/adapter_model.bin), we see that it's only **2.6 MB**! This greatly helps with portability especially when we're using a very large model to fine-tune (such as [BLOOM](https://huggingface.co/bigscience/bloom)). Next, we see how to load the LoRA updated parameters along with our base model for inference. When we wrap a base model with `PeftModel`, the modifications are done in place. So, to mitigate any concerns that might stem from in-place modifications, we newly initialize our base model just like we did earlier and construct our inference model.<jupyter_code>from peft import PeftConfig, PeftModel
config = PeftConfig.from_pretrained(repo_name)
model = AutoModelForImageClassification.from_pretrained(
config.base_model_name_or_path,
label2id=label2id,
id2label=id2label,
ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint
)
# Load the Lora model
inference_model = PeftModel.from_pretrained(model, repo_name)<jupyter_output>loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--google--vit-base-patch16-224-in21k/snapshots/1ba429d32753f33a0660b80ac6f43a3c80c18938/config.json
Model config ViTConfig {
"_name_or_path": "google/vit-base-patch16-224-in21k",
"architectures": [
"ViTModel"
],
"attention_probs_dropout_prob": 0.0,
"encoder_stride": 16,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.0,
"hidden_size": 768,
"id2label": {
"0": "apple_pie",
"1": "baby_back_ribs",
"2": "baklava",
"3": "beef_carpaccio",
"4": "beef_tartare",
"5": "beet_salad",
"6": "beignets",
"7": "bibimbap",
"8": "bread_pudding",
"9": "breakfast_burrito",
"10": "bruschetta",
"11": "caesar_salad",
"12": "cannoli",
"13": "caprese_salad",
"14": "carrot_cake",
"15": "ceviche",
"16": "cheesecake",
"17": "cheese_plate",
"18": "chicken_curry",
"19": "chicken_quesadilla",
"20": "chicken_wings",
"21": "ch[...]<jupyter_text>Don't worry about the warnings, they're harmless. Let's now fetch a sample for inference.<jupyter_code>from PIL import Image
import requests
url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/beignets.jpeg"
image = Image.open(requests.get(url, stream=True).raw)
image<jupyter_output><empty_output><jupyter_text>We first instantiate an `image_processor` from the underlying model repo.<jupyter_code>image_processor = AutoImageProcessor.from_pretrained(repo_name)<jupyter_output>loading configuration file preprocessor_config.json from cache at /root/.cache/huggingface/hub/models--sayakpaul--vit-base-patch16-224-in21k-finetuned-lora-food101/snapshots/fa2503cc7d91e0dd69728c1dc66ed80d7bd3289b/preprocessor_config.json
Image processor ViTImageProcessor {
"do_normalize": true,
"do_rescale": true,
"do_resize": true,
"image_mean": [
0.5,
0.5,
0.5
],
"image_processor_type": "ViTImageProcessor",
"image_std": [
0.5,
0.5,
0.5
],
"resample": 2,
"rescale_factor": 0.00392156862745098,
"size": {
"height": 224,
"width": 224
}
}<jupyter_text>We then prepare the sample for inference.<jupyter_code># prepare image for the model
encoding = image_processor(image.convert("RGB"), return_tensors="pt")
print(encoding.pixel_values.shape)<jupyter_output>torch.Size([1, 3, 224, 224])<jupyter_text>And run inference!<jupyter_code>import torch
# forward pass
with torch.no_grad():
outputs = inference_model(**encoding)
logits = outputs.logits
predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", inference_model.config.id2label[predicted_class_idx])<jupyter_output>Predicted class: beignets | peft/examples/image_classification/image_classification_peft_lora.ipynb/0 | {
"file_path": "peft/examples/image_classification/image_classification_peft_lora.ipynb",
"repo_id": "peft",
"token_count": 6372
} |
<jupyter_start><jupyter_text>IntroductionIn this notebook, we will learn how to use [LoRA](https://arxiv.org/abs/2106.09685) from 🤗 PEFT to fine-tune a SegFormer model variant for semantic segmentation by ONLY using **14%** of the original trainable parameters of the model. LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are _merged_ with the original model parameters. For more details, check out the [original LoRA paper](https://arxiv.org/abs/2106.09685). Let's get started by installing the dependencies. Install dependenciesHere we're installing `peft` from source to ensure we have access to all the bleeding edge features of `peft`.<jupyter_code>!pip install transformers accelerate evaluate datasets git+https://github.com/huggingface/peft -q<jupyter_output><empty_output><jupyter_text>AuthenticationWe will share our fine-tuned model at the end of training. So, to do that we just authenticate using our 🤗 token. This token is available from [here](https://huggingface.co/settings/tokens). If you don't have a 🤗 account already, we highly encourage you to do so; it's free!<jupyter_code>from huggingface_hub import notebook_login
notebook_login()<jupyter_output><empty_output><jupyter_text>Load a datasetWe're only loading the first 150 instances from the training set of the [SceneParse150 dataset](https://huggingface.co/datasets/scene_parse_150) to keep this example runtime short.<jupyter_code>from datasets import load_dataset
ds = load_dataset("scene_parse_150", split="train[:150]")<jupyter_output><empty_output><jupyter_text>Prepare train and test splits<jupyter_code>ds = ds.train_test_split(test_size=0.1)
train_ds = ds["train"]
test_ds = ds["test"]<jupyter_output><empty_output><jupyter_text>Prepare label mappersWe create two dictionaries:* `label2id`: maps the semantic classes of the dataset to integer ids.* `id2label`: `label2id` reversed.<jupyter_code>import json
from huggingface_hub import cached_download, hf_hub_url
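# note: cached_download is deprecated in recent huggingface_hub releases; hf_hub_download(repo_id, filename, repo_type="dataset") is the usual replacement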
repo_id = "huggingface/label-files"
filename = "ade20k-id2label.json"
id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
num_labels = len(id2label)<jupyter_output><empty_output><jupyter_text>Prepare datasets for training and evaluation<jupyter_code>from transformers import AutoImageProcessor
checkpoint = "nvidia/mit-b0"
image_processor = AutoImageProcessor.from_pretrained(checkpoint, do_reduce_labels=True)
from torchvision.transforms import ColorJitter
jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
from PIL import Image
import numpy as np
def handle_grayscale_image(image):
np_image = np.array(image)
if np_image.ndim == 2:
tiled_image = np.tile(np.expand_dims(np_image, -1), 3)
return Image.fromarray(tiled_image)
else:
return Image.fromarray(np_image)
def train_transforms(example_batch):
images = [jitter(handle_grayscale_image(x)) for x in example_batch["image"]]
labels = [x for x in example_batch["annotation"]]
inputs = image_processor(images, labels)
return inputs
def val_transforms(example_batch):
images = [handle_grayscale_image(x) for x in example_batch["image"]]
labels = [x for x in example_batch["annotation"]]
inputs = image_processor(images, labels)
return inputs
train_ds.set_transform(train_transforms)
test_ds.set_transform(val_transforms)<jupyter_output><empty_output><jupyter_text>Evaluation functionIncluding a metric during training is often helpful for evaluating your model’s performance. You can quickly load an evaluation method with the [🤗 Evaluate](https://huggingface.co/docs/evaluate/index) library. For this task, load the [mean Intersection over Union (IoU)](https://huggingface.co/spaces/evaluate-metric/accuracy) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric):<jupyter_code>import torch
from torch import nn
import evaluate
metric = evaluate.load("mean_iou")
def compute_metrics(eval_pred):
with torch.no_grad():
logits, labels = eval_pred
logits_tensor = torch.from_numpy(logits)
# scale the logits to the size of the label
logits_tensor = nn.functional.interpolate(
logits_tensor,
size=labels.shape[-2:],
mode="bilinear",
align_corners=False,
).argmax(dim=1)
pred_labels = logits_tensor.detach().cpu().numpy()
# currently using _compute instead of compute
# see this issue for more info: https://github.com/huggingface/evaluate/pull/328#issuecomment-1286866576
metrics = metric._compute(
predictions=pred_labels,
references=labels,
num_labels=len(id2label),
ignore_index=0,
reduce_labels=image_processor.do_reduce_labels,
)
# add per category metrics as individual key-value pairs
per_category_accuracy = metrics.pop("per_category_accuracy").tolist()
per_category_iou = metrics.pop("per_category_iou").tolist()
metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)})
metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)})
return metrics<jupyter_output><empty_output><jupyter_text>Load a base modelFor this example, we use the [SegFormer B0 variant](https://huggingface.co/nvidia/mit-b0).<jupyter_code>def print_trainable_parameters(model):
"""
Prints the number of trainable parameters in the model.
"""
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
all_param += param.numel()
if param.requires_grad:
trainable_params += param.numel()
print(
f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}"
    )<jupyter_output><empty_output><jupyter_text>We pass the `label2id` and `id2label` dictionaries to let the `AutoModelForSemanticSegmentation` class know that we're interested in a custom base model where the decoder head should be randomly initialized w.r.t. our custom dataset. Note, however, that the rest of the model parameters are pre-trained and will be fine-tuned in a regular transfer learning setup.We also notice that 100% of the parameters in the `model` are trainable.<jupyter_code>from transformers import AutoModelForSemanticSegmentation, TrainingArguments, Trainer
model = AutoModelForSemanticSegmentation.from_pretrained(
checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True
)
print_trainable_parameters(model)<jupyter_output><empty_output><jupyter_text>Wrap `model` as a `PeftModel` for LoRA trainingThis involves two steps:* Defining a config with `LoraConfig`* Wrapping the original `model` with `get_peft_model()` with the config defined in the step above.<jupyter_code>from peft import LoraConfig, get_peft_model
config = LoraConfig(
r=32,
lora_alpha=32,
target_modules=["query", "value"],
lora_dropout=0.1,
bias="lora_only",
modules_to_save=["decode_head"],
)
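# bias="lora_only" trains only the bias terms of the modules that receive LoRA updates;
# the decode_head listed in modules_to_save stays fully trainable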
lora_model = get_peft_model(model, config)
print_trainable_parameters(lora_model)<jupyter_output>===================================BUG REPORT===================================
Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues
================================================================================
trainable params: 564374 || all params: 3883766 || trainable%: 14.53<jupyter_text>Let's unpack what's going on here. In order for LoRA to take effect, we need to specify the target modules to `LoraConfig` so that `PeftModel` knows which modules inside our model need to be amended with LoRA matrices. In this case, we're only interested in targeting the query and value matrices of the attention blocks of the base model. Since the parameters corresponding to these matrices are "named" with `query` and `value` respectively, we specify them accordingly in the `target_modules` argument of `LoraConfig`. We also specify `modules_to_save`. After we wrap our base model `model` with `PeftModel` along with the `config`, we get a new model where only the LoRA parameters are trainable (the so-called "update matrices") while the pre-trained parameters are kept frozen. These frozen parameters include the randomly initialized decoder head parameters too. This is NOT what we want when fine-tuning the base model on our custom dataset. To ensure that the decoder head parameters are also trained, we specify `modules_to_save`. This also ensures that these modules are serialized alongside the LoRA trainable parameters when using utilities like `save_pretrained()` and `push_to_hub()`. Regarding the other parameters:* `r`: The dimension used by the LoRA update matrices.* `alpha`: Scaling factor.* `bias`: Specifies whether the `bias` parameters should be trained. `"lora_only"` denotes that only the `bias` parameters of the LoRA-adapted modules will be trained. `r` and `alpha` together control the total number of final trainable parameters when using LoRA, giving us the flexibility to balance a trade-off between end performance and compute efficiency. We can also see how many parameters we're actually training. Since we're interested in performing **parameter-efficient fine-tuning**, we should expect to see fewer trainable parameters in the `lora_model` than in the original `model`, which is indeed the case here. For sanity, let's also manually verify the modules that are actually trainable in `lora_model`.<jupyter_code>for name, param in lora_model.named_parameters():
if param.requires_grad:
print(name, param.shape)<jupyter_output>base_model.model.segformer.encoder.block.0.0.attention.self.query.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.0.attention.self.query.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.0.attention.self.value.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.0.attention.self.value.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.query.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.query.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.value.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.value.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.1.0.attention.self.query.lora_A.weight torch.Size([32, 64])
base_model.model.segformer.encoder.block.1.0.attention.self.query.lora_B.weight torch.Size([...]<jupyter_text>We can confirm that only the LoRA parameters appended to the attention blocks and the `decode_head` parameters are trainable. Train!This is a three-step process: 1. Define your training hyperparameters in [TrainingArguments](https://huggingface.co/docs/transformers/v4.26.0/en/main_classes/trainertransformers.TrainingArguments). It is important you don’t remove unused columns because this’ll drop the image column. Without the image column, you can’t create `pixel_values`. Set `remove_unused_columns=False` to prevent this behavior! The only other required parameter is `output_dir`, which specifies where to save your model. At the end of each epoch, the `Trainer` will evaluate the IoU metric and save the training checkpoint.2. Pass the training arguments to [Trainer](https://huggingface.co/docs/transformers/v4.26.0/en/main_classes/trainertransformers.Trainer) along with the model, dataset, tokenizer, data collator, and `compute_metrics` function.3. Call `train()` to fine-tune your model.**Note** that this example is meant to walk you through the workflow when using PEFT for semantic segmentation. We didn't perform extensive hyperparameter tuning to achieve optimal results.<jupyter_code>model_name = checkpoint.split("/")[-1]
training_args = TrainingArguments(
output_dir=f"{model_name}-scene-parse-150-lora",
learning_rate=5e-4,
num_train_epochs=50,
per_device_train_batch_size=4,
per_device_eval_batch_size=2,
save_total_limit=3,
eval_strategy="epoch",
save_strategy="epoch",
logging_steps=5,
remove_unused_columns=False,
push_to_hub=True,
label_names=["labels"],
)
trainer = Trainer(
model=lora_model,
args=training_args,
train_dataset=train_ds,
eval_dataset=test_ds,
compute_metrics=compute_metrics,
)
trainer.train()<jupyter_output><empty_output><jupyter_text>Saving the model and inference Here we use the `save_pretrained()` method of the `lora_model` to save the *LoRA-only parameters* locally. However, you can also use the `push_to_hub()` method to upload these parameters directly to the Hugging Face Hub (as shown [here](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/image_classification/image_classification_peft_lora.ipynb)).<jupyter_code>model_id = "segformer-scene-parse-150-lora"
lora_model.save_pretrained(model_id)<jupyter_output><empty_output><jupyter_text>We can see that the LoRA-only parameters are just **2.2 MB in size**! This greatly improves the portability when using very large models.<jupyter_code>!ls -lh {model_id}<jupyter_output>total 2.2M
-rw-r--r-- 1 root root 369 Feb 8 03:09 adapter_config.json
-rw-r--r-- 1 root root 2.2M Feb 8 03:09 adapter_model.bin<jupyter_text>Let's now prepare our `inference_model` and run an inference.<jupyter_code>from peft import PeftConfig
config = PeftConfig.from_pretrained(model_id)
model = AutoModelForSemanticSegmentation.from_pretrained(
checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True
)
# Load the Lora model
inference_model = PeftModel.from_pretrained(model, model_id)<jupyter_output><empty_output><jupyter_text>Fetch an image.<jupyter_code>import requests
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png"
image = Image.open(requests.get(url, stream=True).raw)
image<jupyter_output><empty_output><jupyter_text>Preprocess the image.<jupyter_code># prepare image for the model
encoding = image_processor(image.convert("RGB"), return_tensors="pt")
print(encoding.pixel_values.shape)<jupyter_output>torch.Size([1, 3, 512, 512])<jupyter_text>Run an inference.<jupyter_code>with torch.no_grad():
outputs = inference_model(pixel_values=encoding.pixel_values)
logits = outputs.logits
upsampled_logits = nn.functional.interpolate(
logits,
size=image.size[::-1],
mode="bilinear",
align_corners=False,
)
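# argmax over the class dimension turns the upsampled logits into a per-pixel class-index map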
pred_seg = upsampled_logits.argmax(dim=1)[0]<jupyter_output><empty_output><jupyter_text>Visualize the results.We need a color palette to visualize the results. Here, we use [one provided by the TensorFlow Model Garden repository](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.pyL51).<jupyter_code>def ade_palette():
"""Creates a label colormap used in ADE20K segmentation benchmark.
Returns:
A colormap for visualizing segmentation results.
"""
return np.asarray(
[
[0, 0, 0],
[120, 120, 120],
[180, 120, 120],
[6, 230, 230],
[80, 50, 50],
[4, 200, 3],
[120, 120, 80],
[140, 140, 140],
[204, 5, 255],
[230, 230, 230],
[4, 250, 7],
[224, 5, 255],
[235, 255, 7],
[150, 5, 61],
[120, 120, 70],
[8, 255, 51],
[255, 6, 82],
[143, 255, 140],
[204, 255, 4],
[255, 51, 7],
[204, 70, 3],
[0, 102, 200],
[61, 230, 250],
[255, 6, 51],
[11, 102, 255],
[255, 7, 71],
[255, 9, 224],
[9, 7, 230],
[220, 220, 220],
[255, 9, 92],
[112, 9, 255],
[8, 255, 214],
[7, 255, 224],
[255, 184, 6],
[10, 255, 71],
[255, 41, 10],
[7, 255, 255],
[224, 255, 8],
[102, 8, 255],
[255, 61, 6],
[255, 194, 7],
[255, 122, 8],
[0, 255, 20],
[255, 8, 41],
[255, 5, 153],
[6, 51, 255],
[235, 12, 255],
[160, 150, 20],
[0, 163, 255],
[140, 140, 140],
[250, 10, 15],
[20, 255, 0],
[31, 255, 0],
[255, 31, 0],
[255, 224, 0],
[153, 255, 0],
[0, 0, 255],
[255, 71, 0],
[0, 235, 255],
[0, 173, 255],
[31, 0, 255],
[11, 200, 200],
[255, 82, 0],
[0, 255, 245],
[0, 61, 255],
[0, 255, 112],
[0, 255, 133],
[255, 0, 0],
[255, 163, 0],
[255, 102, 0],
[194, 255, 0],
[0, 143, 255],
[51, 255, 0],
[0, 82, 255],
[0, 255, 41],
[0, 255, 173],
[10, 0, 255],
[173, 255, 0],
[0, 255, 153],
[255, 92, 0],
[255, 0, 255],
[255, 0, 245],
[255, 0, 102],
[255, 173, 0],
[255, 0, 20],
[255, 184, 184],
[0, 31, 255],
[0, 255, 61],
[0, 71, 255],
[255, 0, 204],
[0, 255, 194],
[0, 255, 82],
[0, 10, 255],
[0, 112, 255],
[51, 0, 255],
[0, 194, 255],
[0, 122, 255],
[0, 255, 163],
[255, 153, 0],
[0, 255, 10],
[255, 112, 0],
[143, 255, 0],
[82, 0, 255],
[163, 255, 0],
[255, 235, 0],
[8, 184, 170],
[133, 0, 255],
[0, 255, 92],
[184, 0, 255],
[255, 0, 31],
[0, 184, 255],
[0, 214, 255],
[255, 0, 112],
[92, 255, 0],
[0, 224, 255],
[112, 224, 255],
[70, 184, 160],
[163, 0, 255],
[153, 0, 255],
[71, 255, 0],
[255, 0, 163],
[255, 204, 0],
[255, 0, 143],
[0, 255, 235],
[133, 255, 0],
[255, 0, 235],
[245, 0, 255],
[255, 0, 122],
[255, 245, 0],
[10, 190, 212],
[214, 255, 0],
[0, 204, 255],
[20, 0, 255],
[255, 255, 0],
[0, 153, 255],
[0, 41, 255],
[0, 255, 204],
[41, 0, 255],
[41, 255, 0],
[173, 0, 255],
[0, 245, 255],
[71, 0, 255],
[122, 0, 255],
[0, 255, 184],
[0, 92, 255],
[184, 255, 0],
[0, 133, 255],
[255, 214, 0],
[25, 194, 194],
[102, 255, 0],
[92, 0, 255],
]
)
import matplotlib.pyplot as plt
color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8)
palette = np.array(ade_palette())
for label, color in enumerate(palette):
color_seg[pred_seg == label, :] = color
color_seg = color_seg[..., ::-1] # convert to BGR
img = np.array(image) * 0.5 + color_seg * 0.5 # plot the image with the segmentation map
img = img.astype(np.uint8)
plt.figure(figsize=(15, 10))
plt.imshow(img)
plt.show()<jupyter_output><empty_output> | peft/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb/0 | {
"file_path": "peft/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb",
"repo_id": "peft",
"token_count": 8322
} |
<jupyter_start><jupyter_text>IntroductionIn this notebook, we are going to fine-tune the LayoutLM model by Microsoft Research on the [FUNSD](https://guillaumejaume.github.io/FUNSD/) dataset, which is a collection of annotated form documents. The goal of our model is to learn the annotations of a number of labels ("question", "answer", "header" and "other") on those forms, such that it can be used to annotate unseen forms in the future.* Original LayoutLM paper: https://arxiv.org/abs/1912.13318* Original FUNSD paper: https://arxiv.org/abs/1905.13538 Install librariesCurrently you have to first install the `unilm` package, and then the `transformers` package (which updates the outdated `transformers` package that is included in the `unilm` package). The reason we also install the `unilm` package is because we need its preprocessing files. I've forked it, and removed some statements which introduced some issues.<jupyter_code># ! rm -r unilm
# ! pip install unilm<jupyter_output><empty_output><jupyter_text>Getting the dataHere we download the data of the [FUNSD dataset](https://guillaumejaume.github.io/FUNSD/) from the web. This results in a directory called "data" being created, which has 2 subdirectories, one for training and one for testing. Each of those has 2 subdirectories in turn, one containing the images as png files and one containing the annotations in json format.<jupyter_code># ! wget https://guillaumejaume.github.io/FUNSD/dataset.zip
# ! unzip dataset.zip && mv dataset data && rm -rf dataset.zip __MACOSX<jupyter_output><empty_output><jupyter_text>Let's take a look at a training example. For this, we are going to use PIL (Python Imaging Library).<jupyter_code>from PIL import Image, ImageDraw, ImageFont
import os
base_path = "/home/sourab/temp/data/dataset"
image = Image.open(os.path.join(base_path, "training_data/images/0000971160.png"))
image = image.convert("RGB")
image<jupyter_output><empty_output><jupyter_text>Now let's plot its corresponding annotations. Basically, if you type `data['form']`, you get a list of all general annotations. Each general annotation has a label, a bounding box, and one or more words, which in turn also have their own bounding boxes. The bounding boxes are in [xleft, ytop, xright, ybottom] format.<jupyter_code>import json
with open(os.path.join(base_path, "training_data/annotations/0000971160.json")) as f:
data = json.load(f)
for annotation in data["form"]:
print(annotation)<jupyter_output><empty_output><jupyter_text>The PIL library has a handy ImageDraw module, which (you guessed it) allows you to draw things (such as rectangles) on an image:<jupyter_code>draw = ImageDraw.Draw(image, "RGBA")
font = ImageFont.load_default()
label2color = {"question": "blue", "answer": "green", "header": "orange", "other": "violet"}
for annotation in data["form"]:
label = annotation["label"]
general_box = annotation["box"]
draw.rectangle(general_box, outline=label2color[label], width=2)
draw.text((general_box[0] + 10, general_box[1] - 10), label, fill=label2color[label], font=font)
words = annotation["words"]
for word in words:
box = word["box"]
draw.rectangle(box, outline=label2color[label], width=1)
image<jupyter_output><empty_output><jupyter_text>Preprocessing the dataNext, we need to turn the document images into individual tokens and corresponding labels (BIOES format, see further). We do this both for the training and test datasets. Make sure to run this from the `/content` directory:<jupyter_code># ! python unilm/layoutlm/examples/seq_labeling/preprocess.py --data_dir data/dataset/training_data/annotations \
# --data_split train \
# --output_dir data \
# --model_name_or_path microsoft/layoutlm-base-uncased \
# --max_len 510
# ! python unilm/layoutlm/examples/seq_labeling/preprocess.py --data_dir data/dataset/testing_data/annotations \
# --data_split test \
# --output_dir data \
# --model_name_or_path microsoft/layoutlm-base-uncased \
# --max_len 510<jupyter_output><empty_output><jupyter_text>Next, we create a labels.txt file that contains the unique labels of the FUNSD dataset:<jupyter_code># ! cat data/train.txt | cut -d$'\t' -f 2 | grep -v "^$"| sort | uniq > data/labels.txt<jupyter_output><empty_output><jupyter_text>Define a PyTorch datasetFirst, we create a list containing the unique labels based on `data/labels.txt` (run this from the content directory):<jupyter_code>from torch.nn import CrossEntropyLoss
def get_labels(path):
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
labels = get_labels("data/labels.txt")
num_labels = len(labels)
label_map = {i: label for i, label in enumerate(labels)}
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index<jupyter_output><empty_output><jupyter_text>We can see that the dataset uses the so-called BIOES annotation scheme to annotate the tokens. This means that a given token can be at the beginning (B), inside (I), or at the end (E) of an entity, outside any entity (O), or form a single-token entity (S). Entities include ANSWER, QUESTION, HEADER and OTHER:<jupyter_code>print(labels)
import os
import torch
from torch.utils.data import Dataset
logger = logging.getLogger(__name__)
class FunsdDataset(Dataset):
def __init__(self, args, tokenizer, labels, pad_token_label_id, mode):
if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = read_examples_from_file(args.data_dir, mode)
features = convert_examples_to_features(
examples,
labels,
args.max_seq_length,
tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
)
# if args.local_rank in [-1, 0]:
# logger.info("Saving features into cached file %s", cached_features_file)
# torch.save(features, cached_features_file)
if args.local_rank == 0 and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
self.features = features
# Convert to Tensors and build dataset
self.all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
self.all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
self.all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
self.all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
self.all_bboxes = torch.tensor([f.boxes for f in features], dtype=torch.long)
def __len__(self):
return len(self.features)
def __getitem__(self, index):
return (
self.all_input_ids[index],
self.all_input_mask[index],
self.all_segment_ids[index],
self.all_label_ids[index],
self.all_bboxes[index],
)
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
class InputFeatures(object):
"""A single set of features of data."""
def __init__(
self,
input_ids,
input_mask,
segment_ids,
label_ids,
boxes,
actual_bboxes,
file_name,
page_size,
):
        assert all(
            0 <= coordinate <= 1000 for box in boxes for coordinate in box
        ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(boxes)
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
def read_examples_from_file(data_dir, mode):
file_path = os.path.join(data_dir, "{}.txt".format(mode))
box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode))
image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode))
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f, open(box_file_path, encoding="utf-8") as fb, open(
image_file_path, encoding="utf-8"
) as fi:
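        # the three files are parallel line by line: {mode}.txt holds "token<TAB>label" pairs, {mode}_box.txt holds the
        # normalized token boxes, and {mode}_image.txt holds the actual box, page size and source file name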
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
for line, bline, iline in zip(f, fb, fi):
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(
InputExample(
guid="{}-{}".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
guid_index += 1
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
else:
splits = line.split("\t")
bsplits = bline.split("\t")
isplits = iline.split("\t")
assert len(splits) == 2
assert len(bsplits) == 2
assert len(isplits) == 4
assert splits[0] == bsplits[0]
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
box = bsplits[-1].replace("\n", "")
box = [int(b) for b in box.split()]
boxes.append(box)
actual_bbox = [int(b) for b in isplits[1].split()]
actual_bboxes.append(actual_bbox)
page_size = [int(i) for i in isplits[2].split()]
file_name = isplits[3].strip()
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(
InputExample(
guid="%s-%d".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return examples
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
cls_token_box=[0, 0, 0, 0],
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_segment_id=0,
pad_token_label_id=-1,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
"""Loads a data file into a list of `InputBatch`s
    `cls_token_at_end` defines the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
    `cls_token_segment_id` defines the segment id associated with the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for ex_index, example in enumerate(examples):
file_name = example.file_name
page_size = example.page_size
width, height = page_size
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
token_boxes = []
actual_bboxes = []
label_ids = []
for word, label, box, actual_bbox in zip(example.words, example.labels, example.boxes, example.actual_bboxes):
word_tokens = tokenizer.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
actual_bboxes.extend([actual_bbox] * len(word_tokens))
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
token_boxes = token_boxes[: (max_seq_length - special_tokens_count)]
actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
token_boxes += [cls_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
token_boxes = [cls_token_box] + token_boxes
actual_bboxes = [[0, 0, width, height]] + actual_bboxes
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
token_boxes = ([pad_token_box] * padding_length) + token_boxes
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
token_boxes += [pad_token_box] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(token_boxes) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
logger.info("boxes: %s", " ".join([str(x) for x in token_boxes]))
logger.info("actual_bboxes: %s", " ".join([str(x) for x in actual_bboxes]))
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids,
boxes=token_boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return features
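# A minimal usage sketch (not executed in this notebook; `tokenizer` and the label list are
# created further below, and the keyword values here are illustrative choices only):
# features = convert_examples_to_features(
#     examples=train_examples,
#     label_list=labels,
#     max_seq_length=512,
#     tokenizer=tokenizer,
#     cls_token=tokenizer.cls_token,
#     sep_token=tokenizer.sep_token,
#     pad_token=tokenizer.pad_token_id,
#     pad_token_label_id=pad_token_label_id,
# )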
from transformers import LayoutLMTokenizer
# from .unilm.layoutlm.data.funsd import FunsdDataset, InputFeatures
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
batch_size = 16
args = {
"local_rank": -1,
"overwrite_cache": True,
"data_dir": "/home/sourab/temp/data/",
"model_name_or_path": "microsoft/layoutlm-base-uncased",
"max_seq_length": 512,
"model_type": "layoutlm",
}
# class to turn the keys of a dict into attributes (thanks Stackoverflow)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
args = AttrDict(args)
tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
# the LayoutLM authors already defined a specific FunsdDataset, so we are going to use this here
train_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode="train")
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=batch_size)
eval_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode="test")
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=batch_size)
len(train_dataloader)
len(eval_dataloader)
batch = next(iter(train_dataloader))
input_ids = batch[0][0]
tokenizer.decode(input_ids)<jupyter_output><empty_output><jupyter_text>Define and fine-tune the modelAs this is a sequence labeling task, we are going to load `LayoutLMForTokenClassification` (the base sized model) from the hub. We are going to fine-tune it on a downstream task, namely FUNSD.<jupyter_code>from peft import get_peft_config, PeftModel, get_peft_model, LoraConfig, TaskType
peft_config = LoraConfig(
task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all"
)
peft_config
from transformers import LayoutLMForTokenClassification
import torch
from transformers import set_seed
seed = 100
set_seed(seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=num_labels)
model = get_peft_model(model, peft_config)
model.to(device)
print(model.model.layoutlm.encoder.layer[0].attention.self.query.weight)
print(model.model.layoutlm.encoder.layer[0].attention.self.query.lora_A.weight)
print(model.model.classifier.weight)<jupyter_output><empty_output><jupyter_text>Now we can start training:<jupyter_code>from transformers import get_linear_schedule_with_warmup
from tqdm import tqdm
num_train_epochs = 100
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-3)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0.06 * (len(train_dataloader) * num_train_epochs),
num_training_steps=(len(train_dataloader) * num_train_epochs),
)
global_step = 0
t_total = len(train_dataloader) * num_train_epochs # total number of training steps
# put the model in training mode
model.train()
for epoch in range(num_train_epochs):
for batch in tqdm(train_dataloader, desc="Training"):
input_ids = batch[0].to(device)
bbox = batch[4].to(device)
attention_mask = batch[1].to(device)
token_type_ids = batch[2].to(device)
labels = batch[3].to(device)
# forward pass
outputs = model(
input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
)
loss = outputs.loss
# print loss every 10 steps
if global_step % 10 == 0:
print(f"Loss after {global_step} steps: {loss.item()}")
# backward pass to get the gradients
loss.backward()
# print("Gradients on classification head:")
# print(model.classifier.weight.grad[6,:].sum())
# update
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
global_step += 1
import numpy as np
from seqeval.metrics import (
classification_report,
f1_score,
precision_score,
recall_score,
)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
# put model in evaluation mode
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
with torch.no_grad():
input_ids = batch[0].to(device)
bbox = batch[4].to(device)
attention_mask = batch[1].to(device)
token_type_ids = batch[2].to(device)
labels = batch[3].to(device)
# forward pass
outputs = model(
input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
)
# get the loss and logits
tmp_eval_loss = outputs.loss
logits = outputs.logits
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
# compute the predictions
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = labels.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)
# compute average evaluation loss
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
print(results)
model.print_trainable_parameters()
model.save_pretrained("peft_layoutlm")
!du -h "peft_layoutlm/adapter_model.bin"<jupyter_output>2,8M layoutlm_funsd.pt | peft/examples/token_classification/peft_lora_token_cls.ipynb/0 | {
"file_path": "peft/examples/token_classification/peft_lora_token_cls.ipynb",
"repo_id": "peft",
"token_count": 11949
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import torch
from .utils import PeftType
if TYPE_CHECKING:
from .config import PeftConfig
from .tuners.tuners_utils import BaseTuner
# these will be filled by the register_peft_method function
PEFT_TYPE_TO_CONFIG_MAPPING: dict[PeftType, type[PeftConfig]] = {}
PEFT_TYPE_TO_TUNER_MAPPING: dict[PeftType, type[BaseTuner]] = {}
PEFT_TYPE_TO_MIXED_MODEL_MAPPING: dict[PeftType, type[BaseTuner]] = {}
PEFT_TYPE_TO_PREFIX_MAPPING: dict[PeftType, str] = {}
def get_peft_config(config_dict: dict[str, Any]) -> PeftConfig:
"""
Returns a Peft config object from a dictionary.
Args:
config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters.
"""
return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict)
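# A minimal usage sketch (assumes the LoRA method has been registered, so that
# PEFT_TYPE_TO_CONFIG_MAPPING contains an entry for PeftType.LORA):
#
#     config = get_peft_config({"peft_type": "LORA", "r": 8, "lora_alpha": 16})
#     # `config` is now a LoraConfig instance with peft_type == PeftType.LORA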
def inject_adapter_in_model(
peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default", low_cpu_mem_usage: bool = False
) -> torch.nn.Module:
r"""
A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning
methods and adaption prompt. Make sure to have the correct `target_modules` set in the `peft_config` object. The API
calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods.
Args:
peft_config (`PeftConfig`):
Configuration object containing the parameters of the Peft model.
model (`torch.nn.Module`):
The input model where the adapter will be injected.
adapter_name (`str`, `optional`, defaults to `"default"`):
The name of the adapter to be injected, if not provided, the default adapter name is used ("default").
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
Create empty adapter weights on meta device. Useful to speed up the loading process.
"""
if peft_config.is_prompt_learning or peft_config.is_adaption_prompt:
raise ValueError("`create_and_replace` does not support prompt learning and adaption prompt yet.")
if peft_config.peft_type not in PEFT_TYPE_TO_TUNER_MAPPING.keys():
raise ValueError(
f"`inject_adapter_in_model` does not support {peft_config.peft_type} yet. Please use `get_peft_model`."
)
tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type]
# By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules.
peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name, low_cpu_mem_usage=low_cpu_mem_usage)
return peft_model.model
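# A minimal usage sketch (the module name "linear" below is made up for the example):
#
#     import torch.nn as nn
#     from peft import LoraConfig
#
#     class MLP(nn.Module):
#         def __init__(self):
#             super().__init__()
#             self.linear = nn.Linear(10, 10)
#
#     lora_config = LoraConfig(target_modules=["linear"])
#     model = inject_adapter_in_model(lora_config, MLP())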
| peft/src/peft/mapping.py/0 | {
"file_path": "peft/src/peft/mapping.py",
"repo_id": "peft",
"token_count": 1133
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from dataclasses import dataclass, field
from peft.config import PeftConfig
from peft.utils import PeftType
from .utils import llama_compute_query_states
@dataclass
class AdaptionPromptConfig(PeftConfig):
"""Stores the configuration of an [`AdaptionPromptModel`]."""
target_modules: str = field(
default=None, metadata={"help": "Name of the attention submodules to insert adaption prompts into."}
)
adapter_len: int = field(default=None, metadata={"help": "Number of adapter tokens to insert"})
adapter_layers: int = field(default=None, metadata={"help": "Number of adapter layers (from the top)"})
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.ADAPTION_PROMPT
@property
def is_adaption_prompt(self) -> bool:
"""Return True if this is an adaption prompt config."""
return True
# Contains the config that is specific to a transformers model type.
ModelTypeConfig = namedtuple(
"ModelTypeConfig", ["compute_query_states", "target_modules", "k_proj_layer", "v_proj_layer", "o_proj_layer"]
)
# Mapping of transformers model types to their specific configuration.
TRANSFORMERS_MODEL_CONFIG = {
"llama": ModelTypeConfig(
compute_query_states=llama_compute_query_states,
target_modules="self_attn",
k_proj_layer="k_proj",
v_proj_layer="v_proj",
o_proj_layer="o_proj",
),
"mistral": ModelTypeConfig( # same as llama,
compute_query_states=llama_compute_query_states,
target_modules="self_attn",
k_proj_layer="k_proj",
v_proj_layer="v_proj",
o_proj_layer="o_proj",
),
}
def prepare_config(
peft_config: AdaptionPromptConfig,
model,
) -> AdaptionPromptConfig:
"""Prepare the config based on the llama model type."""
if model.config.model_type not in TRANSFORMERS_MODEL_CONFIG:
raise ValueError("Unsupported model type for adaption prompt: '{model.config.model_type}'.")
model_config = TRANSFORMERS_MODEL_CONFIG[model.config.model_type]
if peft_config.target_modules is None:
peft_config.target_modules = model_config.target_modules
return peft_config
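# A minimal usage sketch (assumes `model` is a loaded llama- or mistral-family
# transformers model, so prepare_config can fill in the default target_modules):
#
#     config = AdaptionPromptConfig(adapter_len=10, adapter_layers=30, task_type="CAUSAL_LM")
#     config = prepare_config(config, model)  # sets target_modules to "self_attn"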
| peft/src/peft/tuners/adaption_prompt/config.py/0 | {
"file_path": "peft/src/peft/tuners/adaption_prompt/config.py",
"repo_id": "peft",
"token_count": 1010
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Literal, Optional
from peft.config import PromptLearningConfig
from peft.utils import PeftType
@dataclass
class CPTConfig(PromptLearningConfig):
"""
CPT Configuration class extending PeftConfig for Context-aware Prompt Tuning (CPT).
This class introduces additional parameters required for CPT, such as:
- Token type masks
- Prompt tuning initialization
- Loss weighting
- Projection settings
For more details, see the paper: https://arxiv.org/abs/2410.17222
"""
# Token-related configurations
cpt_token_ids: Optional[list[int]] = field(
default=None, metadata={"help": "Tensor of token IDs used for CPT prompts."}
)
cpt_mask: Optional[list[int]] = field(default=None, metadata={"help": "Tensor mask applied to CPT tokens."})
cpt_tokens_type_mask: Optional[list[int]] = field(
default=None, metadata={"help": "Mask indicating the type of each CPT token."}
)
# Loss-related configurations
opt_weighted_loss_type: Optional[Literal["none", "decay"]] = field(
default="none", metadata={"help": "Type of weighted loss: 'none' or 'decay'."}
)
opt_loss_decay_factor: Optional[float] = field(
default=1.0, metadata={"help": "Factor for exponential decay in loss weighting."}
)
# Projection-related configurations
opt_projection_epsilon: Optional[float] = field(
default=0.1, metadata={"help": "Epsilon value for input projection."}
)
opt_projection_format_epsilon: Optional[float] = field(
default=0.1, metadata={"help": "Epsilon value for format projection."}
)
# Tokenizer configuration
tokenizer_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`"
},
)
# Need to define CPT-specific static attributes
is_prompt_learning = True # Indicates that CPT is a prompt-learning method.
def __post_init__(self):
"""
Post-initialization hook to set additional attributes after the config is initialized.
"""
# CPT-specific static attributes
self.is_prompt_learning = True # Indicates that CPT is a prompt-learning method.
self.num_layers = None # Number of layers (optional, not always required).
self.token_dim = None # Dimension of token embeddings.
self.num_attention_heads = None # Number of attention heads (if applicable).
self.num_transformer_submodules = 1 # Number of transformer submodules used.
self.peft_type = PeftType.CPT # Specifies that the PEFT type is CPT.
self.task_type = "CAUSAL_LM" # Ensures task type is causal language modeling.
if self.cpt_token_ids is None:
self.cpt_token_ids = [0]
self.num_virtual_tokens = len(self.cpt_token_ids)
if self.cpt_mask is None:
self.cpt_mask = [1 for _ in self.cpt_token_ids]
if self.cpt_tokens_type_mask is None:
self.cpt_tokens_type_mask = [1 for _ in self.cpt_token_ids]
if not (
len(self.cpt_token_ids) == len(self.cpt_mask) == len(self.cpt_tokens_type_mask) == self.num_virtual_tokens
):
raise ValueError("cpt_token_ids, cpt_mask and cpt_tokens_type_mask must have the same length.")
| peft/src/peft/tuners/cpt/config.py/0 | {
"file_path": "peft/src/peft/tuners/cpt/config.py",
"repo_id": "peft",
"token_count": 1463
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class LNTuningConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a :class:`~peft.tuners.LNTuningModel`.
Args:
target_modules (`Optional[Union[List[str], str]]`):
List of module names or regex expression of the module names to replace with LNTuning. For example,
'.*decoder.*' or '.*encoder.*'. If this is not specified, modules will be chosen according to the model
architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
the target modules manually.
exclude_modules (`Optional[Union[List[str], str]]`):
The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
When passing a list of strings, either an exact match will be performed or it is checked if the name of the
module ends with any of the passed strings.
modules_to_save (`Optional[Union[List[str], str]]`):
List of modules to be set as trainable and saved in the final checkpoint. For example, in Sequence
Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized
and as such need to be trainable and saved.
"""
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or regex expression of the module names to replace with LNTuning."
"For example, '.*decoder.*' or '.*encoder.*'. "
"If not specified, modules will be chosen according to the model architecture, If the architecture is "
"not known, an error will be raised -- in this case, you shoud specify the target modules manually."
),
},
)
exclude_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={"help": "List of module names or regex expression of the module names to exclude from LNTuning."},
)
modules_to_save: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": "List of modules to be set as trainable and saved in the final checkpoint. "
"For example, in Sequence Classification or Token Classification tasks, "
"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.LN_TUNING
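# A minimal usage sketch (the "norm" pattern below is a made-up module-name suffix;
# pick names that match the LayerNorm modules of your own model):
#
#     config = LNTuningConfig(task_type="CAUSAL_LM", target_modules=["norm"])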
| peft/src/peft/tuners/ln_tuning/config.py/0 | {
"file_path": "peft/src/peft/tuners/ln_tuning/config.py",
"repo_id": "peft",
"token_count": 1153
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference code: https://github.com/iboing/CorDA/blob/main/cordalib/decomposition.py
# Reference paper: https://arxiv.org/abs/2406.05223
import os
from typing import Any, Callable, Iterable, Optional
import torch
import torch.nn as nn
from dataclasses import dataclass
from tqdm import tqdm
from peft.tuners.lora.config import LoraConfig
from peft.tuners.lora.model import LoraModel
from peft.utils.other import get_pattern_key
@dataclass
class CordaEigens:
S_WC: torch.Tensor
U_WC: torch.Tensor
V_WC: torch.Tensor
def target_modules(model: nn.Module, config: LoraConfig) -> Iterable[nn.Module]:
"""
Iterate over CorDA target names and modules of a model. A module is a target if its name is in
`config.target_modules` and is `nn.Linear`.
"""
for name, module in model.named_modules():
if LoraModel._check_target_module_exists(config, name) and isinstance(module, nn.Linear):
yield name, module
def get_model_device(model: nn.Module) -> str:
if hasattr(model, "module"): # Handle DeepSpeed/DataParallel
model = model.module
return next(iter(model.parameters())).device.type
@torch.no_grad()
def preprocess_corda(
model: nn.Module,
lora_config: LoraConfig,
run_model: Optional[Callable[[], None]] = None,
hooked_model: Optional[nn.Module] = None,
):
"""
Build necessary CorDA fields for a model.
For each `M * N` linear layer, a `M * M` covariance matrix will be built temporarily during the preprocessing
process, consuming roughly another `2 * MODEL_SIZE` memory for typical LLMs if model weight is FP16 and covariance
is FP32. If that's too much, consider specifying `use_float16_for_covariance` in `lora_config.corda_config`.
Args:
model (`nn.Module`):
Model to preprocess.
lora_config (`LoraConfig`):
Lora configuration of the model. `lora_config.corda_config` should be set.
run_model (`Optional[Callable[[], None]]`):
Callback to run the model when building covariance. Typically you should run model inference on your sample
dataset in this callback. Experiments have shown that when token count per sample is 2048, hidden dimension
is 4096, collecting 256 distinct samples is enough. If you collect too few or too repetitive samples, the
covariance matrix may be low-ranked and unstabilize preprocessing. You can estimate sample count as
`HIDDEN_DIM / TOKEN_PER_SAMPLE * 128`. `run_model` can be `None` only if covariance file in
`lora_config.corda_config` is already created.
hooked_model (`Optional[nn.Module]`):
Model to hook when building covariance. If none, original model will be hooked. This is only useful when
you want to hook a different model than the one you are training, typically you should leave this `None`.
Upon completion, the following fields are set for each target module:
eigens.S_WC (`torch.Tensor`):
Singular values of the weight matrix.
eigens.U_WC (`torch.Tensor`):
Left singular vectors of the weight matrix.
eigens.V_WC (`torch.Tensor`):
Right singular vectors of the weight matrix, multiplied by inverse of covariance matrix.
"""
cache_file = lora_config.corda_config.cache_file
covariance_file = lora_config.corda_config.covariance_file
corda_method = lora_config.corda_config.corda_method
verbose = lora_config.corda_config.verbose
prune_temporary_fields = lora_config.corda_config.prune_temporary_fields
# If cache exists, skip building
if cache_file is not None and os.path.exists(cache_file) and os.path.getsize(cache_file) > 0:
cache = torch.load(cache_file, map_location=get_model_device(model))
for name, module in target_modules(model, lora_config):
module.eigens = CordaEigens(
S_WC=cache[f"{name}.eigens.S_WC"],
U_WC=cache[f"{name}.eigens.U_WC"],
V_WC=cache[f"{name}.eigens.V_WC"],
)
else:
# Specify CorDA method for each layer
if corda_method is None:
raise ValueError("corda_method is required when cache_file is not provided.")
for name, module in target_modules(model, lora_config):
module.corda_method = corda_method
# Specify CorDA rank for each layer
for name, module in target_modules(model, lora_config):
r_key = get_pattern_key(lora_config.rank_pattern.keys(), name)
module.rank = lora_config.rank_pattern.get(r_key, lora_config.r)
# Calculate covariance matrix
calib_cov_distribution(model, lora_config, run_model, hooked_model, covariance_file)
# Calculate eigens
collect_eigens(model, lora_config, verbose)
# Crop CorDA eigens so that there's less to save
crop_corda_eigens(model, lora_config)
# Remove redundant fields if exist
if prune_temporary_fields:
for name, module in target_modules(model, lora_config):
if hasattr(module, "sample_count"):
del module.sample_count
if hasattr(module, "covariance_matrix"):
del module.covariance_matrix
if hasattr(module, "corda_method"):
del module.corda_method
if hasattr(module, "rank"):
del module.rank
# Save cache to disk
if cache_file is not None:
cache: dict[str, Any] = {}
for name, module in target_modules(model, lora_config):
cache[f"{name}.eigens.S_WC"] = module.eigens.S_WC
cache[f"{name}.eigens.U_WC"] = module.eigens.U_WC
cache[f"{name}.eigens.V_WC"] = module.eigens.V_WC
os.makedirs(os.path.dirname(cache_file), exist_ok=True)
torch.save(cache, cache_file)
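# A minimal usage sketch of the CorDA preprocessing flow (assumes `model` and a calibration
# dataloader `calib_loader` already exist; their names are made up for this example, and
# CordaConfig comes from peft.tuners.lora.config):
#
#     from peft import LoraConfig, get_peft_model
#     from peft.tuners.lora.config import CordaConfig
#
#     lora_config = LoraConfig(init_lora_weights="corda", corda_config=CordaConfig(corda_method="kpm"))
#
#     @torch.no_grad()
#     def run_model():
#         for batch in calib_loader:
#             model(**batch)
#
#     preprocess_corda(model, lora_config, run_model=run_model)
#     peft_model = get_peft_model(model, lora_config)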
@torch.no_grad()
def calib_cov_distribution(
model: nn.Module,
config: LoraConfig,
run_model: Optional[Callable[[], None]],
hooked_model: Optional[nn.Module],
covariance_file: Optional[str],
):
if covariance_file is not None and os.path.exists(covariance_file) and os.path.getsize(covariance_file) > 0:
all_covariance_matrix = torch.load(covariance_file, map_location=get_model_device(model))
for name, module in target_modules(model, config):
module.covariance_matrix = all_covariance_matrix[name]
return
if run_model is None:
raise ValueError("run_model must be specified when covariance file and cache file aren't built.")
if hooked_model is None:
hooked_model = model
hooked_model.eval()
def hook(module, input, output):
input = input[0].detach().squeeze(0).data ## (context_length = 2048, dim)
if not config.corda_config.use_float16_for_covariance:
input = input.float()
input = input / torch.max(input).abs()
# check if input is valid
if torch.isnan(input).any() or torch.isinf(input).any():
raise ValueError("Invalid value found in input, please check your input data.")
# calculate covariance and check if it's valid
covariance = input.t().matmul(input)
if torch.isnan(covariance).any() or torch.isinf(covariance).any():
raise ValueError(
"Invalid value found in covariance. Please file an issue at https://github.com/huggingface/peft/issues."
)
# add to module
module.sample_count += 1
module.covariance_matrix += covariance
# free memory
del covariance, input
handles = []
for name, module in target_modules(hooked_model, config):
module.sample_count = 0
module.covariance_matrix = 0
handles.append(module.register_forward_hook(hook))
run_model()
# Clear the hooks
for handle in handles:
handle.remove()
# In some edge cases you might need to hook a model different from the model to add adapters,
# this case you would specify `hooked_model` and set it to a different model from `model`.
if hooked_model is not model:
targets = {}
for name, module in target_modules(model, config):
targets[name] = module
for name, module in target_modules(hooked_model, config):
# There can be modules used only in inference, but not training
# Exclude modules not in target model to prevent KeyError in this case
if name in targets:
targets[name].sample_count = module.sample_count
targets[name].covariance_matrix = module.covariance_matrix
# Divide by sample count
for name, module in target_modules(model, config):
module.covariance_matrix /= module.sample_count
# Save covariance to disk
if covariance_file is not None:
all_covariance_matrix = {}
for name, module in target_modules(model, config):
all_covariance_matrix[name] = module.covariance_matrix
os.makedirs(os.path.dirname(covariance_file), exist_ok=True)
torch.save(all_covariance_matrix, covariance_file)
@torch.no_grad()
def collect_eigens(
model: nn.Module,
config: LoraConfig,
verbose: bool,
):
"""Call collect_eigens_for_layer and store result in key `eigens` of each layer."""
linear_modules = []
for name, module in target_modules(model, config):
linear_modules.append((name, module))
if verbose:
linear_modules = tqdm(linear_modules, desc="Collecting eigens")
for name, module in linear_modules:
module.eigens = collect_eigens_for_layer(module, config)
@torch.no_grad()
def collect_eigens_for_layer(
linear: nn.Linear,
config: LoraConfig,
) -> CordaEigens:
w = linear.weight.data.float()
out_dim = w.size(0)
in_dim = w.size(1)
min_dim = min(in_dim, out_dim)
if not hasattr(linear, "covariance_matrix"):
raise ValueError(
"Covariance matrix not found in linear module. Please do not call this function directly, "
"instead call `preprocess_corda`. If your usage is correct but this error still encounters, "
"please file an issue at https://github.com/huggingface/peft/issues."
)
covariance_matrix = linear.covariance_matrix.float()
damp = 0.01
while True:
compensate = torch.diag(
torch.ones(covariance_matrix.size(0)).to(covariance_matrix.device)
* torch.mean(torch.diag(covariance_matrix))
* damp
)
fix_covariance_matrix = covariance_matrix + compensate
cov_inv = torch.linalg.inv(fix_covariance_matrix)
inv_error = torch.dist(
fix_covariance_matrix @ cov_inv, torch.eye(covariance_matrix.size(0)).to(get_model_device(linear))
).item()
if inv_error < 0.05:
break
else:
damp = damp * 2
w = w @ fix_covariance_matrix ## w: out_dim, in_dim; covariance_matrix: in_dim, in_dim
U, S, Vh = torch.linalg.svd(w, full_matrices=False)
V = (Vh @ cov_inv).transpose(0, 1)
# Sanity check, temporarily U and V are large, they will be crop after rank search
r = min_dim
if U.size(0) != out_dim or U.size(1) != r:
raise ValueError(
f"Matrix U size mismatch: {U.size()} vs. ({out_dim}, {r}), "
"please file an issue at https://github.com/huggingface/peft/issues."
)
if S.size(0) != r:
raise ValueError(
f"Matrix S size mismatch: {S.size()} vs. ({r},), "
"please file an issue at https://github.com/huggingface/peft/issues."
)
if V.size(0) != in_dim or V.size(1) != r:
raise ValueError(
f"Matrix V size mismatch: {V.size()} vs. ({in_dim}, {r}), "
"please file an issue at https://github.com/huggingface/peft/issues."
)
# Offload U and V to CPU, they consume too much memory
U = U.cpu()
V = V.cpu()
return CordaEigens(
S_WC=S,
U_WC=U,
V_WC=V,
)
@torch.no_grad()
def crop_corda_eigens(model: nn.Module, config: LoraConfig):
for name, module in target_modules(model, config):
# We don't expect saving sliced tensor writes the whole tensor to disk,
# so it's necessary to copy the tensors.
# Reference: https://github.com/pytorch/pytorch/issues/40157
if module.corda_method == "ipm":
module.eigens.S_WC = module.eigens.S_WC[: module.rank].clone()
module.eigens.U_WC = module.eigens.U_WC[:, : module.rank].clone().to(get_model_device(model))
module.eigens.V_WC = module.eigens.V_WC[:, : module.rank].clone().to(get_model_device(model))
elif module.corda_method == "kpm":
module.eigens.S_WC = module.eigens.S_WC[-module.rank :].clone()
module.eigens.U_WC = module.eigens.U_WC[:, -module.rank :].clone().to(get_model_device(model))
module.eigens.V_WC = module.eigens.V_WC[:, -module.rank :].clone().to(get_model_device(model))
else:
raise ValueError(f"Invalid corda_method found: {module.corda_method}, it should be 'ipm' or 'kpm'.")
# Sanity check
if module.eigens.S_WC.size(0) != module.rank:
raise ValueError(
f"rank mismatch: {module.eigens.S_WC.size(0)} vs. {module.rank},"
"please file an issue at https://github.com/huggingface/peft/issues."
)
if module.eigens.U_WC.size(0) != module.weight.size(0):
raise ValueError(
f"U size mismatch: {module.eigens.U_WC.size(0)} vs. {module.weight.size(0)},"
"please file an issue at https://github.com/huggingface/peft/issues."
)
if module.eigens.U_WC.size(1) != module.rank:
raise ValueError(
f"U size mismatch: {module.eigens.U_WC.size(1)} vs. {module.rank},"
"please file an issue at https://github.com/huggingface/peft/issues."
)
if module.eigens.V_WC.size(0) != module.weight.size(1):
raise ValueError(
f"V size mismatch: {module.eigens.V_WC.size(0)} vs. {module.weight.size(1)},"
"please file an issue at https://github.com/huggingface/peft/issues."
)
if module.eigens.V_WC.size(1) != module.rank:
raise ValueError(
f"V size mismatch: {module.eigens.V_WC.size(1)} vs. {module.rank},"
"please file an issue at https://github.com/huggingface/peft/issues."
)
| peft/src/peft/tuners/lora/corda.py/0 | {
"file_path": "peft/src/peft/tuners/lora/corda.py",
"repo_id": "peft",
"token_count": 6527
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PromptLearningConfig
from peft.utils import PeftType
class PromptTuningInit(str, enum.Enum):
TEXT = "TEXT"
RANDOM = "RANDOM"
@dataclass
class PromptTuningConfig(PromptLearningConfig):
"""
This is the configuration class to store the configuration of a [`PromptEmbedding`].
Args:
prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding.
prompt_tuning_init_text (`str`, *optional*):
The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`.
tokenizer_name_or_path (`str`, *optional*):
The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`.
tokenizer_kwargs (`dict`, *optional*):
The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if `prompt_tuning_init` is
`TEXT`.
"""
prompt_tuning_init: Union[PromptTuningInit, str] = field(
default=PromptTuningInit.RANDOM,
metadata={"help": "How to initialize the prompt tuning parameters"},
)
prompt_tuning_init_text: Optional[str] = field(
default=None,
metadata={
"help": "The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`"
},
)
tokenizer_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`"
},
)
tokenizer_kwargs: Optional[dict] = field(
default=None,
metadata={
"help": (
"The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if prompt_tuning_init is "
"`TEXT`"
),
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.PROMPT_TUNING
if (self.prompt_tuning_init == PromptTuningInit.TEXT) and not self.tokenizer_name_or_path:
raise ValueError(
f"When prompt_tuning_init='{PromptTuningInit.TEXT.value}', "
f"tokenizer_name_or_path can't be {self.tokenizer_name_or_path}."
)
if (self.prompt_tuning_init == PromptTuningInit.TEXT) and self.prompt_tuning_init_text is None:
raise ValueError(
f"When prompt_tuning_init='{PromptTuningInit.TEXT.value}', "
f"prompt_tuning_init_text can't be {self.prompt_tuning_init_text}."
)
if self.tokenizer_kwargs and (self.prompt_tuning_init != PromptTuningInit.TEXT):
raise ValueError(
f"tokenizer_kwargs only valid when using prompt_tuning_init='{PromptTuningInit.TEXT.value}'."
)
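# A minimal usage sketch ("gpt2" is only an example tokenizer id):
#
#     config = PromptTuningConfig(
#         task_type="CAUSAL_LM",
#         num_virtual_tokens=8,
#         prompt_tuning_init=PromptTuningInit.TEXT,
#         prompt_tuning_init_text="Classify if the tweet is a complaint or not:",
#         tokenizer_name_or_path="gpt2",
#     )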
| peft/src/peft/tuners/prompt_tuning/config.py/0 | {
"file_path": "peft/src/peft/tuners/prompt_tuning/config.py",
"repo_id": "peft",
"token_count": 1410
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
from contextlib import contextmanager
from functools import partial
from typing import Optional, Union
import torch
import torch.nn as nn
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.lora.model import LoraModel
from peft.tuners.tuners_utils import BaseTuner
from peft.utils.constants import DUMMY_TARGET_MODULES
from peft.utils.save_and_load import set_peft_model_state_dict
from .. import lora
from .classifier import XLoraClassifier
from .config import XLoraConfig
from .layer import XLoraConv2dLayer, XLoraEmbeddingLayer, XLoraLinearLayer
def convert_layers_to_xlora(
base: nn.Module, # PeftModel
xloramodel: nn.Module, # XLoraModel
config: XLoraConfig,
) -> tuple[int, torch.device | None]:
"""
Returns the number of swapped layers and the device of the converted layers (None if no layer was swapped).
"""
total_swapped = 0
all_layers = []
device = None
for module in base.modules():
# Check the exact type because classes like OPTLearnedPositionalEmbedding inherit from nn.Embedding
if isinstance(module, lora.Linear):
device = module.lora_A[next(iter(module.lora_A))].weight.device
new_layer = XLoraLinearLayer(
model=xloramodel,
target=module,
target_forward=module.forward,
layer_number=total_swapped,
config=config,
)
all_layers.append(new_layer)
module.forward = new_layer.forward # type: ignore[method-assign]
total_swapped += 1
elif isinstance(module, lora.Embedding):
device = module.lora_embedding_A[next(iter(module.lora_embedding_A))].device
new_layer = XLoraEmbeddingLayer(
model=xloramodel,
target=module,
target_forward=module.forward,
layer_number=total_swapped,
config=config,
)
all_layers.append(new_layer)
module.forward = new_layer.forward # type: ignore[method-assign]
total_swapped += 1
elif isinstance(module, lora.Conv2d):
device = module.lora_A[next(iter(module.lora_A))].weight.device
new_layer = XLoraConv2dLayer(
model=xloramodel,
target=module,
target_forward=module.forward,
layer_number=total_swapped,
config=config,
)
all_layers.append(new_layer)
module.forward = new_layer.forward # type: ignore[method-assign]
total_swapped += 1
return (total_swapped, device)
def _load_adapter_into_lora_model(
lora_model: LoraModel,
adapter_name: str,
model_id: str,
torch_device: Optional[str] = None,
ephemeral_gpu_offload: bool = False,
autocast_adapter_dtype: bool = True,
subfolder: Optional[str] = None,
**kwargs,
):
"""
This method emulates the behavior of `PeftModel.from_pretrained`. Updates to `PeftModel.from_pretrained` may need
to be reflected here.
All params pertain to the adapter (adapter name, model id, `i` is the adapter number in 0 indexing).
"""
from peft.peft_model import PeftModel
from peft.tuners.lora.config import LoraConfig
from peft.utils.other import infer_device
from peft.utils.save_and_load import load_peft_weights
hf_hub_download_kwargs, kwargs = PeftModel._split_kwargs(kwargs)
if torch_device is None:
torch_device = infer_device()
if adapter_name not in lora_model.peft_config:
# load the config
lora_peft_config = LoraConfig.from_pretrained(
model_id,
ephemeral_gpu_offload=ephemeral_gpu_offload,
subfolder=subfolder,
**hf_hub_download_kwargs,
)
lora_peft_config.inference_mode = False
lora_model.peft_config[adapter_name] = lora_peft_config
lora_model.inject_adapter(lora_model.model, adapter_name)
adapter_weights = load_peft_weights(model_id, device=torch_device, subfolder=subfolder, **hf_hub_download_kwargs)
new_adapter_weights = {}
# Rework the keys to contain the adapter numbers
for old_key in adapter_weights.keys():
key: str = old_key
# Remove all the prefixes until we have model.<...>
while not (key.startswith("model.") and not key.startswith("model.model.")):
key = key[key.find(".") + 1 :]
# We always want model.model
key = "model." + key
new_adapter_weights[key] = adapter_weights[old_key]
# load the weights into the model
ignore_mismatched_sizes = kwargs.get("ignore_mismatched_sizes", False)
load_result = set_peft_model_state_dict(
lora_model,
new_adapter_weights,
adapter_name=adapter_name,
ignore_mismatched_sizes=ignore_mismatched_sizes,
)
if len(load_result.unexpected_keys) > 0:
raise ValueError(
f"Got unexpected keys! Please raise an issue and tag @EricLBuehler.\n\nunexpected_keys={load_result.unexpected_keys}"
)
if hasattr(lora_model, "_cast_adapter_dtype"):
lora_model._cast_adapter_dtype(adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype)
class XLoraModel(BaseTuner):
"""
Creates an X-LoRA (Mixture of LoRA experts) model from a pretrained transformers model. Currently, this X-LoRA
implementation only works with models with a transformer architecture.
The method is described in detail in https://arxiv.org/abs/2402.07148.
Args:
model ([`torch.nn.Module`]): The model to be adapted.
config ([`XLoraConfig`]): The configuration of the Lora model.
adapter_name (`str`): The name of the adapter, does not affect the LoRA adapter names.
Returns:
`torch.nn.Module`: The X-LoRA model.
Example:
```py
>>> from transformers import AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig
>>> from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training
>>> model_config = AutoConfig.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
>>> config = XLoraConfig(
... task_type="CAUSAL_LM",
... hidden_size=model_config.hidden_size,
... xlora_depth=4,
... adapters={
... "adapter_1": "./path/to/the/checkpoint/",
... "adapter_2": "./path/to/the/checkpoint/",
... "adapter_n": "./path/to/the/checkpoint/",
... },
... )
>>> int8_config = BitsAndBytesConfig(load_in_8bit=True)
>>> model = AutoModelForCausalLM.from_pretrained(
... "mistralai/Mistral-7B-Instruct-v0.1",
... trust_remote_code=True,
... attn_implementation="flash_attention_2",
... device_map="cuda:0",
... torch_dtype=torch.bfloat16,
... quantization_config=int8_config,
... )
>>> model = prepare_model_for_kbit_training(model)
>>> xlora_model = get_peft_model(model, config)
```
"""
def __init__(
self,
model: nn.Module,
config: Union[dict[str, XLoraConfig], XLoraConfig],
adapter_name: str,
torch_device: Optional[str] = None,
ephemeral_gpu_offload: bool = False,
autocast_adapter_dtype: bool = True,
**kwargs,
) -> None:
"""
Create a new X-LoRA model
Args:
model (`nn.Module`):
Base model to apply X-LoRA to.
config: ([`XLoraConfig`]):
X-LoRA configuration object.
adapter_name: (`str`):
Adapter name for the X-LoRA adapter.
torch_device (`str`, *optional*, defaults to None):
(For loading the LoRA adapters) The device to load the adapter on. If `None`, the device will be
inferred.
ephemeral_gpu_offload (`bool`, *optional*, defaults to `False`):
(For loading the LoRA adapters) Whether to use ephemeral GPU offloading for partially loaded modules.
Defaults to `False`.
autocast_adapter_dtype (`bool`, *optional*, defaults to `True`):
(For loading the LoRA adapters) Whether to autocast the adapter dtype. Defaults to `True`. Right now,
this will only cast adapter weights using float16 and bfloat16 to float32, as this is typically
required for stable training, and only affect select PEFT tuners.
kwargs: (`optional`):
(For loading the LoRA adapters) Additional arguments to modify the way the adapter is loaded, e.g. the
token for Hugging Face Hub.
"""
nn.Module.__init__(self)
if isinstance(config, dict):
conf = config[adapter_name]
else:
conf = config
# Create an empty LoraModel
base_lora_config = copy.copy(conf)
base_lora_config.target_modules = DUMMY_TARGET_MODULES
# Imitate a LoraConfig, fields might need to be updated if LoraConfig is updated
base_lora_config.layer_replication = None
base_lora_config.bias = "none"
lora_model = LoraModel(model, base_lora_config, adapter_name)
self.xlora_config = conf
self.lora_model = lora_model
peft_config = conf
if hasattr(model.config, "use_cache") and model.config.use_cache:
raise ValueError("`use_cache` must be False")
if hasattr(self.xlora_config, "_subfolders"):
adapters_items = zip(peft_config.adapters.items(), self.xlora_config._subfolders)
else:
adapters_items = peft_config.adapters.items()
if hasattr(self.xlora_config, "_subfolders"):
for i, ((_adapter_name, model_id), subfolder) in enumerate(adapters_items):
_load_adapter_into_lora_model(
lora_model=self.lora_model,
adapter_name=str(i),
model_id=model_id,
torch_device=torch_device,
ephemeral_gpu_offload=ephemeral_gpu_offload,
autocast_adapter_dtype=autocast_adapter_dtype,
subfolder=subfolder,
**kwargs,
)
else:
for i, (_adapter_name, model_id) in enumerate(adapters_items):
_load_adapter_into_lora_model(
lora_model=self.lora_model,
adapter_name=str(i),
model_id=model_id,
torch_device=torch_device,
ephemeral_gpu_offload=ephemeral_gpu_offload,
autocast_adapter_dtype=autocast_adapter_dtype,
subfolder=None,
**kwargs,
)
self.lora_model.set_adapter(list(peft_config.adapters.keys()))
self._maybe_freeze_all_adapters()
total_swapped, device = convert_layers_to_xlora(
model,
self,
peft_config,
)
n_classes = len(peft_config.adapters)
xlora_classifier = XLoraClassifier(model, peft_config, n_classes, total_swapped, device)
# Setup the model internal state
self.internal_xlora_classifier = xlora_classifier
self.internal_xlora_scalings = None # type: ignore
# Controlled by enable_adapter_layers or disable_adapter_layers
self.disabled = False
def _maybe_freeze_all_adapters(self):
self.eval()
if not self.xlora_config.use_trainable_adapters:
for name, param in self.named_parameters():
if "lora_" in name:
param.requires_grad = False
def generate(self, *args, **kwargs):
res = self.lora_model.generate(*args, **kwargs) # type: ignore
# This is necessary because we use PeftModel.disable_adapter() which reenables the adapters
self._maybe_freeze_all_adapters()
return res
@contextmanager
def _enable_peft_forward_hooks(self, *generate_args, **generate_kwargs):
def scalings_injection_hook(target, args, kwargs, scalings):
# pre-forward hook to inject the adapter_names argument when using mixed adapter batches inference
kwargs["scalings"] = scalings
return args, kwargs
handles_to_remove = None
def pre_forward(module, *args, **kwargs):
nonlocal handles_to_remove
# =========================== Forward pass with "dummy" scalings ==================
args_real = args[0]
kwargs_real = args[1]
kwargs_real.update(kwargs)
dummy_scalings = self.internal_xlora_classifier.make_dummy_scalings(*args_real, **kwargs_real)
hook_handles = []
for module in self.modules():
if isinstance(module, LoraLayer):
pre_forward = partial(scalings_injection_hook, scalings=dummy_scalings)
handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True)
hook_handles.append(handle)
with torch.no_grad():
self.lora_model.disable_adapter_layers()
try:
scaling_pass_kwargs = kwargs_real.copy()
scaling_pass_kwargs["output_hidden_states"] = True
scaling_pass_kwargs["return_dict"] = True
try:
base_output = self.lora_model.model.forward(*args_real, **scaling_pass_kwargs)
finally:
# Clean everything up
for handle in hook_handles:
handle.remove()
finally:
self.lora_model.enable_adapter_layers()
xlora_scalings = self.internal_xlora_classifier(result=base_output, *args_real, **kwargs_real)
# =========================== Real forward pass with calculated scalings ==================
hook_handles = []
for module in self.modules():
if isinstance(module, LoraLayer):
pre_forward = partial(scalings_injection_hook, scalings=xlora_scalings)
handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True)
hook_handles.append(handle)
handles_to_remove = hook_handles
if not self.disabled:
forward_handle = self.lora_model.model.register_forward_pre_hook(pre_forward, with_kwargs=True)
# Run the forward pass: first the scaling pass in the hook, and then with the base model
yield
if not self.disabled:
# TODO(EricLBuehler): If we get a forward exception, we may have multiple forward hooks.
for handle in handles_to_remove:
handle.remove()
forward_handle.remove()
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
if name == "lora_model": # see #1892: prevent infinite recursion if class is not initialized
raise
return getattr(self.lora_model, name)
@staticmethod
def _prepare_adapter_config(peft_config, _model_config):
# Handle X-LoRA case
return peft_config
"""
Does nothing. X-LoRA needs adapters to be frozen.
"""
def _mark_only_adapters_as_trainable(self) -> None: ...
"""
This enables the X-LoRA adapter.
"""
def enable_adapter_layers(self) -> None:
self.disabled = False
"""
This diasables the X-LoRA adapter.
"""
def disable_adapter_layers(self) -> None:
self.disabled = True
def _create_and_replace(
self,
lora_config,
adapter_name,
target,
target_name,
parent,
current_key,
):
# Does nothing because XLoraModel has no target modules
pass
@staticmethod
def _check_target_module_exists(lora_config, key):
# Does nothing because XLoraModel has no target modules
return False
def forward(self, *args, **kwargs):
return self.lora_model.model(*args, **kwargs)
def set_topk_lora(self, value: Optional[int]):
"""
Sparsely select the specified top_k LoRA experts instead of the default dense method. Set to None to use dense.
This is reflected in the config.
"""
classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore
classifier.config.top_k_lora = value
def set_global_scaling_weight(self, weight: float):
"""
Set the global LoRA weight, a scalar to multiply the output of each LoRA adapter by. This is by default 1. This
is reflected in the config.
"""
classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore
classifier.config.global_scaling_weight = weight
def set_scaling_pass_value(self, value: float | None):
"""
Set the scaling pass value, the value to set the scalings to during the scaling pass. If the value is None, the
scaling pass value will be 1/n where n is the number of adapters.
"""
classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore
classifier._set_override_scaling_pass_value(value)
def get_global_scaling_weight(self) -> float:
"""
Get the global LoRA weight.
"""
classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore
return classifier.config.global_scaling_weight
def get_latest_scalings(self) -> Optional[torch.Tensor]:
"""
Returns the latest scalings prediction, or None if no scalings have been predicted. The tensor is of shape
(batch_size, seq_len, n_layers, n_classes).
"""
return self.internal_xlora_scalings
def get_scalings_log(self) -> list[torch.Tensor]:
"""
Returns a shallow (only copying the list itself not the tensors) copy of the list containing the scalings log.
Editing the list does not change the underlying log. The tensors are of shape (batch_size, seq_len, n_layers,
n_classes). The seq_len dim may vary with input dimension.
"""
classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore
return classifier.log_scalings.copy()
def enable_scalings_logging(self):
"""
Enable scalings logging.
"""
classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore
classifier.scalings_logging = True
def disable_scalings_logging(self):
"""
Disable scalings logging, without clearing the log.
"""
classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore
classifier.scalings_logging = False
def clear_scalings_log(self):
"""
Clear the scalings log.
"""
classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore
classifier.log_scalings.clear()
def get_bucketed_scalings_log(self) -> dict[int, tuple[list[int], list[torch.Tensor]]]:
"""
Returns bucketed scalings, bucketed by seq_len. Each value consists of the positions (the first) and the
associated tensors. The positions are paired with the associated tensors and give the position in the scaling
log.
"""
classifier: XLoraClassifier = self.internal_xlora_classifier # type: ignore
return classifier._get_bucketed_scalings()
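# A minimal usage sketch of the scalings-logging API (assumes `xlora_model` was created via
# get_peft_model with an XLoraConfig and `inputs` are tokenized generation inputs):
#
#     xlora_model.enable_scalings_logging()
#     xlora_model.generate(**inputs)
#     latest = xlora_model.get_latest_scalings()         # (batch, seq_len, n_layers, n_classes) or None
#     buckets = xlora_model.get_bucketed_scalings_log()  # {seq_len: (positions, tensors)}
#     xlora_model.disable_scalings_logging()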
| peft/src/peft/tuners/xlora/model.py/0 | {
"file_path": "peft/src/peft/tuners/xlora/model.py",
"repo_id": "peft",
"token_count": 9075
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import torch
from peft import (
AutoPeftModel,
AutoPeftModelForCausalLM,
AutoPeftModelForFeatureExtraction,
AutoPeftModelForQuestionAnswering,
AutoPeftModelForSeq2SeqLM,
AutoPeftModelForSequenceClassification,
AutoPeftModelForTokenClassification,
PeftModel,
PeftModelForCausalLM,
PeftModelForFeatureExtraction,
PeftModelForQuestionAnswering,
PeftModelForSeq2SeqLM,
PeftModelForSequenceClassification,
PeftModelForTokenClassification,
)
from peft.utils import infer_device
class PeftAutoModelTester(unittest.TestCase):
dtype = torch.float16 if infer_device() == "mps" else torch.bfloat16
def test_peft_causal_lm(self):
model_id = "peft-internal-testing/tiny-OPTForCausalLM-lora"
model = AutoPeftModelForCausalLM.from_pretrained(model_id)
assert isinstance(model, PeftModelForCausalLM)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model = AutoPeftModelForCausalLM.from_pretrained(tmp_dirname)
assert isinstance(model, PeftModelForCausalLM)
# check if kwargs are passed correctly
model = AutoPeftModelForCausalLM.from_pretrained(model_id, torch_dtype=self.dtype)
assert isinstance(model, PeftModelForCausalLM)
assert model.base_model.lm_head.weight.dtype == self.dtype
adapter_name = "default"
is_trainable = False
# This should work
_ = AutoPeftModelForCausalLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=self.dtype)
def test_peft_causal_lm_extended_vocab(self):
model_id = "peft-internal-testing/tiny-random-OPTForCausalLM-extended-vocab"
model = AutoPeftModelForCausalLM.from_pretrained(model_id)
assert isinstance(model, PeftModelForCausalLM)
# check if kwargs are passed correctly
model = AutoPeftModelForCausalLM.from_pretrained(model_id, torch_dtype=self.dtype)
assert isinstance(model, PeftModelForCausalLM)
assert model.base_model.lm_head.weight.dtype == self.dtype
adapter_name = "default"
is_trainable = False
# This should work
_ = AutoPeftModelForCausalLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=self.dtype)
def test_peft_seq2seq_lm(self):
model_id = "peft-internal-testing/tiny_T5ForSeq2SeqLM-lora"
model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id)
assert isinstance(model, PeftModelForSeq2SeqLM)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model = AutoPeftModelForSeq2SeqLM.from_pretrained(tmp_dirname)
assert isinstance(model, PeftModelForSeq2SeqLM)
# check if kwargs are passed correctly
model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id, torch_dtype=self.dtype)
assert isinstance(model, PeftModelForSeq2SeqLM)
assert model.base_model.lm_head.weight.dtype == self.dtype
adapter_name = "default"
is_trainable = False
# This should work
_ = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=self.dtype)
def test_peft_sequence_cls(self):
model_id = "peft-internal-testing/tiny_OPTForSequenceClassification-lora"
model = AutoPeftModelForSequenceClassification.from_pretrained(model_id)
assert isinstance(model, PeftModelForSequenceClassification)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model = AutoPeftModelForSequenceClassification.from_pretrained(tmp_dirname)
assert isinstance(model, PeftModelForSequenceClassification)
# check if kwargs are passed correctly
model = AutoPeftModelForSequenceClassification.from_pretrained(model_id, torch_dtype=self.dtype)
assert isinstance(model, PeftModelForSequenceClassification)
assert model.score.original_module.weight.dtype == self.dtype
adapter_name = "default"
is_trainable = False
# This should work
_ = AutoPeftModelForSequenceClassification.from_pretrained(
model_id, adapter_name, is_trainable, torch_dtype=self.dtype
)
def test_peft_token_classification(self):
model_id = "peft-internal-testing/tiny_GPT2ForTokenClassification-lora"
model = AutoPeftModelForTokenClassification.from_pretrained(model_id)
assert isinstance(model, PeftModelForTokenClassification)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model = AutoPeftModelForTokenClassification.from_pretrained(tmp_dirname)
assert isinstance(model, PeftModelForTokenClassification)
# check if kwargs are passed correctly
model = AutoPeftModelForTokenClassification.from_pretrained(model_id, torch_dtype=self.dtype)
assert isinstance(model, PeftModelForTokenClassification)
assert model.base_model.classifier.original_module.weight.dtype == self.dtype
adapter_name = "default"
is_trainable = False
# This should work
_ = AutoPeftModelForTokenClassification.from_pretrained(
model_id, adapter_name, is_trainable, torch_dtype=self.dtype
)
def test_peft_question_answering(self):
model_id = "peft-internal-testing/tiny_OPTForQuestionAnswering-lora"
model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id)
assert isinstance(model, PeftModelForQuestionAnswering)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model = AutoPeftModelForQuestionAnswering.from_pretrained(tmp_dirname)
assert isinstance(model, PeftModelForQuestionAnswering)
# check if kwargs are passed correctly
model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id, torch_dtype=self.dtype)
assert isinstance(model, PeftModelForQuestionAnswering)
assert model.base_model.qa_outputs.original_module.weight.dtype == self.dtype
adapter_name = "default"
is_trainable = False
# This should work
_ = AutoPeftModelForQuestionAnswering.from_pretrained(
model_id, adapter_name, is_trainable, torch_dtype=self.dtype
)
def test_peft_feature_extraction(self):
model_id = "peft-internal-testing/tiny_OPTForFeatureExtraction-lora"
model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id)
assert isinstance(model, PeftModelForFeatureExtraction)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model = AutoPeftModelForFeatureExtraction.from_pretrained(tmp_dirname)
assert isinstance(model, PeftModelForFeatureExtraction)
# check if kwargs are passed correctly
model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id, torch_dtype=self.dtype)
assert isinstance(model, PeftModelForFeatureExtraction)
assert model.base_model.model.decoder.embed_tokens.weight.dtype == self.dtype
adapter_name = "default"
is_trainable = False
# This should work
_ = AutoPeftModelForFeatureExtraction.from_pretrained(
model_id, adapter_name, is_trainable, torch_dtype=self.dtype
)
def test_peft_whisper(self):
model_id = "peft-internal-testing/tiny_WhisperForConditionalGeneration-lora"
model = AutoPeftModel.from_pretrained(model_id)
assert isinstance(model, PeftModel)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model = AutoPeftModel.from_pretrained(tmp_dirname)
assert isinstance(model, PeftModel)
# check if kwargs are passed correctly
model = AutoPeftModel.from_pretrained(model_id, torch_dtype=self.dtype)
assert isinstance(model, PeftModel)
assert model.base_model.model.model.encoder.embed_positions.weight.dtype == self.dtype
adapter_name = "default"
is_trainable = False
# This should work
_ = AutoPeftModel.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=self.dtype)
| peft/tests/test_auto.py/0 | {
"file_path": "peft/tests/test_auto.py",
"repo_id": "peft",
"token_count": 3615
} |
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import os
import unittest
import torch
import torch.nn.init as init
from peft import LoraConfig, PeftModel, get_peft_model, get_peft_model_state_dict
from .testing_utils import require_torch_gpu
def is_megatron_available() -> bool:
return importlib.util.find_spec("megatron") is not None
if is_megatron_available():
from megatron.core import parallel_state, tensor_parallel
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.transformer_config import TransformerConfig
world_size = 1
rank = 0
def initialize_distributed():
print(f"Initializing torch.distributed with rank: {rank}, world_size: {world_size}")
torch.cuda.set_device(0)
init_method = "tcp://"
master_ip = os.getenv("MASTER_ADDR", "localhost")
master_port = os.getenv("MASTER_PORT", "6001")
init_method += master_ip + ":" + master_port
torch.distributed.init_process_group(backend="nccl", world_size=world_size, rank=rank, init_method=init_method)
def destroy_model_parallel():
parallel_state.destroy_model_parallel()
torch.distributed.barrier()
def initialize_model_parallel(
tensor_model_parallel_size=1,
pipeline_model_parallel_size=1,
virtual_pipeline_model_parallel_size=None,
pipeline_model_parallel_split_rank=None,
):
parallel_state.destroy_model_parallel()
if not torch.distributed.is_initialized():
initialize_distributed()
parallel_state.initialize_model_parallel(
tensor_model_parallel_size,
pipeline_model_parallel_size,
virtual_pipeline_model_parallel_size,
pipeline_model_parallel_split_rank,
)
class DummyModule(MegatronModule):
def __init__(self, config: TransformerConfig):
super().__init__(config)
self.linear = tensor_parallel.ColumnParallelLinear(
input_size=10,
output_size=10,
config=config,
init_method=init.xavier_normal_,
bias=False,
gather_output=False,
)
self.lm_head = tensor_parallel.RowParallelLinear(
input_size=10,
output_size=10,
config=config,
init_method=init.xavier_normal_,
bias=False,
input_is_parallel=True,
skip_bias_add=True,
)
def forward(self, input):
x = self.linear(input)[0]
x = self.lm_head(x)[0]
return x
@require_torch_gpu
class TestMegatronLora(unittest.TestCase):
def setUp(self):
initialize_model_parallel(1, 1)
model_parallel_cuda_manual_seed(123)
transformer_config = {
"num_layers": 2,
"hidden_size": 12,
"num_attention_heads": 4,
"use_cpu_initialization": True,
}
config = TransformerConfig(**transformer_config)
self.megatron_module = DummyModule(config=config).cuda()
self.dummy_module = copy.deepcopy(self.megatron_module).cuda()
lora_config = LoraConfig(
lora_alpha=16,
lora_dropout=0.1,
r=64,
bias="none",
target_modules=["linear", "lm_head"],
megatron_config=config,
megatron_core="megatron.core",
)
self.megatron_module = get_peft_model(self.megatron_module, lora_config)
def tearDown(self):
destroy_model_parallel()
def test_megatron_lora_module(self):
megatron_module = self.megatron_module
assert isinstance(megatron_module, PeftModel)
for name, module in megatron_module.named_modules():
if name.endswith("linear"):
assert hasattr(module, "lora_A")
assert hasattr(module, "lora_B")
if name.endswith("linear.lora_A.default"):
assert isinstance(module, torch.nn.Linear)
if name.endswith("linear.lora_B.default"):
assert isinstance(module, tensor_parallel.ColumnParallelLinear)
if name.endswith("lm_head.lora_A.default"):
assert isinstance(module, tensor_parallel.RowParallelLinear)
if name.endswith("lm_head.lora_B.default"):
assert isinstance(module, torch.nn.Linear)
def test_forward(self):
x = torch.ones((2, 4, 10)).cuda()
megatron_module_result = self.megatron_module(x)
dummy_module_result = self.dummy_module(x)
# Because lora_B is initialized to 0, the forward results of the two models should be equal before backward.
assert megatron_module_result.equal(dummy_module_result)
def test_backward(self):
optimizer = torch.optim.AdamW(self.megatron_module.parameters())
loss_fn = torch.nn.CrossEntropyLoss()
x = torch.randn(2, 4, 10, requires_grad=True).cuda()
label = torch.randint(10, (2 * 4,)).cuda()
output = self.megatron_module(x)
output = output.reshape(2 * 4, 10)
loss = loss_fn(output, label)
loss.backward()
optimizer.step()
def test_get_peft_model_state_dict(self):
peft_state_dict = get_peft_model_state_dict(self.megatron_module)
for key in peft_state_dict.keys():
assert "lora" in key
| peft/tests/test_lora_megatron.py/0 | {
"file_path": "peft/tests/test_lora_megatron.py",
"repo_id": "peft",
"token_count": 2989
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from contextlib import contextmanager
import numpy as np
import pytest
import torch
from accelerate.test_utils.testing import get_backend
from peft.import_utils import (
is_aqlm_available,
is_auto_awq_available,
is_auto_gptq_available,
is_eetq_available,
is_gptqmodel_available,
is_hqq_available,
is_optimum_available,
is_torchao_available,
)
torch_device, device_count, memory_allocated_func = get_backend()
def require_non_cpu(test_case):
"""
Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when no
hardware accelerator is available.
"""
return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)
def require_non_xpu(test_case):
"""
Decorator marking a test that should be skipped for XPU.
"""
return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case)
def require_torch_gpu(test_case):
"""
Decorator marking a test that requires a GPU. Will be skipped when no GPU is available.
"""
if not torch.cuda.is_available():
return unittest.skip("test requires GPU")(test_case)
else:
return test_case
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires multiple GPUs. Will be skipped when less than 2 GPUs are available.
"""
if not torch.cuda.is_available() or torch.cuda.device_count() < 2:
return unittest.skip("test requires multiple GPUs")(test_case)
else:
return test_case
def require_multi_accelerator(test_case):
"""
Decorator marking a test that requires multiple hardware accelerators. These tests are skipped on a machine without
multiple accelerators.
"""
return unittest.skipUnless(
torch_device != "cpu" and device_count > 1, "test requires multiple hardware accelerators"
)(test_case)
def require_bitsandbytes(test_case):
"""
Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library is not installed.
"""
try:
import bitsandbytes # noqa: F401
test_case = pytest.mark.bitsandbytes(test_case)
except ImportError:
test_case = pytest.mark.skip(reason="test requires bitsandbytes")(test_case)
return test_case
def require_auto_gptq(test_case):
"""
Decorator marking a test that requires auto-gptq. These tests are skipped when auto-gptq isn't installed.
"""
return unittest.skipUnless(is_gptqmodel_available() or is_auto_gptq_available(), "test requires auto-gptq")(
test_case
)
def require_gptqmodel(test_case):
"""
Decorator marking a test that requires gptqmodel. These tests are skipped when gptqmodel isn't installed.
"""
return unittest.skipUnless(is_gptqmodel_available(), "test requires gptqmodel")(test_case)
def require_aqlm(test_case):
"""
Decorator marking a test that requires aqlm. These tests are skipped when aqlm isn't installed.
"""
return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)
def require_hqq(test_case):
"""
Decorator marking a test that requires hqq. These tests are skipped when hqq isn't installed.
"""
return unittest.skipUnless(is_hqq_available(), "test requires hqq")(test_case)
def require_auto_awq(test_case):
"""
Decorator marking a test that requires auto-awq. These tests are skipped when auto-awq isn't installed.
"""
return unittest.skipUnless(is_auto_awq_available(), "test requires auto-awq")(test_case)
def require_eetq(test_case):
"""
Decorator marking a test that requires eetq. These tests are skipped when eetq isn't installed.
"""
return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)
def require_optimum(test_case):
"""
Decorator marking a test that requires optimum. These tests are skipped when optimum isn't installed.
"""
return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)
def require_torchao(test_case):
"""
Decorator marking a test that requires torchao. These tests are skipped when torchao isn't installed.
"""
return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)
@contextmanager
def temp_seed(seed: int):
"""Temporarily set the random seed. This works for python numpy, pytorch."""
np_state = np.random.get_state()
np.random.seed(seed)
torch_state = torch.random.get_rng_state()
torch.random.manual_seed(seed)
if torch.cuda.is_available():
torch_cuda_states = torch.cuda.get_rng_state_all()
torch.cuda.manual_seed_all(seed)
try:
yield
finally:
np.random.set_state(np_state)
torch.random.set_rng_state(torch_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state_all(torch_cuda_states)
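# Example (illustrative): make a block of test code deterministic without touching global RNG state
#
#   with temp_seed(0):
#       x = torch.randn(2, 2)   # identical values on every run; the RNG state is restored afterwards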
def get_state_dict(model, unwrap_compiled=True):
"""
Get the state dict of a model. If the model is compiled, unwrap it first.
"""
if unwrap_compiled:
model = getattr(model, "_orig_mod", model)
return model.state_dict()
| peft/tests/testing_utils.py/0 | {
"file_path": "peft/tests/testing_utils.py",
"repo_id": "peft",
"token_count": 2055
} |
#!/usr/bin/env python3
""" Bulk Model Script Runner
Run validation or benchmark script in separate process for each model
Benchmark all 'vit*' models:
python bulk_runner.py --model-list 'vit*' --results-file vit_bench.csv benchmark.py --amp -b 512
Validate all models:
python bulk_runner.py --model-list all --results-file val.csv --pretrained validate.py --data-dir /imagenet/validation/ --amp -b 512 --retry
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import os
import sys
import csv
import json
import subprocess
import time
from typing import Callable, List, Tuple, Union
from timm.models import is_model, list_models, get_pretrained_cfg, get_arch_pretrained_cfgs
parser = argparse.ArgumentParser(description='Per-model process launcher')
# model and results args
parser.add_argument(
'--model-list', metavar='NAME', default='',
help='txt file based list of model names to benchmark')
parser.add_argument(
'--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
parser.add_argument(
'--sort-key', default='', type=str, metavar='COL',
help='Specify sort key for results csv')
parser.add_argument(
"--pretrained", action='store_true',
help="only run models with pretrained weights")
parser.add_argument(
"--delay",
type=float,
default=0,
help="Interval, in seconds, to delay between model invocations.",
)
parser.add_argument(
"--start_method", type=str, default="spawn", choices=["spawn", "fork", "forkserver"],
help="Multiprocessing start method to use when creating workers.",
)
parser.add_argument(
"--no_python",
help="Skip prepending the script with 'python' - just execute it directly. Useful "
"when the script is not a Python script.",
)
parser.add_argument(
"-m",
"--module",
help="Change each process to interpret the launch script as a Python module, executing "
"with the same behavior as 'python -m'.",
)
# positional
parser.add_argument(
"script", type=str,
help="Full path to the program/script to be launched for each model config.",
)
parser.add_argument("script_args", nargs=argparse.REMAINDER)
def cmd_from_args(args) -> Tuple[Union[Callable, str], List[str]]:
# If ``args`` not passed, defaults to ``sys.argv[:1]``
with_python = not args.no_python
cmd: Union[Callable, str]
cmd_args = []
if with_python:
cmd = os.getenv("PYTHON_EXEC", sys.executable)
cmd_args.append("-u")
if args.module:
cmd_args.append("-m")
cmd_args.append(args.script)
else:
if args.module:
raise ValueError(
"Don't use both the '--no_python' flag"
" and the '--module' flag at the same time."
)
cmd = args.script
cmd_args.extend(args.script_args)
return cmd, cmd_args
def _get_model_cfgs(
model_names,
num_classes=None,
expand_train_test=False,
include_crop=True,
expand_arch=False,
):
model_cfgs = set()
for name in model_names:
if expand_arch:
pt_cfgs = get_arch_pretrained_cfgs(name).values()
else:
pt_cfg = get_pretrained_cfg(name)
pt_cfgs = [pt_cfg] if pt_cfg is not None else []
for cfg in pt_cfgs:
if cfg.input_size is None:
continue
if num_classes is not None and getattr(cfg, 'num_classes', 0) != num_classes:
continue
# Add main configuration
size = cfg.input_size[-1]
if include_crop:
model_cfgs.add((name, size, cfg.crop_pct))
else:
model_cfgs.add((name, size))
# Add test configuration if required
if expand_train_test and cfg.test_input_size is not None:
test_size = cfg.test_input_size[-1]
if include_crop:
test_crop = cfg.test_crop_pct or cfg.crop_pct
model_cfgs.add((name, test_size, test_crop))
else:
model_cfgs.add((name, test_size))
# Format the output
if include_crop:
return [(n, {'img-size': r, 'crop-pct': cp}) for n, r, cp in sorted(model_cfgs)]
else:
return [(n, {'img-size': r}) for n, r in sorted(model_cfgs)]
def main():
args = parser.parse_args()
cmd, cmd_args = cmd_from_args(args)
model_cfgs = []
if args.model_list == 'all':
model_names = list_models(
pretrained=args.pretrained, # only include models w/ pretrained checkpoints if set
)
model_cfgs = [(n, None) for n in model_names]
elif args.model_list == 'all_in1k':
model_names = list_models(pretrained=True)
model_cfgs = _get_model_cfgs(model_names, num_classes=1000, expand_train_test=True)
elif args.model_list == 'all_res':
model_names = list_models()
model_cfgs = _get_model_cfgs(model_names, expand_train_test=True, include_crop=False, expand_arch=True)
elif not is_model(args.model_list):
# model name doesn't exist, try as wildcard filter
model_names = list_models(args.model_list)
model_cfgs = [(n, None) for n in model_names]
if not model_cfgs and os.path.exists(args.model_list):
with open(args.model_list) as f:
model_names = [line.rstrip() for line in f]
model_cfgs = _get_model_cfgs(
model_names,
#num_classes=1000,
expand_train_test=True,
#include_crop=False,
)
if len(model_cfgs):
results_file = args.results_file or './results.csv'
results = []
errors = []
model_strings = '\n'.join([f'{x[0]}, {x[1]}' for x in model_cfgs])
print(f"Running script on these models:\n {model_strings}")
if not args.sort_key:
if 'benchmark' in args.script:
if any(['train' in a for a in args.script_args]):
sort_key = 'train_samples_per_sec'
else:
sort_key = 'infer_samples_per_sec'
else:
sort_key = 'top1'
else:
sort_key = args.sort_key
print(f'Script: {args.script}, Args: {args.script_args}, Sort key: {sort_key}')
try:
for m, ax in model_cfgs:
if not m:
continue
args_str = (cmd, *[str(e) for e in cmd_args], '--model', m)
if ax is not None:
extra_args = [(f'--{k}', str(v)) for k, v in ax.items()]
extra_args = [i for t in extra_args for i in t]
args_str += tuple(extra_args)
try:
o = subprocess.check_output(args=args_str).decode('utf-8').split('--result')[-1]
r = json.loads(o)
results.append(r)
except Exception as e:
# FIXME batch_size retry loop is currently done in either validation.py or benchmark.py
# for further robustness (but more overhead), we may want to manage that by looping here...
errors.append(dict(model=m, error=str(e)))
if args.delay:
time.sleep(args.delay)
except KeyboardInterrupt:
pass
errors.extend(list(filter(lambda x: 'error' in x, results)))
if errors:
print(f'{len(errors)} models had errors during run.')
for e in errors:
if 'model' in e:
print(f"\t {e['model']} ({e.get('error', 'Unknown')})")
else:
print(e)
results = list(filter(lambda x: 'error' not in x, results))
no_sortkey = list(filter(lambda x: sort_key not in x, results))
if no_sortkey:
print(f'{len(no_sortkey)} results missing sort key, skipping sort.')
else:
results = sorted(results, key=lambda x: x[sort_key], reverse=True)
if len(results):
print(f'{len(results)} models run successfully. Saving results to {results_file}.')
write_results(results_file, results)
def write_results(results_file, results):
with open(results_file, mode='w') as cf:
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
| pytorch-image-models/bulk_runner.py/0 | {
"file_path": "pytorch-image-models/bulk_runner.py",
"repo_id": "pytorch-image-models",
"token_count": 3951
} |
# CSP-DarkNet
**CSPDarknet53** is a convolutional neural network and backbone for object detection that uses [DarkNet-53](https://paperswithcode.com/method/darknet-53). It employs a CSPNet strategy to partition the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network.
This CNN is used as the backbone for [YOLOv4](https://paperswithcode.com/method/yolov4).
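The split/merge idea can be sketched in a few lines of PyTorch. The toy stage below is purely illustrative (it is not the actual `timm` implementation, and the class and argument names are made up), but it shows the partition of the feature map, the partial transformation, and the cross-stage merge:

```py
import torch
import torch.nn as nn

class ToyCSPStage(nn.Module):
    """Illustrative cross-stage partial stage: split channels, transform one half, re-merge."""

    def __init__(self, channels: int, depth: int = 2):
        super().__init__()
        half = channels // 2
        self.blocks = nn.Sequential(*[
            nn.Sequential(
                nn.Conv2d(half, half, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(half),
                nn.ReLU(inplace=True),
            )
            for _ in range(depth)
        ])
        self.transition = nn.Conv2d(channels, channels, kernel_size=1, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shortcut, dense = x.chunk(2, dim=1)   # partition the base layer's feature map into two parts
        dense = self.blocks(dense)            # only one part passes through the stage's blocks
        return self.transition(torch.cat([shortcut, dense], dim=1))  # cross-stage merge

print(ToyCSPStage(64)(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 64, 56, 56])
```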
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('cspdarknet53', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `cspdarknet53`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
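For instance, one common approach in `timm` is to request a multi-scale feature extractor with `features_only=True` (the exact output shapes depend on the input resolution and the `timm` version):

```py
>>> import torch
>>> import timm
>>> model = timm.create_model('cspdarknet53', pretrained=True, features_only=True)
>>> features = model(torch.randn(1, 3, 256, 256))
>>> for f in features:
...     print(f.shape)
```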
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('cspdarknet53', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{bochkovskiy2020yolov4,
title={YOLOv4: Optimal Speed and Accuracy of Object Detection},
author={Alexey Bochkovskiy and Chien-Yao Wang and Hong-Yuan Mark Liao},
year={2020},
eprint={2004.10934},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: CSP DarkNet
Paper:
Title: 'YOLOv4: Optimal Speed and Accuracy of Object Detection'
URL: https://paperswithcode.com/paper/yolov4-optimal-speed-and-accuracy-of-object
Models:
- Name: cspdarknet53
In Collection: CSP DarkNet
Metadata:
FLOPs: 8545018880
Parameters: 27640000
File Size: 110775135
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Mish
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- CutMix
- Label Smoothing
- Mosaic
- Polynomial Learning Rate Decay
- SGD with Momentum
- Self-Adversarial Training
- Weight Decay
Training Data:
- ImageNet
Training Resources: 1x NVIDIA RTX 2070 GPU
ID: cspdarknet53
LR: 0.1
Layers: 53
Crop Pct: '0.887'
Momentum: 0.9
Batch Size: 128
Image Size: '256'
Warmup Steps: 1000
Weight Decay: 0.0005
Interpolation: bilinear
Training Steps: 8000000
FPS (GPU RTX 2070): 66
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L441
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.05%
Top 5 Accuracy: 95.09%
--> | pytorch-image-models/hfdocs/source/models/csp-darknet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/csp-darknet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1758
} |
# PNASNet
**Progressive Neural Architecture Search**, or **PNAS**, is a method for learning the structure of convolutional neural networks (CNNs). It uses a sequential model-based optimization (SMBO) strategy, where we search the space of cell structures, starting with simple (shallow) models and progressing to complex ones, pruning out unpromising structures as we go.
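The search loop can be summarised with a short, purely illustrative sketch; `expand`, `surrogate`, and `train_and_eval` are hypothetical placeholders for cell expansion, the learned accuracy predictor, and child-model training, and none of this code comes from `timm` or the PNAS authors:

```py
def progressive_search(seed_cells, expand, surrogate, train_and_eval, beam_size=8, max_blocks=5):
    """Sketch of the sequential model-based optimization (SMBO) loop used by PNAS."""
    candidates = list(seed_cells)                                       # start from simple (shallow) cells
    for _ in range(max_blocks):
        expanded = [bigger for cell in candidates for bigger in expand(cell)]
        ranked = sorted(expanded, key=surrogate.predict, reverse=True)  # cheap accuracy prediction
        candidates = ranked[:beam_size]                                 # prune unpromising structures
        for cell in candidates:
            surrogate.update(cell, train_and_eval(cell))                # refine predictor on real results
    return max(candidates, key=surrogate.predict)
```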
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('pnasnet5large', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `pnasnet5large`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('pnasnet5large', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{liu2018progressive,
title={Progressive Neural Architecture Search},
author={Chenxi Liu and Barret Zoph and Maxim Neumann and Jonathon Shlens and Wei Hua and Li-Jia Li and Li Fei-Fei and Alan Yuille and Jonathan Huang and Kevin Murphy},
year={2018},
eprint={1712.00559},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: PNASNet
Paper:
Title: Progressive Neural Architecture Search
URL: https://paperswithcode.com/paper/progressive-neural-architecture-search
Models:
- Name: pnasnet5large
In Collection: PNASNet
Metadata:
FLOPs: 31458865950
Parameters: 86060000
File Size: 345153926
Architecture:
- Average Pooling
- Batch Normalization
- Convolution
- Depthwise Separable Convolution
- Dropout
- ReLU
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 100x NVIDIA P100 GPUs
ID: pnasnet5large
LR: 0.015
Dropout: 0.5
Crop Pct: '0.911'
Momentum: 0.9
Batch Size: 1600
Image Size: '331'
Interpolation: bicubic
Label Smoothing: 0.1
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/pnasnet.py#L343
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 0.98%
Top 5 Accuracy: 18.58%
--> | pytorch-image-models/hfdocs/source/models/pnasnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/pnasnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1624
} |
# SSL ResNet
**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form a network: e.g. a ResNet-50 has fifty layers using these blocks.
The models in this collection utilise semi-supervised learning to improve performance. The approach brings important gains to standard architectures for image, video and fine-grained classification.
Please note the CC-BY-NC 4.0 license on these weights: non-commercial use only.
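The residual mapping described above amounts to computing `y = F(x) + x`, where `F` is a small stack of convolutions. A minimal sketch (illustrative only, not the `timm` implementation):

```py
import torch
import torch.nn as nn

class ToyBasicBlock(nn.Module):
    def __init__(self, channels: int):
        super().__init__()
        self.body = nn.Sequential(                    # F(x): the residual function to be learned
            nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
        )
        self.act = nn.ReLU(inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.act(self.body(x) + x)             # add the identity shortcut, then activate

print(ToyBasicBlock(64)(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 64, 56, 56])
```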
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('ssl_resnet18', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `ssl_resnet18`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('ssl_resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1905-00546,
author = {I. Zeki Yalniz and
Herv{\'{e}} J{\'{e}}gou and
Kan Chen and
Manohar Paluri and
Dhruv Mahajan},
title = {Billion-scale semi-supervised learning for image classification},
journal = {CoRR},
volume = {abs/1905.00546},
year = {2019},
url = {http://arxiv.org/abs/1905.00546},
archivePrefix = {arXiv},
eprint = {1905.00546},
timestamp = {Mon, 28 Sep 2020 08:19:37 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: SSL ResNet
Paper:
Title: Billion-scale semi-supervised learning for image classification
URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for
Models:
- Name: ssl_resnet18
In Collection: SSL ResNet
Metadata:
FLOPs: 2337073152
Parameters: 11690000
File Size: 46811375
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- YFCC-100M
Training Resources: 64x GPUs
ID: ssl_resnet18
LR: 0.0015
Epochs: 30
Layers: 18
Crop Pct: '0.875'
Batch Size: 1536
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L894
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 72.62%
Top 5 Accuracy: 91.42%
- Name: ssl_resnet50
In Collection: SSL ResNet
Metadata:
FLOPs: 5282531328
Parameters: 25560000
File Size: 102480594
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- YFCC-100M
Training Resources: 64x GPUs
ID: ssl_resnet50
LR: 0.0015
Epochs: 30
Layers: 50
Crop Pct: '0.875'
Batch Size: 1536
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L904
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.24%
Top 5 Accuracy: 94.83%
--> | pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2427
} |
# Learning Rate Schedulers
This page contains the API reference documentation for learning rate schedulers included in `timm`.
## Schedulers
### Factory functions
[[autodoc]] timm.scheduler.scheduler_factory.create_scheduler
[[autodoc]] timm.scheduler.scheduler_factory.create_scheduler_v2
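A minimal usage sketch of the v2 factory is shown below; the argument names follow recent `timm` releases and may differ slightly across versions, so check the signatures documented on this page:

```py
import torch
import timm
from timm.scheduler import create_scheduler_v2

model = timm.create_model('resnet50')
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

scheduler, num_epochs = create_scheduler_v2(
    optimizer,
    sched='cosine',
    num_epochs=100,
    warmup_epochs=5,
    min_lr=1e-5,
)

for epoch in range(num_epochs):
    # ... train for one epoch ...
    scheduler.step(epoch + 1)  # timm schedulers are stepped with the epoch index
```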
### Scheduler Classes
[[autodoc]] timm.scheduler.cosine_lr.CosineLRScheduler
[[autodoc]] timm.scheduler.multistep_lr.MultiStepLRScheduler
[[autodoc]] timm.scheduler.plateau_lr.PlateauLRScheduler
[[autodoc]] timm.scheduler.poly_lr.PolyLRScheduler
[[autodoc]] timm.scheduler.step_lr.StepLRScheduler
[[autodoc]] timm.scheduler.tanh_lr.TanhLRScheduler
| pytorch-image-models/hfdocs/source/reference/schedulers.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/reference/schedulers.mdx",
"repo_id": "pytorch-image-models",
"token_count": 242
} |
""" AutoAugment, RandAugment, AugMix, and 3-Augment for PyTorch
This code implements the searched ImageNet policies with various tweaks and improvements and
does not include any of the search code.
AA and RA Implementation adapted from:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
AugMix adapted from:
https://github.com/google-research/augmix
3-Augment based on: https://github.com/facebookresearch/deit/blob/main/README_revenge.md
Papers:
AutoAugment: Learning Augmentation Policies from Data - https://arxiv.org/abs/1805.09501
Learning Data Augmentation Strategies for Object Detection - https://arxiv.org/abs/1906.11172
RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719
AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - https://arxiv.org/abs/1912.02781
3-Augment: DeiT III: Revenge of the ViT - https://arxiv.org/abs/2204.07118
Hacked together by / Copyright 2019, Ross Wightman
"""
import random
import math
import re
from functools import partial
from typing import Dict, List, Optional, Union
from PIL import Image, ImageOps, ImageEnhance, ImageChops, ImageFilter
import PIL
import numpy as np
_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]])
_FILL = (128, 128, 128)
_LEVEL_DENOM = 10. # denominator for conversion from 'Mx' magnitude scale to fractional aug level for op arguments
_HPARAMS_DEFAULT = dict(
translate_const=250,
img_mean=_FILL,
)
if hasattr(Image, "Resampling"):
_RANDOM_INTERPOLATION = (Image.Resampling.BILINEAR, Image.Resampling.BICUBIC)
_DEFAULT_INTERPOLATION = Image.Resampling.BICUBIC
else:
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
_DEFAULT_INTERPOLATION = Image.BICUBIC
def _interpolation(kwargs):
interpolation = kwargs.pop('resample', _DEFAULT_INTERPOLATION)
if isinstance(interpolation, (list, tuple)):
return random.choice(interpolation)
return interpolation
def _check_args_tf(kwargs):
if 'fillcolor' in kwargs and _PIL_VER < (5, 0):
kwargs.pop('fillcolor')
kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs)
def shear_y(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs)
def translate_x_rel(img, pct, **kwargs):
pixels = pct * img.size[0]
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_rel(img, pct, **kwargs):
pixels = pct * img.size[1]
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def translate_x_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def rotate(img, degrees, **kwargs):
_check_args_tf(kwargs)
if _PIL_VER >= (5, 2):
return img.rotate(degrees, **kwargs)
if _PIL_VER >= (5, 0):
w, h = img.size
post_trans = (0, 0)
rotn_center = (w / 2.0, h / 2.0)
angle = -math.radians(degrees)
matrix = [
round(math.cos(angle), 15),
round(math.sin(angle), 15),
0.0,
round(-math.sin(angle), 15),
round(math.cos(angle), 15),
0.0,
]
def transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return a * x + b * y + c, d * x + e * y + f
matrix[2], matrix[5] = transform(
-rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix
)
matrix[2] += rotn_center[0]
matrix[5] += rotn_center[1]
return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
return img.rotate(degrees, resample=kwargs['resample'])
def auto_contrast(img, **__):
return ImageOps.autocontrast(img)
def invert(img, **__):
return ImageOps.invert(img)
def equalize(img, **__):
return ImageOps.equalize(img)
def solarize(img, thresh, **__):
return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
lut = []
for i in range(256):
if i < thresh:
lut.append(min(255, i + add))
else:
lut.append(i)
if img.mode in ("L", "RGB"):
if img.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return img.point(lut)
return img
def posterize(img, bits_to_keep, **__):
if bits_to_keep >= 8:
return img
return ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **__):
return ImageEnhance.Contrast(img).enhance(factor)
def color(img, factor, **__):
return ImageEnhance.Color(img).enhance(factor)
def brightness(img, factor, **__):
return ImageEnhance.Brightness(img).enhance(factor)
def sharpness(img, factor, **__):
return ImageEnhance.Sharpness(img).enhance(factor)
def gaussian_blur(img, factor, **__):
img = img.filter(ImageFilter.GaussianBlur(radius=factor))
return img
def gaussian_blur_rand(img, factor, **__):
radius_min = 0.1
radius_max = 2.0
img = img.filter(ImageFilter.GaussianBlur(radius=random.uniform(radius_min, radius_max * factor)))
return img
def desaturate(img, factor, **_):
factor = min(1., max(0., 1. - factor))
# enhance factor 0 = grayscale, 1.0 = no-change
return ImageEnhance.Color(img).enhance(factor)
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level, _hparams):
# range [-30, 30]
level = (level / _LEVEL_DENOM) * 30.
level = _randomly_negate(level)
return level,
def _enhance_level_to_arg(level, _hparams):
# range [0.1, 1.9]
return (level / _LEVEL_DENOM) * 1.8 + 0.1,
def _enhance_increasing_level_to_arg(level, _hparams):
# the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend
# range [0.1, 1.9] if level <= _LEVEL_DENOM
level = (level / _LEVEL_DENOM) * .9
level = max(0.1, 1.0 + _randomly_negate(level)) # keep it >= 0.1
return level,
def _minmax_level_to_arg(level, _hparams, min_val=0., max_val=1.0, clamp=True):
level = (level / _LEVEL_DENOM)
level = min_val + (max_val - min_val) * level
if clamp:
level = max(min_val, min(max_val, level))
return level,
def _shear_level_to_arg(level, _hparams):
# range [-0.3, 0.3]
level = (level / _LEVEL_DENOM) * 0.3
level = _randomly_negate(level)
return level,
def _translate_abs_level_to_arg(level, hparams):
translate_const = hparams['translate_const']
level = (level / _LEVEL_DENOM) * float(translate_const)
level = _randomly_negate(level)
return level,
def _translate_rel_level_to_arg(level, hparams):
# default range [-0.45, 0.45]
translate_pct = hparams.get('translate_pct', 0.45)
level = (level / _LEVEL_DENOM) * translate_pct
level = _randomly_negate(level)
return level,
def _posterize_level_to_arg(level, _hparams):
# As per Tensorflow TPU EfficientNet impl
# range [0, 4], 'keep 0 up to 4 MSB of original image'
# intensity/severity of augmentation decreases with level
return int((level / _LEVEL_DENOM) * 4),
def _posterize_increasing_level_to_arg(level, hparams):
# As per Tensorflow models research and UDA impl
# range [4, 0], 'keep 4 down to 0 MSB of original image',
# intensity/severity of augmentation increases with level
return 4 - _posterize_level_to_arg(level, hparams)[0],
def _posterize_original_level_to_arg(level, _hparams):
# As per original AutoAugment paper description
# range [4, 8], 'keep 4 up to 8 MSB of image'
# intensity/severity of augmentation decreases with level
return int((level / _LEVEL_DENOM) * 4) + 4,
def _solarize_level_to_arg(level, _hparams):
# range [0, 256]
# intensity/severity of augmentation decreases with level
return min(256, int((level / _LEVEL_DENOM) * 256)),
def _solarize_increasing_level_to_arg(level, _hparams):
# range [0, 256]
# intensity/severity of augmentation increases with level
return 256 - _solarize_level_to_arg(level, _hparams)[0],
def _solarize_add_level_to_arg(level, _hparams):
# range [0, 110]
return min(128, int((level / _LEVEL_DENOM) * 110)),
LEVEL_TO_ARG = {
'AutoContrast': None,
'Equalize': None,
'Invert': None,
'Rotate': _rotate_level_to_arg,
# There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers
'Posterize': _posterize_level_to_arg,
'PosterizeIncreasing': _posterize_increasing_level_to_arg,
'PosterizeOriginal': _posterize_original_level_to_arg,
'Solarize': _solarize_level_to_arg,
'SolarizeIncreasing': _solarize_increasing_level_to_arg,
'SolarizeAdd': _solarize_add_level_to_arg,
'Color': _enhance_level_to_arg,
'ColorIncreasing': _enhance_increasing_level_to_arg,
'Contrast': _enhance_level_to_arg,
'ContrastIncreasing': _enhance_increasing_level_to_arg,
'Brightness': _enhance_level_to_arg,
'BrightnessIncreasing': _enhance_increasing_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'SharpnessIncreasing': _enhance_increasing_level_to_arg,
'ShearX': _shear_level_to_arg,
'ShearY': _shear_level_to_arg,
'TranslateX': _translate_abs_level_to_arg,
'TranslateY': _translate_abs_level_to_arg,
'TranslateXRel': _translate_rel_level_to_arg,
'TranslateYRel': _translate_rel_level_to_arg,
'Desaturate': partial(_minmax_level_to_arg, min_val=0.5, max_val=1.0),
'GaussianBlur': partial(_minmax_level_to_arg, min_val=0.1, max_val=2.0),
'GaussianBlurRand': _minmax_level_to_arg,
}
NAME_TO_OP = {
'AutoContrast': auto_contrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'PosterizeIncreasing': posterize,
'PosterizeOriginal': posterize,
'Solarize': solarize,
'SolarizeIncreasing': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'ColorIncreasing': color,
'Contrast': contrast,
'ContrastIncreasing': contrast,
'Brightness': brightness,
'BrightnessIncreasing': brightness,
'Sharpness': sharpness,
'SharpnessIncreasing': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x_abs,
'TranslateY': translate_y_abs,
'TranslateXRel': translate_x_rel,
'TranslateYRel': translate_y_rel,
'Desaturate': desaturate,
'GaussianBlur': gaussian_blur,
'GaussianBlurRand': gaussian_blur_rand,
}
class AugmentOp:
def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
hparams = hparams or _HPARAMS_DEFAULT
self.name = name
self.aug_fn = NAME_TO_OP[name]
self.level_fn = LEVEL_TO_ARG[name]
self.prob = prob
self.magnitude = magnitude
self.hparams = hparams.copy()
self.kwargs = dict(
fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL,
resample=hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION,
)
# If magnitude_std is > 0, we introduce some randomness
# in the usually fixed policy and sample magnitude from a normal distribution
# with mean `magnitude` and std-dev of `magnitude_std`.
# NOTE This is my own hack, being tested, not in papers or reference impls.
# If magnitude_std is inf, we sample magnitude from a uniform distribution
self.magnitude_std = self.hparams.get('magnitude_std', 0)
self.magnitude_max = self.hparams.get('magnitude_max', None)
def __call__(self, img):
if self.prob < 1.0 and random.random() > self.prob:
return img
magnitude = self.magnitude
if self.magnitude_std > 0:
# magnitude randomization enabled
if self.magnitude_std == float('inf'):
# inf == uniform sampling
magnitude = random.uniform(0, magnitude)
elif self.magnitude_std > 0:
magnitude = random.gauss(magnitude, self.magnitude_std)
# default upper_bound for the timm RA impl is _LEVEL_DENOM (10)
# setting magnitude_max overrides this to allow M > 10 (behaviour closer to Google TF RA impl)
upper_bound = self.magnitude_max or _LEVEL_DENOM
magnitude = max(0., min(magnitude, upper_bound))
level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple()
return self.aug_fn(img, *level_args, **self.kwargs)
def __repr__(self):
fs = self.__class__.__name__ + f'(name={self.name}, p={self.prob}'
fs += f', m={self.magnitude}, mstd={self.magnitude_std}'
if self.magnitude_max is not None:
fs += f', mmax={self.magnitude_max}'
fs += ')'
return fs
def auto_augment_policy_v0(hparams):
# ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], # This results in black image with Tpu posterize
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy_v0r(hparams):
# ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used
# in Google research implementation (number of bits discarded increases with magnitude)
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy_original(hparams):
# ImageNet policy from https://arxiv.org/abs/1805.09501
policy = [
[('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
[('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
[('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
[('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)],
[('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
[('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)],
[('Rotate', 0.8, 8), ('Color', 0.4, 0)],
[('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
[('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Rotate', 0.8, 8), ('Color', 1.0, 2)],
[('Color', 0.8, 8), ('Solarize', 0.8, 7)],
[('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
[('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
[('Color', 0.4, 0), ('Equalize', 0.6, 3)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
]
pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy_originalr(hparams):
# ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation
policy = [
[('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
[('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
[('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
[('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)],
[('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
[('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)],
[('Rotate', 0.8, 8), ('Color', 0.4, 0)],
[('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
[('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Rotate', 0.8, 8), ('Color', 1.0, 2)],
[('Color', 0.8, 8), ('Solarize', 0.8, 7)],
[('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
[('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
[('Color', 0.4, 0), ('Equalize', 0.6, 3)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
]
pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy_3a(hparams):
policy = [
[('Solarize', 1.0, 5)], # 128 solarize threshold @ 5 magnitude
[('Desaturate', 1.0, 10)], # grayscale at 10 magnitude
[('GaussianBlurRand', 1.0, 10)],
]
pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy(name='v0', hparams=None):
hparams = hparams or _HPARAMS_DEFAULT
if name == 'original':
return auto_augment_policy_original(hparams)
if name == 'originalr':
return auto_augment_policy_originalr(hparams)
if name == 'v0':
return auto_augment_policy_v0(hparams)
if name == 'v0r':
return auto_augment_policy_v0r(hparams)
if name == '3a':
return auto_augment_policy_3a(hparams)
assert False, f'Unknown AA policy {name}'
class AutoAugment:
def __init__(self, policy):
self.policy = policy
def __call__(self, img):
sub_policy = random.choice(self.policy)
for op in sub_policy:
img = op(img)
return img
def __repr__(self):
fs = self.__class__.__name__ + '(policy='
for p in self.policy:
fs += '\n\t['
fs += ', '.join([str(op) for op in p])
fs += ']'
fs += ')'
return fs
def auto_augment_transform(config_str: str, hparams: Optional[Dict] = None):
""" Create a AutoAugment transform
Args:
config_str: String defining configuration of auto augmentation. Consists of multiple sections separated by
dashes ('-').
            The first section defines the AutoAugment policy (one of 'v0', 'v0r', 'original', 'originalr'),
            while the remaining sections define other arguments:
* 'mstd' - float std deviation of magnitude noise applied
hparams: Other hparams (kwargs) for the AutoAugmentation scheme
Returns:
A PyTorch compatible Transform
Examples::
'original-mstd0.5' results in AutoAugment with original policy, magnitude_std 0.5
"""
config = config_str.split('-')
policy_name = config[0]
config = config[1:]
for c in config:
cs = re.split(r'(\d.*)', c)
if len(cs) < 2:
continue
key, val = cs[:2]
if key == 'mstd':
# noise param injected via hparams for now
hparams.setdefault('magnitude_std', float(val))
else:
assert False, 'Unknown AutoAugment config section'
aa_policy = auto_augment_policy(policy_name, hparams=hparams)
return AutoAugment(aa_policy)
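# Illustrative usage sketch (not part of the original module): build an AutoAugment transform
# from a config string and apply it to a PIL image. The hparams keys shown (translate_const,
# img_mean) mirror this module's defaults; the image is a synthetic stand-in.
def _example_auto_augment_usage():
    from PIL import Image
    aa = auto_augment_transform(
        'original-mstd0.5',
        hparams=dict(translate_const=250, img_mean=(128, 128, 128)),
    )
    img = Image.new('RGB', (224, 224), color=(127, 127, 127))
    return aa(img)  # one randomly selected sub-policy (two ops) is applied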
_RAND_TRANSFORMS = [
'AutoContrast',
'Equalize',
'Invert',
'Rotate',
'Posterize',
'Solarize',
'SolarizeAdd',
'Color',
'Contrast',
'Brightness',
'Sharpness',
'ShearX',
'ShearY',
'TranslateXRel',
'TranslateYRel',
    # 'Cutout' # NOTE I've implemented this as random erasing separately
]
_RAND_INCREASING_TRANSFORMS = [
'AutoContrast',
'Equalize',
'Invert',
'Rotate',
'PosterizeIncreasing',
'SolarizeIncreasing',
'SolarizeAdd',
'ColorIncreasing',
'ContrastIncreasing',
'BrightnessIncreasing',
'SharpnessIncreasing',
'ShearX',
'ShearY',
'TranslateXRel',
'TranslateYRel',
    # 'Cutout' # NOTE I've implemented this as random erasing separately
]
_RAND_3A = [
'SolarizeIncreasing',
'Desaturate',
'GaussianBlur',
]
_RAND_WEIGHTED_3A = {
'SolarizeIncreasing': 6,
'Desaturate': 6,
'GaussianBlur': 6,
'Rotate': 3,
'ShearX': 2,
'ShearY': 2,
'PosterizeIncreasing': 1,
'AutoContrast': 1,
'ColorIncreasing': 1,
'SharpnessIncreasing': 1,
'ContrastIncreasing': 1,
'BrightnessIncreasing': 1,
'Equalize': 1,
'Invert': 1,
}
# These experimental weights are based loosely on the relative improvements mentioned in paper.
# They may not result in increased performance, but could likely be tuned to do so.
_RAND_WEIGHTED_0 = {
'Rotate': 3,
'ShearX': 2,
'ShearY': 2,
'TranslateXRel': 1,
'TranslateYRel': 1,
'ColorIncreasing': .25,
'SharpnessIncreasing': 0.25,
'AutoContrast': 0.25,
'SolarizeIncreasing': .05,
'SolarizeAdd': .05,
'ContrastIncreasing': .05,
'BrightnessIncreasing': .05,
'Equalize': .05,
'PosterizeIncreasing': 0.05,
'Invert': 0.05,
}
def _get_weighted_transforms(transforms: Dict):
transforms, probs = list(zip(*transforms.items()))
probs = np.array(probs)
probs = probs / np.sum(probs)
return transforms, probs
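# Illustrative sketch (not part of the original module): the weighted transform dicts above
# are normalized into a probability vector that RandAugment uses for weighted, no-replacement
# sampling of ops.
def _example_weighted_transforms():
    names, probs = _get_weighted_transforms(_RAND_WEIGHTED_0)
    return names, probs  # probs is aligned with names and sums to 1.0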
def rand_augment_choices(name: str, increasing=True):
if name == 'weights':
return _RAND_WEIGHTED_0
if name == '3aw':
return _RAND_WEIGHTED_3A
if name == '3a':
return _RAND_3A
return _RAND_INCREASING_TRANSFORMS if increasing else _RAND_TRANSFORMS
def rand_augment_ops(
magnitude: Union[int, float] = 10,
prob: float = 0.5,
hparams: Optional[Dict] = None,
transforms: Optional[Union[Dict, List]] = None,
):
hparams = hparams or _HPARAMS_DEFAULT
transforms = transforms or _RAND_TRANSFORMS
return [AugmentOp(
name, prob=prob, magnitude=magnitude, hparams=hparams) for name in transforms]
class RandAugment:
def __init__(self, ops, num_layers=2, choice_weights=None):
self.ops = ops
self.num_layers = num_layers
self.choice_weights = choice_weights
def __call__(self, img):
# no replacement when using weighted choice
ops = np.random.choice(
self.ops,
self.num_layers,
replace=self.choice_weights is None,
p=self.choice_weights,
)
for op in ops:
img = op(img)
return img
def __repr__(self):
fs = self.__class__.__name__ + f'(n={self.num_layers}, ops='
for op in self.ops:
fs += f'\n\t{op}'
fs += ')'
return fs
def rand_augment_transform(
config_str: str,
hparams: Optional[Dict] = None,
transforms: Optional[Union[str, Dict, List]] = None,
):
""" Create a RandAugment transform
Args:
config_str (str): String defining configuration of random augmentation. Consists of multiple sections separated
by dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand').
            The remaining sections, which are not order specific, determine
* 'm' - integer magnitude of rand augment
* 'n' - integer num layers (number of transform ops selected per image)
* 'p' - float probability of applying each layer (default 0.5)
* 'mstd' - float std deviation of magnitude noise applied, or uniform sampling if infinity (or > 100)
* 'mmax' - set upper bound for magnitude to something other than default of _LEVEL_DENOM (10)
* 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
* 't' - str name of transform set to use
hparams (dict): Other hparams (kwargs) for the RandAugmentation scheme
Returns:
A PyTorch compatible Transform
Examples::
'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
'rand-mstd1-tweights' results in mag std 1.0, weighted transforms, default mag of 10 and num_layers 2
"""
magnitude = _LEVEL_DENOM # default to _LEVEL_DENOM for magnitude (currently 10)
num_layers = 2 # default to 2 ops per image
increasing = False
prob = 0.5
config = config_str.split('-')
assert config[0] == 'rand'
config = config[1:]
for c in config:
if c.startswith('t'):
# NOTE old 'w' key was removed, 'w0' is not equivalent to 'tweights'
val = str(c[1:])
if transforms is None:
transforms = val
else:
# numeric options
cs = re.split(r'(\d.*)', c)
if len(cs) < 2:
continue
key, val = cs[:2]
if key == 'mstd':
# noise param / randomization of magnitude values
mstd = float(val)
if mstd > 100:
# use uniform sampling in 0 to magnitude if mstd is > 100
mstd = float('inf')
hparams.setdefault('magnitude_std', mstd)
elif key == 'mmax':
# clip magnitude between [0, mmax] instead of default [0, _LEVEL_DENOM]
hparams.setdefault('magnitude_max', int(val))
elif key == 'inc':
if bool(val):
increasing = True
elif key == 'm':
magnitude = int(val)
elif key == 'n':
num_layers = int(val)
elif key == 'p':
prob = float(val)
else:
assert False, 'Unknown RandAugment config section'
if isinstance(transforms, str):
transforms = rand_augment_choices(transforms, increasing=increasing)
elif transforms is None:
transforms = _RAND_INCREASING_TRANSFORMS if increasing else _RAND_TRANSFORMS
choice_weights = None
if isinstance(transforms, Dict):
transforms, choice_weights = _get_weighted_transforms(transforms)
ra_ops = rand_augment_ops(magnitude=magnitude, prob=prob, hparams=hparams, transforms=transforms)
return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
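# Illustrative usage sketch (not part of the original module): a typical ImageNet-style
# RandAugment setup. 'rand-m9-mstd0.5-inc1' selects magnitude 9, magnitude noise std 0.5 and
# the 'increasing severity' transform set; the hparams keys mirror this module's defaults.
def _example_rand_augment_usage():
    from PIL import Image
    ra = rand_augment_transform(
        'rand-m9-mstd0.5-inc1',
        hparams=dict(translate_const=250, img_mean=(128, 128, 128)),
    )
    img = Image.new('RGB', (224, 224), color=(127, 127, 127))
    return ra(img)  # num_layers (default 2) randomly chosen ops are applied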
_AUGMIX_TRANSFORMS = [
'AutoContrast',
'ColorIncreasing', # not in paper
'ContrastIncreasing', # not in paper
'BrightnessIncreasing', # not in paper
'SharpnessIncreasing', # not in paper
'Equalize',
'Rotate',
'PosterizeIncreasing',
'SolarizeIncreasing',
'ShearX',
'ShearY',
'TranslateXRel',
'TranslateYRel',
]
def augmix_ops(
magnitude: Union[int, float] = 10,
hparams: Optional[Dict] = None,
transforms: Optional[Union[str, Dict, List]] = None,
):
hparams = hparams or _HPARAMS_DEFAULT
transforms = transforms or _AUGMIX_TRANSFORMS
return [AugmentOp(
name,
prob=1.0,
magnitude=magnitude,
hparams=hparams
) for name in transforms]
class AugMixAugment:
""" AugMix Transform
Adapted and improved from impl here: https://github.com/google-research/augmix/blob/master/imagenet.py
From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty -
https://arxiv.org/abs/1912.02781
"""
def __init__(self, ops, alpha=1., width=3, depth=-1, blended=False):
self.ops = ops
self.alpha = alpha
self.width = width
self.depth = depth
self.blended = blended # blended mode is faster but not well tested
def _calc_blended_weights(self, ws, m):
ws = ws * m
cump = 1.
rws = []
for w in ws[::-1]:
alpha = w / cump
cump *= (1 - alpha)
rws.append(alpha)
return np.array(rws[::-1], dtype=np.float32)
def _apply_blended(self, img, mixing_weights, m):
        # This is my first crack at implementing a slightly faster mixed augmentation. Instead
# of accumulating the mix for each chain in a Numpy array and then blending with original,
# it recomputes the blending coefficients and applies one PIL image blend per chain.
# TODO the results appear in the right ballpark but they differ by more than rounding.
img_orig = img.copy()
ws = self._calc_blended_weights(mixing_weights, m)
for w in ws:
depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
ops = np.random.choice(self.ops, depth, replace=True)
img_aug = img_orig # no ops are in-place, deep copy not necessary
for op in ops:
img_aug = op(img_aug)
img = Image.blend(img, img_aug, w)
return img
def _apply_basic(self, img, mixing_weights, m):
# This is a literal adaptation of the paper/official implementation without normalizations and
# PIL <-> Numpy conversions between every op. It is still quite CPU compute heavy compared to the
        # typical augmentation transforms; it could use a GPU / Kornia implementation.
img_shape = img.size[0], img.size[1], len(img.getbands())
mixed = np.zeros(img_shape, dtype=np.float32)
for mw in mixing_weights:
depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
ops = np.random.choice(self.ops, depth, replace=True)
img_aug = img # no ops are in-place, deep copy not necessary
for op in ops:
img_aug = op(img_aug)
mixed += mw * np.asarray(img_aug, dtype=np.float32)
np.clip(mixed, 0, 255., out=mixed)
mixed = Image.fromarray(mixed.astype(np.uint8))
return Image.blend(img, mixed, m)
def __call__(self, img):
mixing_weights = np.float32(np.random.dirichlet([self.alpha] * self.width))
m = np.float32(np.random.beta(self.alpha, self.alpha))
if self.blended:
mixed = self._apply_blended(img, mixing_weights, m)
else:
mixed = self._apply_basic(img, mixing_weights, m)
return mixed
def __repr__(self):
fs = self.__class__.__name__ + f'(alpha={self.alpha}, width={self.width}, depth={self.depth}, ops='
for op in self.ops:
fs += f'\n\t{op}'
fs += ')'
return fs
def augment_and_mix_transform(config_str: str, hparams: Optional[Dict] = None):
""" Create AugMix PyTorch transform
Args:
        config_str (str): String defining configuration of the augmentation mix. Consists of multiple sections
            separated by dashes ('-'). The first section defines the specific variant (currently only 'augmix').
            The remaining sections, which are not order specific, determine
'm' - integer magnitude (severity) of augmentation mix (default: 3)
'w' - integer width of augmentation chain (default: 3)
'd' - integer depth of augmentation chain (-1 is random [1, 3], default: -1)
'b' - integer (bool), blend each branch of chain into end result without a final blend, less CPU (default: 0)
'mstd' - float std deviation of magnitude noise applied (default: 0)
Ex 'augmix-m5-w4-d2' results in AugMix with severity 5, chain width 4, chain depth 2
hparams: Other hparams (kwargs) for the Augmentation transforms
Returns:
A PyTorch compatible Transform
"""
magnitude = 3
width = 3
depth = -1
alpha = 1.
blended = False
config = config_str.split('-')
assert config[0] == 'augmix'
config = config[1:]
for c in config:
cs = re.split(r'(\d.*)', c)
if len(cs) < 2:
continue
key, val = cs[:2]
if key == 'mstd':
# noise param injected via hparams for now
hparams.setdefault('magnitude_std', float(val))
elif key == 'm':
magnitude = int(val)
elif key == 'w':
width = int(val)
elif key == 'd':
depth = int(val)
elif key == 'a':
alpha = float(val)
elif key == 'b':
blended = bool(val)
else:
assert False, 'Unknown AugMix config section'
hparams.setdefault('magnitude_std', float('inf')) # default to uniform sampling (if not set via mstd arg)
ops = augmix_ops(magnitude=magnitude, hparams=hparams)
return AugMixAugment(ops, alpha=alpha, width=width, depth=depth, blended=blended)
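# Illustrative usage sketch (not part of the original module): AugMix with severity 5 and
# mixing chain width 4 (chain depth stays random in [1, 3]). hparams must be a dict here since
# magnitude_std is injected into it; the keys shown mirror this module's defaults.
def _example_augmix_usage():
    from PIL import Image
    am = augment_and_mix_transform(
        'augmix-m5-w4',
        hparams=dict(translate_const=250, img_mean=(128, 128, 128)),
    )
    img = Image.new('RGB', (224, 224), color=(127, 127, 127))
    return am(img)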
| pytorch-image-models/timm/data/auto_augment.py/0 | {
"file_path": "pytorch-image-models/timm/data/auto_augment.py",
"repo_id": "pytorch-image-models",
"token_count": 15926
} |
""" Dataset reader that wraps Hugging Face datasets
Hacked together by / Copyright 2022 Ross Wightman
"""
import io
import math
from typing import Optional
import torch
import torch.distributed as dist
from PIL import Image
try:
import datasets
except ImportError as e:
print("Please install Hugging Face datasets package `pip install datasets`.")
raise e
from .class_map import load_class_map
from .reader import Reader
def get_class_labels(info, label_key='label'):
    if label_key not in info.features:
return {}
class_label = info.features[label_key]
class_to_idx = {n: class_label.str2int(n) for n in class_label.names}
return class_to_idx
class ReaderHfds(Reader):
def __init__(
self,
name: str,
root: Optional[str] = None,
split: str = 'train',
class_map: dict = None,
input_key: str = 'image',
target_key: str = 'label',
download: bool = False,
trust_remote_code: bool = False
):
"""
"""
super().__init__()
self.root = root
self.split = split
self.dataset = datasets.load_dataset(
name, # 'name' maps to path arg in hf datasets
split=split,
cache_dir=self.root, # timm doesn't expect hidden cache dir for datasets, specify a path if root set
trust_remote_code=trust_remote_code
)
# leave decode for caller, plus we want easy access to original path names...
self.dataset = self.dataset.cast_column(input_key, datasets.Image(decode=False))
self.image_key = input_key
self.label_key = target_key
self.remap_class = False
if class_map:
self.class_to_idx = load_class_map(class_map)
self.remap_class = True
else:
self.class_to_idx = get_class_labels(self.dataset.info, self.label_key)
self.split_info = self.dataset.info.splits[split]
self.num_samples = self.split_info.num_examples
def __getitem__(self, index):
item = self.dataset[index]
image = item[self.image_key]
if 'bytes' in image and image['bytes']:
image = io.BytesIO(image['bytes'])
else:
assert 'path' in image and image['path']
image = open(image['path'], 'rb')
label = item[self.label_key]
if self.remap_class:
label = self.class_to_idx[label]
return image, label
def __len__(self):
return len(self.dataset)
def _filename(self, index, basename=False, absolute=False):
item = self.dataset[index]
return item[self.image_key]['path']
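# Illustrative usage sketch (not part of the original module): wrap a Hugging Face dataset and
# pull one sample. The dataset name and its 'img'/'label' column names are assumptions for
# illustration only; substitute the dataset and keys you actually use.
def _example_reader_usage():
    reader = ReaderHfds(
        name='uoft-cs/cifar10',
        split='train',
        input_key='img',
        target_key='label',
    )
    image_file, label = reader[0]  # file-like object with encoded image bytes, integer label
    return image_file, label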
| pytorch-image-models/timm/data/readers/reader_hfds.py/0 | {
"file_path": "pytorch-image-models/timm/data/readers/reader_hfds.py",
"repo_id": "pytorch-image-models",
"token_count": 1197
} |
from typing import List, Optional, Type, Union
import torch
from torch import nn as nn
from torch.nn import functional as F
from .config import use_fused_attn
from .create_conv2d import create_conv2d
from .helpers import to_2tuple
from .pool2d_same import create_pool2d
class MultiQueryAttentionV2(nn.Module):
"""Multi Query Attention.
Fast Transformer Decoding: One Write-Head is All You Need
https://arxiv.org/pdf/1911.02150.pdf
    This is an accelerator-optimized version - removing multiple unnecessary
    tensor transposes by re-arranging indices according to the following rules: 1)
    contracted indices are at the end, 2) other indices have the same order in the
    input and output tensors.
Compared to V1, this gives 3x speed up.
"""
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
num_heads: int = 8,
key_dim: int = 64,
value_dim: int = 64,
attn_drop: float = 0.,
proj_drop: float = 0.,
):
"""Initializer."""
super().__init__()
dim_out = dim_out or dim
self.num_heads = num_heads
self.key_dim = key_dim
self.value_dim = value_dim
self.scale = key_dim ** -0.5
self.query_proj = nn.Parameter(torch.randn([self.num_heads, self.key_dim, dim]))
self.key_proj = nn.Parameter(torch.randn([dim, self.key_dim]))
self.value_proj = nn.Parameter(torch.randn([dim, self.value_dim]))
self.attn_drop = nn.Dropout(attn_drop)
self.out_proj = nn.Parameter(torch.randn([dim_out, self.num_heads, self.value_dim]))
self.proj_drop = nn.Dropout(proj_drop)
def _reshape_input(self, t):
"""Reshapes a tensor to three dimensions, keeping the first and last."""
s = t.shape
# Propagate the shape statically where possible.
#num = t.shape[1:-1].numel()
#return t.reshape(s[0], num, s[-1])
return t.reshape(s[0], s[1], -1).transpose(1, 2)
def forward(self, x, m: Optional[torch.Tensor] = None):
"""Run layer computation."""
b, _, h, w = x.shape
m = m if m is not None else x
reshaped_x = self._reshape_input(x)
reshaped_m = self._reshape_input(m)
q = torch.einsum('bnd,hkd->bnhk', reshaped_x, self.query_proj)
k = torch.einsum('bmd,dk->bmk', reshaped_m, self.key_proj)
attn = torch.einsum('bnhk,bmk->bnhm', q, k) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
v = torch.einsum('bmd,dv->bmv', reshaped_m, self.value_proj)
o = torch.einsum('bnhm,bmv->bnhv', attn, v)
result = torch.einsum('bnhv,dhv->bdn', o, self.out_proj)
result = self.proj_drop(result)
return result.reshape(b, -1, h, w)
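# Illustrative usage sketch (not part of the original module): apply MultiQueryAttentionV2 to a
# dummy NCHW feature map; the output keeps the input spatial size with dim_out channels.
def _example_multi_query_attention_v2():
    attn = MultiQueryAttentionV2(dim=64, num_heads=4, key_dim=16, value_dim=16)
    x = torch.randn(2, 64, 14, 14)
    return attn(x)  # -> (2, 64, 14, 14)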
class MultiQueryAttention2d(nn.Module):
"""Multi Query Attention with spatial downsampling.
    The following parameters are introduced for the spatial downsampling:
1. kv_stride: downsampling factor on Key and Values only.
2. query_strides: horizontal & vertical strides on Query only.
This is an optimized version.
    1. Projections in attention are explicitly written out as 1x1 Conv2d.
    2. Additional reshapes are introduced to bring up to a 3x speed up.
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
num_heads: int = 8,
key_dim: Optional[int] = None,
value_dim: Optional[int] = None,
query_strides: int = 1,
kv_stride: int = 1,
dw_kernel_size: int = 3,
dilation: int = 1,
padding: Union[str, int, List[int]] = '',
attn_drop: float = 0.,
proj_drop: float = 0.,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
use_bias: bool = False,
):
"""Initializer.
Args:
num_heads: Number of attention heads.
key_dim: Size of the attention key dimension.
value_dim: Size of the attention value dimension.
query_strides: Vertical stride size for query only.
kv_stride: Key and value stride size.
dw_kernel_size: Spatial dimension of the depthwise kernel.
"""
super().__init__()
dim_out = dim_out or dim
self.num_heads = num_heads
self.key_dim = key_dim or dim // num_heads
self.value_dim = value_dim or dim // num_heads
self.query_strides = to_2tuple(query_strides)
self.kv_stride = kv_stride
self.has_query_strides = any([s > 1 for s in self.query_strides])
self.scale = self.key_dim ** -0.5
self.fused_attn = use_fused_attn()
self.drop = attn_drop
self.query = nn.Sequential()
if self.has_query_strides:
# FIXME dilation
if padding == 'same':
self.query.add_module('down_pool', create_pool2d(
'avg',
kernel_size=self.query_strides,
padding='same',
))
else:
# no pad if not 'same' as kern=stride=even
self.query.add_module('down_pool', nn.AvgPool2d(kernel_size=query_strides))
self.query.add_module('norm', norm_layer(dim))
self.query.add_module('proj', create_conv2d(
dim,
self.num_heads * self.key_dim,
kernel_size=1,
bias=use_bias,
))
self.key = nn.Sequential()
if kv_stride > 1:
self.key.add_module('down_conv', create_conv2d(
dim,
dim,
kernel_size=dw_kernel_size,
stride=kv_stride,
dilation=dilation,
padding=padding,
depthwise=True,
))
self.key.add_module('norm', norm_layer(dim))
self.key.add_module('proj', create_conv2d(
dim,
self.key_dim,
kernel_size=1,
padding=padding,
bias=use_bias,
))
self.value = nn.Sequential()
if kv_stride > 1:
self.value.add_module('down_conv', create_conv2d(
dim,
dim,
kernel_size=dw_kernel_size,
stride=kv_stride,
dilation=dilation,
padding=padding,
depthwise=True,
))
self.value.add_module('norm', norm_layer(dim))
self.value.add_module('proj', create_conv2d(
dim,
self.value_dim,
kernel_size=1,
bias=use_bias,
))
self.attn_drop = nn.Dropout(attn_drop)
self.output = nn.Sequential()
if self.has_query_strides:
self.output.add_module('upsample', nn.Upsample(scale_factor=self.query_strides, mode='bilinear', align_corners=False))
self.output.add_module('proj', create_conv2d(
self.value_dim * self.num_heads,
dim_out,
kernel_size=1,
bias=use_bias,
))
self.output.add_module('drop', nn.Dropout(proj_drop))
self.einsum = False
def init_weights(self):
# using xavier appeared to improve stability for mobilenetv4 hybrid w/ this layer
nn.init.xavier_uniform_(self.query.proj.weight)
nn.init.xavier_uniform_(self.key.proj.weight)
nn.init.xavier_uniform_(self.value.proj.weight)
if self.kv_stride > 1:
nn.init.xavier_uniform_(self.key.down_conv.weight)
nn.init.xavier_uniform_(self.value.down_conv.weight)
nn.init.xavier_uniform_(self.output.proj.weight)
def _reshape_input(self, t: torch.Tensor):
"""Reshapes a tensor to three dimensions, keeping the batch and channels."""
s = t.shape
t = t.reshape(s[0], s[1], -1).transpose(1, 2)
if self.einsum:
return t
else:
return t.unsqueeze(1).contiguous()
def _reshape_projected_query(self, t: torch.Tensor, num_heads: int, key_dim: int):
"""Reshapes projected query: [b, n, n, h x k] -> [b, n x n, h, k]."""
s = t.shape
t = t.reshape(s[0], num_heads, key_dim, -1)
if self.einsum:
return t.permute(0, 3, 1, 2).contiguous()
else:
return t.transpose(-1, -2).contiguous()
def _reshape_output(self, t: torch.Tensor, num_heads: int, h_px: int, w_px: int):
"""Reshape output:[b, n x n x h, k] -> [b, n, n, hk]."""
s = t.shape
feat_dim = s[-1] * num_heads
if not self.einsum:
t = t.transpose(1, 2)
return t.reshape(s[0], h_px, w_px, feat_dim).permute(0, 3, 1, 2).contiguous()
def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
"""Run layer computation."""
B, C, H, W = s = x.shape
q = self.query(x)
# desired q shape: [b, h, k, n x n] - [b, l, h, k]
q = self._reshape_projected_query(q, self.num_heads, self.key_dim)
k = self.key(x)
# output shape of k: [b, k, p], p = m x m
k = self._reshape_input(k)
v = self.value(x)
# output shape of v: [ b, p, k], p = m x m
v = self._reshape_input(v)
# desired q shape: [b, n x n, h, k]
# desired k shape: [b, m x m, k]
# desired logits shape: [b, n x n, h, m x m]
if self.einsum:
attn = torch.einsum('blhk,bpk->blhp', q, k) * self.scale
if attn_mask is not None:
# NOTE: assumes mask is float and in correct shape
attn = attn + attn_mask
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
o = torch.einsum('blhp,bpk->blhk', attn, v)
else:
if self.fused_attn:
o = F.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_mask,
dropout_p=self.attn_drop.p if self.training else 0.
)
else:
q = q * self.scale
attn = q @ k.transpose(-1, -2)
if attn_mask is not None:
# NOTE: assumes mask is float and in correct shape
attn = attn + attn_mask
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
o = attn @ v
# reshape o into [b, hk, n, n,]
o = self._reshape_output(o, self.num_heads, H // self.query_strides[0], W // self.query_strides[1])
x = self.output(o)
return x
class Attention2d(nn.Module):
fused_attn: torch.jit.Final[bool]
""" multi-head attention for 2D NCHW tensors"""
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
num_heads: int = 32,
bias: bool = True,
expand_first: bool = False,
head_first: bool = False,
attn_drop: float = 0.,
proj_drop: float = 0.
):
super().__init__()
dim_out = dim_out or dim
dim_attn = dim_out if expand_first else dim
self.num_heads = num_heads
self.dim_head = dim_attn // num_heads
self.head_first = head_first
self.fused_attn = use_fused_attn()
self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
B, C, H, W = x.shape
if self.head_first:
q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2)
else:
q, k, v = self.qkv(x).reshape(B, 3, self.num_heads, self.dim_head, -1).unbind(1)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q.transpose(-1, -2).contiguous(),
k.transpose(-1, -2).contiguous(),
v.transpose(-1, -2).contiguous(),
attn_mask=attn_mask,
dropout_p=self.attn_drop.p if self.training else 0.,
).transpose(-1, -2).reshape(B, -1, H, W)
else:
q = q.transpose(-1, -2)
v = v.transpose(-1, -2)
attn = q @ k * q.size(-1) ** -0.5
if attn_mask is not None:
# NOTE: assumes mask is float and in correct shape
attn = attn + attn_mask
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(-1, -2).reshape(B, -1, H, W)
x = self.proj(x)
x = self.proj_drop(x)
return x
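# Illustrative usage sketch (not part of the original module): multi-head attention over a 2D
# NCHW tensor; dim must be divisible by num_heads.
def _example_attention2d():
    attn = Attention2d(dim=128, num_heads=8)
    x = torch.randn(2, 128, 7, 7)
    return attn(x)  # -> (2, 128, 7, 7)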
| pytorch-image-models/timm/layers/attention2d.py/0 | {
"file_path": "pytorch-image-models/timm/layers/attention2d.py",
"repo_id": "pytorch-image-models",
"token_count": 6678
} |
""" DropBlock, DropPath
PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
- https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
- https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .grid import ndgrid
def drop_block_2d(
x,
drop_prob: float = 0.1,
block_size: int = 7,
gamma_scale: float = 1.0,
with_noise: bool = False,
inplace: bool = False,
batchwise: bool = False
):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
runs with success, but needs further validation and possibly optimization for lower runtime impact.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
# seed_drop_rate, the gamma parameter
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(W - block_size + 1) * (H - block_size + 1))
# Forces the block to be inside the feature map.
w_i, h_i = ndgrid(torch.arange(W, device=x.device), torch.arange(H, device=x.device))
valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
if batchwise:
# one mask for whole batch, quite a bit faster
uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
else:
uniform_noise = torch.rand_like(x)
block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
block_mask = -F.max_pool2d(
-block_mask,
kernel_size=clipped_block_size, # block_size,
stride=1,
padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
if inplace:
x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
else:
x = x * block_mask + normal_noise * (1 - block_mask)
else:
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
def drop_block_fast_2d(
x: torch.Tensor,
drop_prob: float = 0.1,
block_size: int = 7,
gamma_scale: float = 1.0,
with_noise: bool = False,
inplace: bool = False,
):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
    DropBlock with an experimental gaussian noise option. Simplified from above without concern for valid
block mask at edges.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(W - block_size + 1) * (H - block_size + 1))
block_mask = torch.empty_like(x).bernoulli_(gamma)
block_mask = F.max_pool2d(
block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.empty_like(x).normal_()
if inplace:
x.mul_(1. - block_mask).add_(normal_noise * block_mask)
else:
x = x * (1. - block_mask) + normal_noise * block_mask
else:
block_mask = 1 - block_mask
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
class DropBlock2d(nn.Module):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
"""
def __init__(
self,
drop_prob: float = 0.1,
block_size: int = 7,
gamma_scale: float = 1.0,
with_noise: bool = False,
inplace: bool = False,
batchwise: bool = False,
fast: bool = True):
super(DropBlock2d, self).__init__()
self.drop_prob = drop_prob
self.gamma_scale = gamma_scale
self.block_size = block_size
self.with_noise = with_noise
self.inplace = inplace
self.batchwise = batchwise
self.fast = fast # FIXME finish comparisons of fast vs not
def forward(self, x):
if not self.training or not self.drop_prob:
return x
if self.fast:
return drop_block_fast_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace)
else:
return drop_block_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
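# Illustrative usage sketch (not part of the original module): DropBlock applied to an NCHW
# feature map. The mask is only sampled in training mode; in eval mode the input is returned
# unchanged.
def _example_drop_block():
    db = DropBlock2d(drop_prob=0.1, block_size=5)
    db.train()
    x = torch.randn(2, 16, 32, 32)
    return db(x)  # same shape, with contiguous blocks zeroed and the remainder rescaled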
def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, x):
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
def extra_repr(self):
return f'drop_prob={round(self.drop_prob,3):0.3f}'
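# Illustrative usage sketch (not part of the original module): stochastic depth applied to the
# residual branch of a block, which is the typical way DropPath is used.
def _example_drop_path():
    block = nn.Sequential(nn.Conv2d(32, 32, 3, padding=1), nn.BatchNorm2d(32), nn.ReLU())
    drop = DropPath(drop_prob=0.1)
    x = torch.randn(4, 32, 8, 8)
    return x + drop(block(x))  # per-sample residual branches are randomly dropped during training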
| pytorch-image-models/timm/layers/drop.py/0 | {
"file_path": "pytorch-image-models/timm/layers/drop.py",
"repo_id": "pytorch-image-models",
"token_count": 3016
} |
import torch
from torch import nn
class LayerScale(nn.Module):
""" LayerScale on tensors with channels in last-dim.
"""
def __init__(
self,
dim: int,
init_values: float = 1e-5,
inplace: bool = False,
) -> None:
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class LayerScale2d(nn.Module):
""" LayerScale for tensors with torch 2D NCHW layout.
"""
def __init__(
self,
dim: int,
init_values: float = 1e-5,
inplace: bool = False,
):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma.view(1, -1, 1, 1)
return x.mul_(gamma) if self.inplace else x * gamma
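# Illustrative usage sketch (not part of the original module): LayerScale for channels-last
# token tensors and LayerScale2d for NCHW feature maps.
def _example_layer_scale():
    ls = LayerScale(dim=192, init_values=1e-5)
    tokens = torch.randn(2, 197, 192)   # (batch, tokens, channels)
    ls2d = LayerScale2d(dim=64, init_values=1e-5)
    feat = torch.randn(2, 64, 14, 14)   # (batch, channels, H, W)
    return ls(tokens), ls2d(feat)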
| pytorch-image-models/timm/layers/layer_scale.py/0 | {
"file_path": "pytorch-image-models/timm/layers/layer_scale.py",
"repo_id": "pytorch-image-models",
"token_count": 482
} |
""" Selective Kernel Convolution/Attention
Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from .conv_bn_act import ConvNormAct
from .helpers import make_divisible
from .trace_utils import _assert
def _kernel_valid(k):
    if isinstance(k, (list, tuple)):
        for ki in k:
            _kernel_valid(ki)
        return
    assert k >= 3 and k % 2
class SelectiveKernelAttn(nn.Module):
def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
""" Selective Kernel Attention Module
Selective Kernel attention mechanism factored out into its own module.
"""
super(SelectiveKernelAttn, self).__init__()
self.num_paths = num_paths
self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False)
self.bn = norm_layer(attn_channels)
self.act = act_layer(inplace=True)
self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False)
def forward(self, x):
_assert(x.shape[1] == self.num_paths, '')
x = x.sum(1).mean((2, 3), keepdim=True)
x = self.fc_reduce(x)
x = self.bn(x)
x = self.act(x)
x = self.fc_select(x)
B, C, H, W = x.shape
x = x.view(B, self.num_paths, C // self.num_paths, H, W)
x = torch.softmax(x, dim=1)
return x
class SelectiveKernel(nn.Module):
def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1,
rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True,
act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_layer=None):
""" Selective Kernel Convolution Module
As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications.
Largest change is the input split, which divides the input channels across each convolution path, this can
be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps
the parameter count from ballooning when the convolutions themselves don't have groups, but still provides
a noteworthy increase in performance over similar param count models without this attention layer. -Ross W
Args:
in_channels (int): module input (feature) channel count
out_channels (int): module output (feature) channel count
kernel_size (int, list): kernel size for each convolution branch
stride (int): stride for convolutions
dilation (int): dilation for module as a whole, impacts dilation of each branch
groups (int): number of groups for each branch
rd_ratio (int, float): reduction factor for attention features
keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations
split_input (bool): split input channels evenly across each convolution branch, keeps param count lower,
can be viewed as grouping by path, output expands to module out_channels count
act_layer (nn.Module): activation layer to use
norm_layer (nn.Module): batchnorm/norm layer to use
aa_layer (nn.Module): anti-aliasing module
drop_layer (nn.Module): spatial drop module in convs (drop block, etc)
"""
super(SelectiveKernel, self).__init__()
out_channels = out_channels or in_channels
kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 5x5 -> 3x3 + dilation
_kernel_valid(kernel_size)
if not isinstance(kernel_size, list):
kernel_size = [kernel_size] * 2
if keep_3x3:
dilation = [dilation * (k - 1) // 2 for k in kernel_size]
kernel_size = [3] * len(kernel_size)
else:
dilation = [dilation] * len(kernel_size)
self.num_paths = len(kernel_size)
self.in_channels = in_channels
self.out_channels = out_channels
self.split_input = split_input
if self.split_input:
assert in_channels % self.num_paths == 0
in_channels = in_channels // self.num_paths
groups = min(out_channels, groups)
conv_kwargs = dict(
stride=stride, groups=groups, act_layer=act_layer, norm_layer=norm_layer,
aa_layer=aa_layer, drop_layer=drop_layer)
self.paths = nn.ModuleList([
ConvNormAct(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs)
for k, d in zip(kernel_size, dilation)])
attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor)
self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels)
def forward(self, x):
if self.split_input:
x_split = torch.split(x, self.in_channels // self.num_paths, 1)
x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)]
else:
x_paths = [op(x) for op in self.paths]
x = torch.stack(x_paths, dim=1)
x_attn = self.attn(x)
x = x * x_attn
x = torch.sum(x, dim=1)
return x
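# Illustrative usage sketch (not part of the original module): a SelectiveKernel block with the
# default two-branch (3x3 and dilated 3x3) configuration applied to a dummy feature map.
def _example_selective_kernel():
    sk = SelectiveKernel(in_channels=64, out_channels=128, stride=1)
    x = torch.randn(2, 64, 28, 28)
    return sk(x)  # -> (2, 128, 28, 28)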
| pytorch-image-models/timm/layers/selective_kernel.py/0 | {
"file_path": "pytorch-image-models/timm/layers/selective_kernel.py",
"repo_id": "pytorch-image-models",
"token_count": 2314
} |
from .beit import *
from .byoanet import *
from .byobnet import *
from .cait import *
from .coat import *
from .convit import *
from .convmixer import *
from .convnext import *
from .crossvit import *
from .cspnet import *
from .davit import *
from .deit import *
from .densenet import *
from .dla import *
from .dpn import *
from .edgenext import *
from .efficientformer import *
from .efficientformer_v2 import *
from .efficientnet import *
from .efficientvit_mit import *
from .efficientvit_msra import *
from .eva import *
from .fastvit import *
from .focalnet import *
from .gcvit import *
from .ghostnet import *
from .hardcorenas import *
from .hgnet import *
from .hiera import *
from .hieradet_sam2 import *
from .hrnet import *
from .inception_next import *
from .inception_resnet_v2 import *
from .inception_v3 import *
from .inception_v4 import *
from .levit import *
from .maxxvit import *
from .mambaout import *
from .metaformer import *
from .mlp_mixer import *
from .mobilenetv3 import *
from .mobilevit import *
from .mvitv2 import *
from .nasnet import *
from .nest import *
from .nextvit import *
from .nfnet import *
from .pit import *
from .pnasnet import *
from .pvt_v2 import *
from .rdnet import *
from .regnet import *
from .repghost import *
from .repvit import *
from .res2net import *
from .resnest import *
from .resnet import *
from .resnetv2 import *
from .rexnet import *
from .selecsls import *
from .senet import *
from .sequencer import *
from .sknet import *
from .swin_transformer import *
from .swin_transformer_v2 import *
from .swin_transformer_v2_cr import *
from .tiny_vit import *
from .tnt import *
from .tresnet import *
from .twins import *
from .vgg import *
from .visformer import *
from .vision_transformer import *
from .vision_transformer_hybrid import *
from .vision_transformer_relpos import *
from .vision_transformer_sam import *
from .vitamin import *
from .volo import *
from .vovnet import *
from .xception import *
from .xception_aligned import *
from .xcit import *
from ._builder import build_model_with_cfg, load_pretrained, load_custom_pretrained, resolve_pretrained_cfg, \
set_pretrained_download_progress, set_pretrained_check_hash
from ._factory import create_model, parse_model_name, safe_model_name
from ._features import FeatureInfo, FeatureHooks, FeatureHookNet, FeatureListNet, FeatureDictNet
from ._features_fx import FeatureGraphNet, GraphExtractNet, create_feature_extractor, get_graph_node_names, \
register_notrace_module, is_notrace_module, get_notrace_modules, \
register_notrace_function, is_notrace_function, get_notrace_functions
from ._helpers import clean_state_dict, load_state_dict, load_checkpoint, remap_state_dict, resume_checkpoint
from ._hub import load_model_config_from_hf, load_state_dict_from_hf, push_to_hf_hub
from ._manipulate import model_parameters, named_apply, named_modules, named_modules_with_params, \
group_modules, group_parameters, checkpoint_seq, checkpoint, adapt_input_conv
from ._pretrained import PretrainedCfg, DefaultCfg, filter_pretrained_cfg
from ._prune import adapt_model_from_string
from ._registry import split_model_name_tag, get_arch_name, generate_default_cfgs, register_model, \
register_model_deprecations, model_entrypoint, list_models, list_pretrained, get_deprecated_models, \
is_model, list_modules, is_model_in_modules, is_model_pretrained, get_pretrained_cfg, get_pretrained_cfg_value, \
get_arch_pretrained_cfgs
| pytorch-image-models/timm/models/__init__.py/0 | {
"file_path": "pytorch-image-models/timm/models/__init__.py",
"repo_id": "pytorch-image-models",
"token_count": 1143
} |
""" PyTorch implementation of DualPathNetworks
Based on original MXNet implementation https://github.com/cypw/DPNs with
many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs.
This implementation is compatible with the pretrained weights from cypw's MXNet implementation.
Hacked together by / Copyright 2020 Ross Wightman
"""
from collections import OrderedDict
from functools import partial
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import BatchNormAct2d, ConvNormAct, create_conv2d, create_classifier, get_norm_act_layer
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['DPN']
class CatBnAct(nn.Module):
def __init__(self, in_chs, norm_layer=BatchNormAct2d):
super(CatBnAct, self).__init__()
self.bn = norm_layer(in_chs, eps=0.001)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (Tuple[torch.Tensor, torch.Tensor]) -> (torch.Tensor)
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (torch.Tensor)
pass
def forward(self, x):
if isinstance(x, tuple):
x = torch.cat(x, dim=1)
return self.bn(x)
class BnActConv2d(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d):
super(BnActConv2d, self).__init__()
self.bn = norm_layer(in_chs, eps=0.001)
self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups)
def forward(self, x):
return self.conv(self.bn(x))
class DualPathBlock(nn.Module):
def __init__(
self,
in_chs,
num_1x1_a,
num_3x3_b,
num_1x1_c,
inc,
groups,
block_type='normal',
b=False,
):
super(DualPathBlock, self).__init__()
self.num_1x1_c = num_1x1_c
self.inc = inc
self.b = b
if block_type == 'proj':
self.key_stride = 1
self.has_proj = True
elif block_type == 'down':
self.key_stride = 2
self.has_proj = True
else:
assert block_type == 'normal'
self.key_stride = 1
self.has_proj = False
self.c1x1_w_s1 = None
self.c1x1_w_s2 = None
if self.has_proj:
# Using different member names here to allow easier parameter key matching for conversion
if self.key_stride == 2:
self.c1x1_w_s2 = BnActConv2d(
in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2)
else:
self.c1x1_w_s1 = BnActConv2d(
in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1)
self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1)
self.c3x3_b = BnActConv2d(
in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups)
if b:
self.c1x1_c = CatBnAct(in_chs=num_3x3_b)
self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1)
self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1)
else:
self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1)
self.c1x1_c1 = None
self.c1x1_c2 = None
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
pass
def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]:
if isinstance(x, tuple):
x_in = torch.cat(x, dim=1)
else:
x_in = x
if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None:
# self.has_proj == False, torchscript requires condition on module == None
x_s1 = x[0]
x_s2 = x[1]
else:
# self.has_proj == True
if self.c1x1_w_s1 is not None:
# self.key_stride = 1
x_s = self.c1x1_w_s1(x_in)
else:
# self.key_stride = 2
x_s = self.c1x1_w_s2(x_in)
x_s1 = x_s[:, :self.num_1x1_c, :, :]
x_s2 = x_s[:, self.num_1x1_c:, :, :]
x_in = self.c1x1_a(x_in)
x_in = self.c3x3_b(x_in)
x_in = self.c1x1_c(x_in)
if self.c1x1_c1 is not None:
# self.b == True, using None check for torchscript compat
out1 = self.c1x1_c1(x_in)
out2 = self.c1x1_c2(x_in)
else:
out1 = x_in[:, :self.num_1x1_c, :, :]
out2 = x_in[:, self.num_1x1_c:, :, :]
resid = x_s1 + out1
dense = torch.cat([x_s2, out2], dim=1)
return resid, dense
class DPN(nn.Module):
def __init__(
self,
k_sec=(3, 4, 20, 3),
inc_sec=(16, 32, 24, 128),
k_r=96,
groups=32,
num_classes=1000,
in_chans=3,
output_stride=32,
global_pool='avg',
small=False,
num_init_features=64,
b=False,
drop_rate=0.,
norm_layer='batchnorm2d',
act_layer='relu',
fc_act_layer='elu',
):
super(DPN, self).__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.b = b
assert output_stride == 32 # FIXME look into dilation support
norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=act_layer), eps=.001)
fc_norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=fc_act_layer), eps=.001, inplace=False)
bw_factor = 1 if small else 4
blocks = OrderedDict()
# conv1
blocks['conv1_1'] = ConvNormAct(
in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer)
blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')]
# conv2
bw = 64 * bw_factor
inc = inc_sec[0]
r = (k_r * bw) // (64 * bw_factor)
blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[0] + 1):
blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')]
# conv3
bw = 128 * bw_factor
inc = inc_sec[1]
r = (k_r * bw) // (64 * bw_factor)
blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[1] + 1):
blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.feature_info += [dict(num_chs=in_chs, reduction=8, module=f'features.conv3_{k_sec[1]}')]
# conv4
bw = 256 * bw_factor
inc = inc_sec[2]
r = (k_r * bw) // (64 * bw_factor)
blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[2] + 1):
blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')]
# conv5
bw = 512 * bw_factor
inc = inc_sec[3]
r = (k_r * bw) // (64 * bw_factor)
blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
in_chs = bw + 3 * inc
for i in range(2, k_sec[3] + 1):
blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
in_chs += inc
self.feature_info += [dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')]
blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer)
self.num_features = self.head_hidden_size = in_chs
self.features = nn.Sequential(blocks)
# Using 1x1 conv for the FC layer to allow the extra pooling scheme
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, use_conv=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^features\.conv1',
blocks=[
(r'^features\.conv(\d+)' if coarse else r'^features\.conv(\d+)_(\d+)', None),
(r'^features\.conv5_bn_ac', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, use_conv=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity()
def forward_features(self, x):
return self.features(x)
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
if pre_logits:
return self.flatten(x)
x = self.classifier(x)
return self.flatten(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_dpn(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
DPN,
variant,
pretrained,
feature_cfg=dict(feature_concat=True, flatten_sequential=True),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD,
'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'dpn48b.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'dpn68.mx_in1k': _cfg(hf_hub_id='timm/'),
'dpn68b.ra_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD,
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'dpn68b.mx_in1k': _cfg(hf_hub_id='timm/'),
'dpn92.mx_in1k': _cfg(hf_hub_id='timm/'),
'dpn98.mx_in1k': _cfg(hf_hub_id='timm/'),
'dpn131.mx_in1k': _cfg(hf_hub_id='timm/'),
'dpn107.mx_in1k': _cfg(hf_hub_id='timm/')
})
@register_model
def dpn48b(pretrained=False, **kwargs) -> DPN:
model_args = dict(
small=True, num_init_features=10, k_r=128, groups=32,
b=True, k_sec=(3, 4, 6, 3), inc_sec=(16, 32, 32, 64), act_layer='silu')
return _create_dpn('dpn48b', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn68(pretrained=False, **kwargs) -> DPN:
model_args = dict(
small=True, num_init_features=10, k_r=128, groups=32,
k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64))
return _create_dpn('dpn68', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn68b(pretrained=False, **kwargs) -> DPN:
model_args = dict(
small=True, num_init_features=10, k_r=128, groups=32,
b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64))
return _create_dpn('dpn68b', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn92(pretrained=False, **kwargs) -> DPN:
model_args = dict(
num_init_features=64, k_r=96, groups=32,
k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128))
return _create_dpn('dpn92', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn98(pretrained=False, **kwargs) -> DPN:
model_args = dict(
num_init_features=96, k_r=160, groups=40,
k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128))
return _create_dpn('dpn98', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn131(pretrained=False, **kwargs) -> DPN:
model_args = dict(
num_init_features=128, k_r=160, groups=40,
k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128))
return _create_dpn('dpn131', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def dpn107(pretrained=False, **kwargs) -> DPN:
model_args = dict(
num_init_features=128, k_r=200, groups=50,
k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128))
return _create_dpn('dpn107', pretrained=pretrained, **dict(model_args, **kwargs))
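# Illustrative usage sketch (not part of the original module): build a randomly initialized
# DPN-68 (no pretrained weights) and run a dummy forward pass. num_classes=10 is arbitrary.
def _example_dpn_forward():
    model = dpn68(pretrained=False, num_classes=10)
    model.eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(x)
    return logits  # -> (1, 10)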
| pytorch-image-models/timm/models/dpn.py/0 | {
"file_path": "pytorch-image-models/timm/models/dpn.py",
"repo_id": "pytorch-image-models",
"token_count": 7004
} |
""" MobileNet V3
A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl.
Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244
Hacked together by / Copyright 2019, Ross Wightman
"""
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import SelectAdaptivePool2d, Linear, LayerType, PadType, create_conv2d, get_norm_act_layer
from ._builder import build_model_with_cfg, pretrained_cfg_for_features
from ._efficientnet_blocks import SqueezeExcite
from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \
round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT
from ._features import FeatureInfo, FeatureHooks, feature_take_indices
from ._manipulate import checkpoint_seq, checkpoint
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['MobileNetV3', 'MobileNetV3Features']
class MobileNetV3(nn.Module):
""" MobiletNet-V3
Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific
'efficient head', where global pooling is done before the head convolution without a final batch-norm
layer before the classifier.
Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244
Other architectures utilizing MobileNet-V3 efficient head that are supported by this impl include:
* HardCoRe-NAS - https://arxiv.org/abs/2102.11646 (defn in hardcorenas.py uses this class)
* FBNet-V3 - https://arxiv.org/abs/2006.02049
* LCNet - https://arxiv.org/abs/2109.15099
* MobileNet-V4 - https://arxiv.org/abs/2404.10518
"""
def __init__(
self,
block_args: BlockArgs,
num_classes: int = 1000,
in_chans: int = 3,
stem_size: int = 16,
fix_stem: bool = False,
num_features: int = 1280,
head_bias: bool = True,
head_norm: bool = False,
pad_type: str = '',
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
se_from_exp: bool = True,
round_chs_fn: Callable = round_channels,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
layer_scale_init_value: Optional[float] = None,
global_pool: str = 'avg',
):
"""
Args:
block_args: Arguments for blocks of the network.
num_classes: Number of classes for classification head.
in_chans: Number of input image channels.
stem_size: Number of output channels of the initial stem convolution.
fix_stem: If True, don't scale stem by round_chs_fn.
num_features: Number of output channels of the conv head layer.
            head_bias: If True, add a learnable bias to the conv head layer.
            head_norm: If True, follow the conv head with a norm + act layer instead of a bias + act (MobileNet-V4 style).
            pad_type: Type of padding to use for convolution layers.
act_layer: Type of activation layer.
norm_layer: Type of normalization layer.
aa_layer: Type of anti-aliasing layer.
se_layer: Type of Squeeze-and-Excite layer.
se_from_exp: If True, calculate SE channel reduction from expanded mid channels.
round_chs_fn: Callable to round number of filters based on depth multiplier.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
layer_scale_init_value: Enable layer scale on compatible blocks if not None.
global_pool: Type of pooling to use for global pooling features of the FC head.
"""
super(MobileNetV3, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
se_layer = se_layer or SqueezeExcite
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_act_layer(stem_size, inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=32,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
se_from_exp=se_from_exp,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = builder.features
self.stage_ends = [f['stage'] for f in self.feature_info]
self.num_features = builder.in_chs # features of last stage, output of forward_features()
self.head_hidden_size = num_features # features of conv_head, pre_logits output
# Head + Pooling
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
num_pooled_chs = self.num_features * self.global_pool.feat_mult()
if head_norm:
# mobilenet-v4 post-pooling PW conv is followed by a norm+act layer
self.conv_head = create_conv2d(num_pooled_chs, self.head_hidden_size, 1, padding=pad_type) # never bias
self.norm_head = norm_act_layer(self.head_hidden_size)
self.act2 = nn.Identity()
else:
# mobilenet-v3 and others only have an activation after final PW conv
self.conv_head = create_conv2d(num_pooled_chs, self.head_hidden_size, 1, padding=pad_type, bias=head_bias)
self.norm_head = nn.Identity()
self.act2 = act_layer(inplace=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity()
efficientnet_init_weights(self)
def as_sequential(self):
layers = [self.conv_stem, self.bn1]
layers.extend(self.blocks)
layers.extend([self.global_pool, self.conv_head, self.norm_head, self.act2])
layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
return nn.Sequential(*layers)
@torch.jit.ignore
def group_matcher(self, coarse: bool = False):
return dict(
stem=r'^conv_stem|bn1',
blocks=r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
# NOTE: cannot meaningfully change pooling of efficient head after creation
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity()
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
extra_blocks: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
extra_blocks: Include outputs of all blocks and head conv in output, does not align with feature_info
        Returns:
            List of intermediate features if `intermediates_only`, else a tuple of
            (final block output, list of intermediate features).
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
if stop_early:
assert intermediates_only, 'Must use intermediates_only for early stopping.'
intermediates = []
if extra_blocks:
take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices)
else:
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
take_indices = [self.stage_ends[i] for i in take_indices]
max_index = self.stage_ends[max_index]
# forward pass
feat_idx = 0 # stem is index 0
x = self.conv_stem(x)
x = self.bn1(x)
if feat_idx in take_indices:
intermediates.append(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index]
for blk in blocks:
feat_idx += 1
x = blk(x)
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
extra_blocks: bool = False,
):
""" Prune layers not required for specified intermediates.
"""
if extra_blocks:
take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices)
else:
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
max_index = self.stage_ends[max_index]
        num_blocks = len(self.blocks)
        self.blocks = self.blocks[:max_index]  # truncate blocks w/ stem as idx 0
        if max_index < num_blocks:
self.conv_head = nn.Identity()
self.norm_head = nn.Identity()
if prune_head:
self.conv_head = nn.Identity()
self.norm_head = nn.Identity()
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv_stem(x)
x = self.bn1(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=True)
else:
x = self.blocks(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
x = self.global_pool(x)
x = self.conv_head(x)
x = self.norm_head(x)
x = self.act2(x)
x = self.flatten(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
if pre_logits:
return x
return self.classifier(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
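# Editor-added usage sketch (not part of the original module): a rough example of driving MobileNetV3
# through the registered factory functions defined further down this file; it assumes an untrained
# model (pretrained=False) so no weight download is needed.
#
#   import torch
#   model = mobilenetv3_large_100(pretrained=False)
#   x = torch.randn(1, 3, 224, 224)
#   logits = model(x)                  # (1, 1000)
#   feats = model.forward_features(x)  # (1, 960, 7, 7) last-stage feature map for this variant
#   inter = model.forward_intermediates(x, indices=3, intermediates_only=True)
#   # `inter` is a list of 3 tensors taken at the last 3 stage ends (see feature_info / stage_ends).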
class MobileNetV3Features(nn.Module):
""" MobileNetV3 Feature Extractor
A work-in-progress feature extraction module for MobileNet-V3 to use as a backbone for segmentation
and object detection models.
"""
def __init__(
self,
block_args: BlockArgs,
out_indices: Tuple[int, ...] = (0, 1, 2, 3, 4),
feature_location: str = 'bottleneck',
in_chans: int = 3,
stem_size: int = 16,
fix_stem: bool = False,
output_stride: int = 32,
pad_type: PadType = '',
round_chs_fn: Callable = round_channels,
se_from_exp: bool = True,
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
layer_scale_init_value: Optional[float] = None,
):
"""
Args:
block_args: Arguments for blocks of the network.
out_indices: Output from stages at indices.
feature_location: Location of feature before/after each block, must be in ['bottleneck', 'expansion']
in_chans: Number of input image channels.
stem_size: Number of output channels of the initial stem convolution.
fix_stem: If True, don't scale stem by round_chs_fn.
output_stride: Output stride of the network.
pad_type: Type of padding to use for convolution layers.
round_chs_fn: Callable to round number of filters based on depth multiplier.
se_from_exp: If True, calculate SE channel reduction from expanded mid channels.
act_layer: Type of activation layer.
            norm_layer: Type of normalization layer.
            aa_layer: Type of anti-aliasing layer.
            se_layer: Type of Squeeze-and-Excite layer.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
layer_scale_init_value: Enable layer scale on compatible blocks if not None.
"""
super(MobileNetV3Features, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
se_layer = se_layer or SqueezeExcite
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_layer(stem_size)
self.act1 = act_layer(inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=output_stride,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
se_from_exp=se_from_exp,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
feature_location=feature_location,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = FeatureInfo(builder.features, out_indices)
self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()}
efficientnet_init_weights(self)
# Register feature extraction hooks with FeatureHooks helper
self.feature_hooks = None
if feature_location != 'bottleneck':
hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))
self.feature_hooks = FeatureHooks(hooks, self.named_modules())
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
if self.feature_hooks is None:
features = []
if 0 in self._stage_out_idx:
features.append(x) # add stem out
for i, b in enumerate(self.blocks):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(b, x)
else:
x = b(x)
if i + 1 in self._stage_out_idx:
features.append(x)
return features
else:
self.blocks(x)
out = self.feature_hooks.get_output(x.device)
return list(out.values())
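# Editor-added sketch (not in the original file): MobileNetV3Features is usually reached through the
# timm factory with features_only=True rather than built directly; a minimal example, assuming timm
# is importable and untrained weights are acceptable:
#
#   import timm, torch
#   backbone = timm.create_model('mobilenetv3_large_100', features_only=True, out_indices=(1, 2, 3, 4))
#   fmaps = backbone(torch.randn(1, 3, 224, 224))
#   print([f.shape for f in fmaps])   # one NCHW feature map per requested stage, see backbone.feature_info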
def _create_mnv3(variant: str, pretrained: bool = False, **kwargs) -> MobileNetV3:
features_mode = ''
model_cls = MobileNetV3
kwargs_filter = None
if kwargs.pop('features_only', False):
if 'feature_cfg' in kwargs or 'feature_cls' in kwargs:
features_mode = 'cfg'
else:
kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'head_norm', 'global_pool')
model_cls = MobileNetV3Features
features_mode = 'cls'
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
features_only=features_mode == 'cfg',
pretrained_strict=features_mode != 'cls',
kwargs_filter=kwargs_filter,
**kwargs,
)
if features_mode == 'cls':
model.default_cfg = pretrained_cfg_for_features(model.default_cfg)
return model
def _gen_mobilenet_v3_rw(
variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs
) -> MobileNetV3:
"""Creates a MobileNet-V3 model.
Ref impl: ?
Paper: https://arxiv.org/abs/1905.02244
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish
# stage 5, 14x14in
['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish
# stage 6, 7x7 in
['cn_r1_k1_s1_c960'], # hard-swish
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
head_bias=False,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'hard_swish'),
se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'),
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
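# Editor's note (added): an informal reading of the block-definition strings used by the _gen_* functions
# in this file, following the decode_arch_def() conventions -- a guide for readers, not an authoritative spec:
#   'ir_r3_k5_s2_e3_c40_se0.25_nre'
#     ir     -> inverted residual ('ds' depthwise-separable, 'er' edge residual / FusedIB, 'cn' conv-bn-act,
#               'uir' universal inverted residual, 'mqa' multi-query attention)
#     r3     -> repeat the block 3 times (the stride applies to the first repeat only)
#     k5/s2  -> 5x5 depthwise kernel, stride 2
#     e3/c40 -> 3x channel expansion ratio, 40 output channels
#     se0.25 -> squeeze-and-excite with 0.25 reduction ratio
#     nre    -> override the default activation with ReLU ('noskip' disables the residual connection)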
def _gen_mobilenet_v3(
variant: str, channel_multiplier: float = 1.0, depth_multiplier: float = 1.0,
group_size=None, pretrained: bool = False, **kwargs
) -> MobileNetV3:
"""Creates a MobileNet-V3 model.
Ref impl: ?
Paper: https://arxiv.org/abs/1905.02244
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
if 'small' in variant:
num_features = 1024
if 'minimal' in variant:
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s2_e1_c16'],
# stage 1, 56x56 in
['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'],
# stage 2, 28x28 in
['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'],
# stage 3, 14x14 in
['ir_r2_k3_s1_e3_c48'],
# stage 4, 14x14in
['ir_r3_k3_s2_e6_c96'],
# stage 6, 7x7 in
['cn_r1_k1_s1_c576'],
]
else:
act_layer = resolve_act_layer(kwargs, 'hard_swish')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu
# stage 1, 56x56 in
['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu
# stage 2, 28x28 in
['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish
# stage 3, 14x14 in
['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish
# stage 4, 14x14in
['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish
# stage 6, 7x7 in
['cn_r1_k1_s1_c576'], # hard-swish
]
else:
num_features = 1280
if 'minimal' in variant:
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16'],
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'],
# stage 2, 56x56 in
['ir_r3_k3_s2_e3_c40'],
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'],
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112'],
# stage 5, 14x14in
['ir_r3_k3_s2_e6_c160'],
# stage 6, 7x7 in
['cn_r1_k1_s1_c960'],
]
else:
act_layer = resolve_act_layer(kwargs, 'hard_swish')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_nre'], # relu
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish
# stage 5, 14x14in
['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish
# stage 6, 7x7 in
['cn_r1_k1_s1_c960'], # hard-swish
]
se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, group_size=group_size),
num_features=num_features,
stem_size=16,
fix_stem=channel_multiplier < 0.75,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=act_layer,
se_layer=se_layer,
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _gen_fbnetv3(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs):
""" FBNetV3
Paper: `FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining`
- https://arxiv.org/abs/2006.02049
FIXME untested, this is a preliminary impl of some FBNet-V3 variants.
"""
vl = variant.split('_')[-1]
if vl in ('a', 'b'):
stem_size = 16
arch_def = [
['ds_r2_k3_s1_e1_c16'],
['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'],
['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'],
['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'],
['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'],
['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'],
['cn_r1_k1_s1_c1344'],
]
elif vl == 'd':
stem_size = 24
arch_def = [
['ds_r2_k3_s1_e1_c16'],
['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'],
['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'],
['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'],
['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'],
['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'],
['cn_r1_k1_s1_c1440'],
]
elif vl == 'g':
stem_size = 32
arch_def = [
['ds_r3_k3_s1_e1_c24'],
['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'],
['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'],
['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'],
['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'],
['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'],
['cn_r1_k1_s1_c1728'],
]
else:
        raise NotImplementedError(f'Unknown fbnetv3 variant {variant}.')
round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95)
se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn)
act_layer = resolve_act_layer(kwargs, 'hard_swish')
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
num_features=1984,
head_bias=False,
stem_size=stem_size,
round_chs_fn=round_chs_fn,
se_from_exp=False,
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=act_layer,
se_layer=se_layer,
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _gen_lcnet(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs):
""" LCNet
Essentially a MobileNet-V3 crossed with a MobileNet-V1
Paper: `PP-LCNet: A Lightweight CPU Convolutional Neural Network` - https://arxiv.org/abs/2109.15099
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['dsa_r1_k3_s1_c32'],
# stage 1, 112x112 in
['dsa_r2_k3_s2_c64'],
# stage 2, 56x56 in
['dsa_r2_k3_s2_c128'],
# stage 3, 28x28 in
['dsa_r1_k3_s2_c256', 'dsa_r1_k5_s1_c256'],
# stage 4, 14x14in
['dsa_r4_k5_s1_c256'],
# stage 5, 14x14in
['dsa_r2_k5_s2_c512_se0.25'],
# 7x7
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=16,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'hard_swish'),
se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU),
num_features=1280,
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _gen_mobilenet_v4(
variant: str, channel_multiplier: float = 1.0, group_size=None, pretrained: bool = False, **kwargs,
) -> MobileNetV3:
"""Creates a MobileNet-V4 model.
Ref impl: ?
    Paper: MobileNetV4 - Universal Models for the Mobile Ecosystem - https://arxiv.org/abs/2404.10518
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
num_features = 1280
if 'hybrid' in variant:
layer_scale_init_value = 1e-5
if 'medium' in variant:
stem_size = 32
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
[
'er_r1_k3_s2_e4_c48' # FusedIB (EdgeResidual)
],
# stage 1, 56x56 in
[
'uir_r1_a3_k5_s2_e4_c80', # ExtraDW
'uir_r1_a3_k3_s1_e2_c80', # ExtraDW
],
# stage 2, 28x28 in
[
'uir_r1_a3_k5_s2_e6_c160', # ExtraDW
'uir_r1_a0_k0_s1_e2_c160', # FFN
'uir_r1_a3_k3_s1_e4_c160', # ExtraDW
'uir_r1_a3_k5_s1_e4_c160', # ExtraDW
'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample
'uir_r1_a3_k3_s1_e4_c160', # ExtraDW
'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample
'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt
'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample
'uir_r1_a3_k3_s1_e4_c160', # ExtraDW
'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample
'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt
],
# stage 3, 14x14in
[
'uir_r1_a5_k5_s2_e6_c256', # ExtraDW
'uir_r1_a5_k5_s1_e4_c256', # ExtraDW
'uir_r2_a3_k5_s1_e4_c256', # ExtraDW
'uir_r1_a0_k0_s1_e2_c256', # FFN
'uir_r1_a3_k5_s1_e2_c256', # ExtraDW
'uir_r1_a0_k0_s1_e2_c256', # FFN
'uir_r1_a0_k0_s1_e4_c256', # FFN
'mqa_r1_k3_h4_s1_d64_c256', # MQA
'uir_r1_a3_k0_s1_e4_c256', # ConvNeXt
'mqa_r1_k3_h4_s1_d64_c256', # MQA
'uir_r1_a5_k5_s1_e4_c256', # ExtraDW
'mqa_r1_k3_h4_s1_d64_c256', # MQA
'uir_r1_a5_k0_s1_e4_c256', # ConvNeXt
'mqa_r1_k3_h4_s1_d64_c256', # MQA
'uir_r1_a5_k0_s1_e4_c256', # ConvNeXt
],
# stage 4, 7x7 in
[
'cn_r1_k1_s1_c960' # Conv
],
]
elif 'large' in variant:
stem_size = 24
act_layer = resolve_act_layer(kwargs, 'gelu')
arch_def = [
# stage 0, 112x112 in
[
'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual)
],
# stage 1, 56x56 in
[
'uir_r1_a3_k5_s2_e4_c96', # ExtraDW
'uir_r1_a3_k3_s1_e4_c96', # ExtraDW
],
# stage 2, 28x28 in
[
'uir_r1_a3_k5_s2_e4_c192', # ExtraDW
'uir_r3_a3_k3_s1_e4_c192', # ExtraDW
'uir_r1_a3_k5_s1_e4_c192', # ExtraDW
'uir_r2_a5_k3_s1_e4_c192', # ExtraDW
'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample
'uir_r1_a5_k3_s1_e4_c192', # ExtraDW
'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample
'uir_r1_a5_k3_s1_e4_c192', # ExtraDW
'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample
'uir_r1_a5_k3_s1_e4_c192', # ExtraDW
'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample
'uir_r1_a3_k0_s1_e4_c192', # ConvNeXt
],
# stage 3, 14x14in
[
'uir_r4_a5_k5_s2_e4_c512', # ExtraDW
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
'uir_r1_a5_k3_s1_e4_c512', # ExtraDW
'uir_r2_a5_k0_s1_e4_c512', # ConvNeXt
'uir_r1_a5_k3_s1_e4_c512', # ExtraDW
'uir_r1_a5_k5_s1_e4_c512', # ExtraDW
'mqa_r1_k3_h8_s1_d64_c512', # MQA
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
'mqa_r1_k3_h8_s1_d64_c512', # MQA
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
'mqa_r1_k3_h8_s1_d64_c512', # MQA
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
'mqa_r1_k3_h8_s1_d64_c512', # MQA
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
],
# stage 4, 7x7 in
[
'cn_r1_k1_s1_c960', # Conv
],
]
else:
assert False, f'Unknown variant {variant}.'
else:
layer_scale_init_value = None
if 'small' in variant:
stem_size = 32
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
[
'cn_r1_k3_s2_e1_c32', # Conv
'cn_r1_k1_s1_e1_c32', # Conv
],
# stage 1, 56x56 in
[
'cn_r1_k3_s2_e1_c96', # Conv
'cn_r1_k1_s1_e1_c64', # Conv
],
# stage 2, 28x28 in
[
'uir_r1_a5_k5_s2_e3_c96', # ExtraDW
'uir_r4_a0_k3_s1_e2_c96', # IR
'uir_r1_a3_k0_s1_e4_c96', # ConvNeXt
],
# stage 3, 14x14 in
[
'uir_r1_a3_k3_s2_e6_c128', # ExtraDW
'uir_r1_a5_k5_s1_e4_c128', # ExtraDW
'uir_r1_a0_k5_s1_e4_c128', # IR
'uir_r1_a0_k5_s1_e3_c128', # IR
'uir_r2_a0_k3_s1_e4_c128', # IR
],
# stage 4, 7x7 in
[
'cn_r1_k1_s1_c960', # Conv
],
]
elif 'medium' in variant:
stem_size = 32
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
[
'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual)
],
# stage 1, 56x56 in
[
'uir_r1_a3_k5_s2_e4_c80', # ExtraDW
'uir_r1_a3_k3_s1_e2_c80', # ExtraDW
],
# stage 2, 28x28 in
[
'uir_r1_a3_k5_s2_e6_c160', # ExtraDW
'uir_r2_a3_k3_s1_e4_c160', # ExtraDW
'uir_r1_a3_k5_s1_e4_c160', # ExtraDW
'uir_r1_a3_k3_s1_e4_c160', # ExtraDW
'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt
                    'uir_r1_a0_k0_s1_e2_c160',  # FFN
'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt
],
# stage 3, 14x14in
[
'uir_r1_a5_k5_s2_e6_c256', # ExtraDW
'uir_r1_a5_k5_s1_e4_c256', # ExtraDW
'uir_r2_a3_k5_s1_e4_c256', # ExtraDW
'uir_r1_a0_k0_s1_e4_c256', # FFN
'uir_r1_a3_k0_s1_e4_c256', # ConvNeXt
'uir_r1_a3_k5_s1_e2_c256', # ExtraDW
'uir_r1_a5_k5_s1_e4_c256', # ExtraDW
'uir_r2_a0_k0_s1_e4_c256', # FFN
'uir_r1_a5_k0_s1_e2_c256', # ConvNeXt
],
# stage 4, 7x7 in
[
'cn_r1_k1_s1_c960', # Conv
],
]
elif 'large' in variant:
stem_size = 24
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
[
'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual)
],
# stage 1, 56x56 in
[
'uir_r1_a3_k5_s2_e4_c96', # ExtraDW
'uir_r1_a3_k3_s1_e4_c96', # ExtraDW
],
# stage 2, 28x28 in
[
'uir_r1_a3_k5_s2_e4_c192', # ExtraDW
'uir_r3_a3_k3_s1_e4_c192', # ExtraDW
'uir_r1_a3_k5_s1_e4_c192', # ExtraDW
'uir_r5_a5_k3_s1_e4_c192', # ExtraDW
'uir_r1_a3_k0_s1_e4_c192', # ConvNeXt
],
# stage 3, 14x14in
[
'uir_r4_a5_k5_s2_e4_c512', # ExtraDW
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
'uir_r1_a5_k3_s1_e4_c512', # ExtraDW
'uir_r2_a5_k0_s1_e4_c512', # ConvNeXt
'uir_r1_a5_k3_s1_e4_c512', # ExtraDW
'uir_r1_a5_k5_s1_e4_c512', # ExtraDW
'uir_r3_a5_k0_s1_e4_c512', # ConvNeXt
],
# stage 4, 7x7 in
[
'cn_r1_k1_s1_c960', # Conv
],
]
else:
assert False, f'Unknown variant {variant}.'
model_kwargs = dict(
block_args=decode_arch_def(arch_def, group_size=group_size),
head_bias=False,
head_norm=True,
num_features=num_features,
stem_size=stem_size,
fix_stem=channel_multiplier < 1.0,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=act_layer,
layer_scale_init_value=layer_scale_init_value,
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
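# Editor-added sketch (not part of the original file): the 'conv' MobileNet-V4 variants above are purely
# convolutional, while the 'hybrid' variants interleave 'mqa' attention blocks and enable layer scale.
# A quick comparison, assuming timm is installed and untrained weights are acceptable:
#
#   import timm, torch
#   conv_m = timm.create_model('mobilenetv4_conv_medium', pretrained=False)
#   hyb_m = timm.create_model('mobilenetv4_hybrid_medium', pretrained=False)
#   x = torch.randn(1, 3, 224, 224)
#   print(conv_m(x).shape, hyb_m(x).shape)   # both (1, 1000)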
def _cfg(url: str = '', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'mobilenetv3_large_075.untrained': _cfg(url=''),
'mobilenetv3_large_100.ra_in1k': _cfg(
interpolation='bicubic',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth',
hf_hub_id='timm/'),
'mobilenetv3_large_100.ra4_e3600_r224_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0),
'mobilenetv3_large_100.miil_in21k_ft_in1k': _cfg(
interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.),
origin_url='https://github.com/Alibaba-MIIL/ImageNet21K',
paper_ids='arXiv:2104.10972v4',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_1k_miil_78_0-66471c13.pth',
hf_hub_id='timm/'),
'mobilenetv3_large_100.miil_in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_in21k_miil-d71cc17b.pth',
hf_hub_id='timm/',
origin_url='https://github.com/Alibaba-MIIL/ImageNet21K',
paper_ids='arXiv:2104.10972v4',
interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.), num_classes=11221),
'mobilenetv3_large_150d.ra4_e3600_r256_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 256, 256), crop_pct=0.95, pool_size=(8, 8), test_input_size=(3, 320, 320), test_crop_pct=1.0),
'mobilenetv3_small_050.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_050_lambc-4b7bbe87.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'mobilenetv3_small_075.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_075_lambc-384766db.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'mobilenetv3_small_100.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_100_lamb-266a294c.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'mobilenetv3_rw.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'tf_mobilenetv3_large_075.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_large_100.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_large_minimal_100.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_075.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_100.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_minimal_100.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'fbnetv3_b.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_b_224-ead5d2a1.pth',
hf_hub_id='timm/',
test_input_size=(3, 256, 256), crop_pct=0.95),
'fbnetv3_d.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_d_224-c98bce42.pth',
hf_hub_id='timm/',
test_input_size=(3, 256, 256), crop_pct=0.95),
'fbnetv3_g.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_g_240-0b1df83b.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), test_input_size=(3, 288, 288), crop_pct=0.95, pool_size=(8, 8)),
"lcnet_035.untrained": _cfg(),
"lcnet_050.ra2_in1k": _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_050-f447553b.pth',
hf_hub_id='timm/',
interpolation='bicubic',
),
"lcnet_075.ra2_in1k": _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_075-318cad2c.pth',
hf_hub_id='timm/',
interpolation='bicubic',
),
"lcnet_100.ra2_in1k": _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_100-a929038c.pth',
hf_hub_id='timm/',
interpolation='bicubic',
),
"lcnet_150.untrained": _cfg(),
'mobilenetv4_conv_small_035.untrained': _cfg(
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'),
'mobilenetv4_conv_small_050.e3000_r224_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'),
'mobilenetv4_conv_small.e2400_r224_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'),
'mobilenetv4_conv_small.e1200_r224_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'),
'mobilenetv4_conv_small.e3600_r256_in1k': _cfg(
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95,
test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_medium.e500_r256_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_medium.e500_r224_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_medium.e250_r384_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, interpolation='bicubic'),
'mobilenetv4_conv_medium.e180_r384_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_medium.e180_ad_r384_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_medium.e250_r384_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_large.e600_r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_large.e500_r256_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium.ix_e550_r256_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium.ix_e550_r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium.e500_r224_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium.e200_r256_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_large.ix_e600_r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_large.e600_r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'),
# experimental
'mobilenetv4_conv_aa_medium.untrained': _cfg(
# hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, interpolation='bicubic'),
'mobilenetv4_conv_blur_medium.e500_r224_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14),
crop_pct=0.95, test_input_size=(3, 544, 544), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 480, 480), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_aa_large.e600_r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 480, 480), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_aa_large.e230_r384_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium_075.untrained': _cfg(
# hf_hub_id='timm/',
crop_pct=0.95, interpolation='bicubic'),
'mobilenetv4_hybrid_large_075.untrained': _cfg(
# hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, interpolation='bicubic'),
})
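# Editor-added note (not in the original file): the keys above follow the '<model>.<pretrained_tag>'
# convention, and a specific tag can be requested directly, assuming timm is installed and network
# access is available for the weight download:
#
#   import timm
#   m = timm.create_model('mobilenetv3_large_100.miil_in21k_ft_in1k', pretrained=True)
#   # omitting the tag, timm.create_model('mobilenetv3_large_100', pretrained=True), uses the default tag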
@register_model
def mobilenetv3_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_large_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_large_150d(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_large_150d', 1.5, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_small_050(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_small_050', 0.50, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_small_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_small_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_rw(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_large_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_large_minimal_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_small_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_small_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_small_minimal_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def fbnetv3_b(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" FBNetV3-B """
model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs)
return model
@register_model
def fbnetv3_d(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" FBNetV3-D """
model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs)
return model
@register_model
def fbnetv3_g(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" FBNetV3-G """
model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_035(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 0.35"""
model = _gen_lcnet('lcnet_035', 0.35, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_050(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 0.5"""
model = _gen_lcnet('lcnet_050', 0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 1.0"""
model = _gen_lcnet('lcnet_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 1.0"""
model = _gen_lcnet('lcnet_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_150(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 1.5"""
model = _gen_lcnet('lcnet_150', 1.5, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_conv_small_035(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 """
model = _gen_mobilenet_v4('mobilenetv4_conv_small_035', 0.35, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_conv_small_050(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 """
model = _gen_mobilenet_v4('mobilenetv4_conv_small_050', 0.50, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_conv_small(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 """
model = _gen_mobilenet_v4('mobilenetv4_conv_small', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_conv_medium(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 """
model = _gen_mobilenet_v4('mobilenetv4_conv_medium', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_conv_large(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 """
model = _gen_mobilenet_v4('mobilenetv4_conv_large', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_hybrid_medium(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 Hybrid """
model = _gen_mobilenet_v4('mobilenetv4_hybrid_medium', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_hybrid_large(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 Hybrid"""
model = _gen_mobilenet_v4('mobilenetv4_hybrid_large', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_conv_aa_medium(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 w/ AvgPool AA """
model = _gen_mobilenet_v4('mobilenetv4_conv_aa_medium', 1.0, pretrained=pretrained, aa_layer='avg', **kwargs)
return model
@register_model
def mobilenetv4_conv_blur_medium(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 Conv w/ Blur AA """
model = _gen_mobilenet_v4('mobilenetv4_conv_blur_medium', 1.0, pretrained=pretrained, aa_layer='blurpc', **kwargs)
return model
@register_model
def mobilenetv4_conv_aa_large(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 w/ AvgPool AA """
model = _gen_mobilenet_v4('mobilenetv4_conv_aa_large', 1.0, pretrained=pretrained, aa_layer='avg', **kwargs)
return model
@register_model
def mobilenetv4_hybrid_medium_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 Hybrid """
model = _gen_mobilenet_v4('mobilenetv4_hybrid_medium_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_hybrid_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 Hybrid"""
model = _gen_mobilenet_v4('mobilenetv4_hybrid_large_075', 0.75, pretrained=pretrained, **kwargs)
return model
register_model_deprecations(__name__, {
'mobilenetv3_large_100_miil': 'mobilenetv3_large_100.miil_in21k_ft_in1k',
'mobilenetv3_large_100_miil_in21k': 'mobilenetv3_large_100.miil_in21k',
})
| pytorch-image-models/timm/models/mobilenetv3.py/0 | {
"file_path": "pytorch-image-models/timm/models/mobilenetv3.py",
"repo_id": "pytorch-image-models",
"token_count": 31009
} |
""" ResNeSt Models
Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955
Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang
Modified for torchscript compat, and consistency with timm by Ross Wightman
"""
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SplitAttn
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from .resnet import ResNet
class ResNestBottleneck(nn.Module):
"""ResNet Bottleneck
"""
# pylint: disable=unused-argument
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
radix=1,
cardinality=1,
base_width=64,
avd=False,
avd_first=False,
is_first=False,
reduce_first=1,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
aa_layer=None,
drop_block=None,
drop_path=None,
):
super(ResNestBottleneck, self).__init__()
assert reduce_first == 1 # not supported
assert attn_layer is None # not supported
assert aa_layer is None # TODO not yet supported
assert drop_path is None # TODO not yet supported
group_width = int(planes * (base_width / 64.)) * cardinality
first_dilation = first_dilation or dilation
if avd and (stride > 1 or is_first):
avd_stride = stride
stride = 1
else:
avd_stride = 0
self.radix = radix
self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
self.bn1 = norm_layer(group_width)
self.act1 = act_layer(inplace=True)
self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None
if self.radix >= 1:
self.conv2 = SplitAttn(
group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation,
dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_layer=drop_block)
self.bn2 = nn.Identity()
self.drop_block = nn.Identity()
self.act2 = nn.Identity()
else:
self.conv2 = nn.Conv2d(
group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation,
dilation=first_dilation, groups=cardinality, bias=False)
self.bn2 = norm_layer(group_width)
self.drop_block = drop_block() if drop_block is not None else nn.Identity()
self.act2 = act_layer(inplace=True)
self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None
self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False)
self.bn3 = norm_layer(planes*4)
self.act3 = act_layer(inplace=True)
self.downsample = downsample
def zero_init_last(self):
if getattr(self.bn3, 'weight', None) is not None:
nn.init.zeros_(self.bn3.weight)
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.act1(out)
if self.avd_first is not None:
out = self.avd_first(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.drop_block(out)
out = self.act2(out)
if self.avd_last is not None:
out = self.avd_last(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out += shortcut
out = self.act3(out)
return out
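# Editor-added sketch (not in the original file): channel bookkeeping for a standalone block. With
# expansion = 4 the block maps inplanes -> planes * 4, so it only runs without a downsample module
# when inplanes already equals planes * 4:
#
#   import torch
#   block = ResNestBottleneck(256, 64, radix=2, cardinality=1, base_width=64, avd=True, avd_first=False)
#   out = block(torch.randn(1, 256, 56, 56))   # -> (1, 256, 56, 56): conv1 256->64, SplitAttn 64->64, conv3 64->256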
def _create_resnest(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ResNet,
variant,
pretrained,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1.0', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'resnest14d.gluon_in1k': _cfg(hf_hub_id='timm/'),
'resnest26d.gluon_in1k': _cfg(hf_hub_id='timm/'),
'resnest50d.in1k': _cfg(hf_hub_id='timm/'),
'resnest101e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8)),
'resnest200e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'),
'resnest269e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'),
'resnest50d_4s2x40d.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic'),
'resnest50d_1s4x24d.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic')
})
@register_model
def resnest14d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-14d model. Weights ported from GluonCV.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[1, 1, 1, 1],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest14d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest26d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-26d model. Weights ported from GluonCV.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[2, 2, 2, 2],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest26d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-50d model. Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest50d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest101e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 23, 3],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest101e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest200e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 24, 36, 3],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest200e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest269e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 30, 48, 8],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest269e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d_4s2x40d(pretrained=False, **kwargs) -> ResNet:
"""ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2,
block_args=dict(radix=4, avd=True, avd_first=True))
return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d_1s4x24d(pretrained=False, **kwargs) -> ResNet:
"""ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4,
block_args=dict(radix=1, avd=True, avd_first=True))
return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
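# Editor-added note (not in the original file): the ablation variant names above encode the block config,
# e.g. '4s2x40d' ~ radix=4 splits, cardinality=2 groups, base_width=40 with a deep ('d') stem, and
# '1s4x24d' ~ radix=1, cardinality=4, base_width=24, matching the block_args passed in each function.
#
#   import timm
#   m = timm.create_model('resnest50d_4s2x40d', pretrained=False)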
| pytorch-image-models/timm/models/resnest.py/0 | {
"file_path": "pytorch-image-models/timm/models/resnest.py",
"repo_id": "pytorch-image-models",
"token_count": 4439
} |
""" Visformer
Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533
From original at https://github.com/danczs/Visformer
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier, use_fused_attn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['Visformer']
class SpatialMlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.,
group=8,
spatial_conv=False,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = to_2tuple(drop)
self.in_features = in_features
self.out_features = out_features
self.spatial_conv = spatial_conv
if self.spatial_conv:
if group < 2: # net setting
hidden_features = in_features * 5 // 6
else:
hidden_features = in_features * 2
self.hidden_features = hidden_features
self.group = group
self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False)
self.act1 = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
if self.spatial_conv:
self.conv2 = nn.Conv2d(
hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False)
self.act2 = act_layer()
else:
self.conv2 = None
self.act2 = None
self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False)
self.drop3 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.conv1(x)
x = self.act1(x)
x = self.drop1(x)
if self.conv2 is not None:
x = self.conv2(x)
x = self.act2(x)
x = self.conv3(x)
x = self.drop3(x)
return x
class Attention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.num_heads = num_heads
head_dim = round(dim // num_heads * head_dim_ratio)
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn(experimental=True)
self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, C, H, W = x.shape
x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3)
q, k, v = x.unbind(0)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q.contiguous(), k.contiguous(), v.contiguous(),
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W)
x = self.proj(x)
x = self.proj_drop(x)
return x
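# Editor-added sketch (not in the original file): unlike token-sequence ViT attention, this module keeps
# the NCHW layout, computes qkv with a 1x1 conv, and attends over the flattened H*W positions.
#
#   import torch
#   attn = Attention(dim=384, num_heads=6)
#   y = attn(torch.randn(2, 384, 14, 14))   # -> (2, 384, 14, 14)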
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
head_dim_ratio=1.,
mlp_ratio=4.,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=LayerNorm2d,
group=8,
attn_disabled=False,
spatial_conv=False,
):
super().__init__()
self.spatial_conv = spatial_conv
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if attn_disabled:
self.norm1 = None
self.attn = None
else:
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
head_dim_ratio=head_dim_ratio,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.mlp = SpatialMlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
group=group,
spatial_conv=spatial_conv,
)
def forward(self, x):
if self.attn is not None:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class Visformer(nn.Module):
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
init_channels=32,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4.,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=LayerNorm2d,
attn_stage='111',
use_pos_embed=True,
spatial_conv='111',
vit_stem=False,
group=8,
global_pool='avg',
conv_init=False,
embed_norm=None,
):
super().__init__()
img_size = to_2tuple(img_size)
self.num_classes = num_classes
self.embed_dim = embed_dim
self.init_channels = init_channels
self.img_size = img_size
self.vit_stem = vit_stem
self.conv_init = conv_init
if isinstance(depth, (list, tuple)):
self.stage_num1, self.stage_num2, self.stage_num3 = depth
depth = sum(depth)
else:
self.stage_num1 = self.stage_num3 = depth // 3
self.stage_num2 = depth - self.stage_num1 - self.stage_num3
self.use_pos_embed = use_pos_embed
self.grad_checkpointing = False
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
# stage 1
if self.vit_stem:
self.stem = None
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // patch_size for x in img_size]
else:
if self.init_channels is None:
self.stem = None
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 2,
in_chans=in_chans,
embed_dim=embed_dim // 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 2) for x in img_size]
else:
self.stem = nn.Sequential(
nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(self.init_channels),
nn.ReLU(inplace=True)
)
img_size = [x // 2 for x in img_size]
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 4,
in_chans=self.init_channels,
embed_dim=embed_dim // 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 4) for x in img_size]
if self.use_pos_embed:
if self.vit_stem:
self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size))
else:
self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
else:
self.pos_embed1 = None
self.stage1 = nn.Sequential(*[
Block(
dim=embed_dim//2,
num_heads=num_heads,
head_dim_ratio=0.5,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[0] == '0'),
spatial_conv=(spatial_conv[0] == '1'),
)
for i in range(self.stage_num1)
])
# stage2
if not self.vit_stem:
self.patch_embed2 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 8,
in_chans=embed_dim // 2,
embed_dim=embed_dim,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 8) for x in img_size]
if self.use_pos_embed:
self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size))
else:
self.pos_embed2 = None
else:
self.patch_embed2 = None
self.stage2 = nn.Sequential(*[
Block(
dim=embed_dim,
num_heads=num_heads,
head_dim_ratio=1.0,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[1] == '0'),
spatial_conv=(spatial_conv[1] == '1'),
)
for i in range(self.stage_num1, self.stage_num1+self.stage_num2)
])
# stage 3
if not self.vit_stem:
self.patch_embed3 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 8,
in_chans=embed_dim,
embed_dim=embed_dim * 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 8) for x in img_size]
if self.use_pos_embed:
self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size))
else:
self.pos_embed3 = None
else:
self.patch_embed3 = None
self.stage3 = nn.Sequential(*[
Block(
dim=embed_dim * 2,
num_heads=num_heads,
head_dim_ratio=1.0,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[2] == '0'),
spatial_conv=(spatial_conv[2] == '1'),
)
for i in range(self.stage_num1+self.stage_num2, depth)
])
self.num_features = self.head_hidden_size = embed_dim if self.vit_stem else embed_dim * 2
self.norm = norm_layer(self.num_features)
# head
global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
self.global_pool = global_pool
self.head_drop = nn.Dropout(drop_rate)
self.head = head
# weights init
if self.use_pos_embed:
trunc_normal_(self.pos_embed1, std=0.02)
if not self.vit_stem:
trunc_normal_(self.pos_embed2, std=0.02)
trunc_normal_(self.pos_embed3, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
if self.conv_init:
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
else:
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^patch_embed1|pos_embed1|stem', # stem and embed
blocks=[
(r'^stage(\d+)\.(\d+)' if coarse else r'^stage(\d+)\.(\d+)', None),
(r'^(?:patch_embed|pos_embed)(\d+)', (0,)),
(r'^norm', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
if self.stem is not None:
x = self.stem(x)
# stage 1
x = self.patch_embed1(x)
if self.pos_embed1 is not None:
x = self.pos_drop(x + self.pos_embed1)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage1, x)
else:
x = self.stage1(x)
# stage 2
if self.patch_embed2 is not None:
x = self.patch_embed2(x)
if self.pos_embed2 is not None:
x = self.pos_drop(x + self.pos_embed2)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage2, x)
else:
x = self.stage2(x)
# stage3
if self.patch_embed3 is not None:
x = self.patch_embed3(x)
if self.pos_embed3 is not None:
x = self.pos_drop(x + self.pos_embed3)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage3, x)
else:
x = self.stage3(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(Visformer, variant, pretrained, **kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'visformer_tiny.in1k': _cfg(hf_hub_id='timm/'),
'visformer_small.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def visformer_tiny(pretrained=False, **kwargs) -> Visformer:
model_cfg = dict(
init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8,
attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
embed_norm=nn.BatchNorm2d)
model = _create_visformer('visformer_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def visformer_small(pretrained=False, **kwargs) -> Visformer:
model_cfg = dict(
init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8,
attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
embed_norm=nn.BatchNorm2d)
model = _create_visformer('visformer_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
# @register_model
# def visformer_net1(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=True, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net2(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net3(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net4(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net5(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
# spatial_conv='111', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net6(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net7(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000',
# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
| pytorch-image-models/timm/models/visformer.py/0 | {
"file_path": "pytorch-image-models/timm/models/visformer.py",
"repo_id": "pytorch-image-models",
"token_count": 10151
} |
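For reference (not part of the file above): a minimal sketch of instantiating one of the variants registered in this file through timm's model factory, assuming a `timm` installation that includes it.
```python
import torch
import timm

# Build the registered 'visformer_tiny' architecture; pretrained=True would pull the
# 'visformer_tiny.in1k' weights listed in default_cfgs.
model = timm.create_model("visformer_tiny", pretrained=False, num_classes=10)
model.eval()

# The default cfg declares a fixed 3x224x224 input size.
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)  # forward() -> forward_features() -> forward_head()
print(logits.shape)    # torch.Size([1, 10])
```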
""" Adafactor Optimizer
Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py
Modified by Ross Wightman to fix some issues with factorization dims for non nn.Linear layers
Original header/copyright below.
"""
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Optional, Tuple
import torch
from ._types import ParamsT
class Adafactor(torch.optim.Optimizer):
"""Implements Adafactor algorithm.
This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
(see https://arxiv.org/abs/1804.04235)
Note that this optimizer internally adjusts the learning rate depending on the
*scale_parameter*, *relative_step* and *warmup_init* options.
To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
`relative_step=False`.
    Args:
        params: iterable of parameters to optimize or dicts defining parameter groups
        lr: external learning rate
        eps: regularization constant applied to the squared gradient
        eps_scale: regularization constant applied to the parameter scale
clip_threshold: threshold of root-mean-square of final gradient update
decay_rate: coefficient used to compute running averages of square gradient
beta1: coefficient used for computing running averages of gradient
weight_decay: weight decay
scale_parameter: if True, learning rate is scaled by root-mean-square of parameter
warmup_init: time-dependent learning rate computation depends on whether warm-up initialization is being used
"""
def __init__(
self,
params: ParamsT,
lr: Optional[float] = None,
eps: float = 1e-30,
eps_scale: float = 1e-3,
clip_threshold: float = 1.0,
decay_rate: float = -0.8,
betas: Optional[Tuple[float, float]] = None,
weight_decay: float = 0.0,
scale_parameter: bool = True,
warmup_init: bool = False,
min_dim_size_to_factor: int = 16,
caution: bool = False,
):
relative_step = not lr
if warmup_init and not relative_step:
raise ValueError('warmup_init requires relative_step=True')
beta1 = None if betas is None else betas[0] # make it compat with standard betas arg
defaults = dict(
lr=lr,
eps=eps,
eps_scale=eps_scale,
clip_threshold=clip_threshold,
decay_rate=decay_rate,
beta1=beta1,
weight_decay=weight_decay,
scale_parameter=scale_parameter,
relative_step=relative_step,
warmup_init=warmup_init,
min_dim_size_to_factor=min_dim_size_to_factor,
caution=caution,
)
super(Adafactor, self).__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault('caution', False)
group.setdefault('min_dim_size_to_factor', 16)
@staticmethod
def _get_lr(param_group, param_state):
if param_group['relative_step']:
min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2
lr_t = min(min_step, 1.0 / math.sqrt(param_state['step']))
param_scale = 1.0
if param_group['scale_parameter']:
param_scale = max(param_group['eps_scale'], param_state['RMS'])
param_group['lr'] = lr_t * param_scale
return param_group['lr']
@staticmethod
def _get_options(param_group, param_shape, min_size_to_factor=16):
use_first_moment = param_group['beta1'] is not None
factored = None
ndim = len(param_shape)
# Use a simple heuristic to pick factorization row & col, note other PyTorch impl tend to
# always use -2, -1 BUT this will not pick correct dims for convolutions. This is a simple
# approach that should work in most cases, compare to the slightly more involved approach
# in AdafactorBigVision that sorts dims by size, please report if wrong dims chosen.
if ndim > 2 and param_shape[0] > min_size_to_factor and param_shape[1] > min_size_to_factor:
# nD convs in torch are ND + 2 dim weights with leading in/out chs
factored = 0, 1
elif ndim >= 2 and param_shape[-2] > min_size_to_factor and param_shape[-1] > min_size_to_factor:
# if the criteria above didn't match, test trailing dims for eligibility as per original impl
factored = ndim - 2, ndim - 1
return factored, use_first_moment
@staticmethod
def _rms(tensor):
return tensor.norm(2) / (tensor.numel() ** 0.5)
def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col, dim_col, dim_row):
# from our dim heuristic, always dim_col < dim_row, so col reduction dim for factored row = dim_col
r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=dim_col, keepdim=True)).rsqrt_().unsqueeze(dim_row)
c_factor = exp_avg_sq_col.unsqueeze(dim_col).rsqrt()
return torch.mul(r_factor, c_factor)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError('Adafactor does not support sparse gradients.')
state = self.state[p]
factored_dims, use_first_moment = self._get_options(
group,
grad.shape,
min_size_to_factor=group['min_dim_size_to_factor'],
)
# State Initialization
if len(state) == 0:
state['step'] = 0
if use_first_moment:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(grad)
if factored_dims is not None:
dim_col, dim_row = factored_dims
def _remove_dim(shape, dim):
return shape[:dim] + shape[dim + 1:]
state['exp_avg_sq_row'] = torch.zeros(_remove_dim(grad.shape, dim_row)).to(grad)
state['exp_avg_sq_col'] = torch.zeros(_remove_dim(grad.shape, dim_col)).to(grad)
else:
state['exp_avg_sq'] = torch.zeros_like(grad)
state['RMS'] = 0
else:
if use_first_moment:
state['exp_avg'] = state['exp_avg'].to(grad)
if factored_dims is not None:
state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
else:
state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)
p_fp32 = p
if p.dtype in {torch.float16, torch.bfloat16}:
p_fp32 = p_fp32.float()
state['step'] += 1
state['RMS'] = self._rms(p_fp32)
lr_t = self._get_lr(group, state)
beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
update = grad ** 2 + group['eps']
if factored_dims is not None:
dim_col, dim_row = factored_dims
exp_avg_sq_row = state['exp_avg_sq_row']
exp_avg_sq_col = state['exp_avg_sq_col']
exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=dim_row), alpha=1.0 - beta2t)
exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=dim_col), alpha=1.0 - beta2t)
# Approximation of exponential moving average of square of gradient
update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col, dim_col, dim_row)
update.mul_(grad)
else:
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
update = exp_avg_sq.rsqrt().mul_(grad)
update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
update.mul_(lr_t)
if use_first_moment:
exp_avg = state['exp_avg']
exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1'])
if group['caution']:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
mask = (exp_avg * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
update = exp_avg * mask
else:
update = exp_avg
if group['weight_decay'] != 0:
p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t)
p_fp32.add_(-update)
if p.dtype in {torch.float16, torch.bfloat16}:
p.copy_(p_fp32)
return loss
| pytorch-image-models/timm/optim/adafactor.py/0 | {
"file_path": "pytorch-image-models/timm/optim/adafactor.py",
"repo_id": "pytorch-image-models",
"token_count": 4921
} |
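A usage sketch (not part of the file above; the import path is assumed from the file location): the docstring's note about schedules means you either leave `lr=None` to use the internal relative-step schedule, or pass an explicit `lr` and disable `scale_parameter` to drive the learning rate yourself.
```python
import torch.nn as nn
from timm.optim.adafactor import Adafactor  # assumed import path

model = nn.Linear(512, 512)

# Internal schedule: lr=None -> relative_step=True; the LR is derived from the step
# count and (optionally) the parameter RMS via scale_parameter.
opt_auto = Adafactor(model.parameters(), lr=None, scale_parameter=True, warmup_init=True)

# External schedule: pass a fixed lr and turn off parameter scaling, then adjust the
# LR yourself (e.g. with a timm scheduler or torch.optim.lr_scheduler).
opt_manual = Adafactor(model.parameters(), lr=1e-3, scale_parameter=False, weight_decay=0.01)
```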
""" NAdamW Optimizer
Based on simplified algorithm in https://github.com/mlcommons/algorithmic-efficiency/tree/main/baselines/nadamw
Added multi-tensor (foreach) path.
"""
import math
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from ._types import ParamsT
# Modified from github.com/pytorch/pytorch/blob/v1.12.1/torch/optim/adamw.py.
class NAdamW(torch.optim.Optimizer):
""" Implements NAdamW algorithm.
See Table 1 in https://arxiv.org/abs/1910.05446 for the implementation of
the NAdam algorithm (there is also a comment in the code which highlights
the only difference of NAdamW and AdamW).
For further details regarding the algorithm we refer to
- Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101
- On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ
Args:
params: iterable of parameters to optimize or dicts defining parameter groups
lr: learning rate
betas: coefficients used for computing running averages of gradient and its square
eps: term added to the denominator to improve numerical stability
weight_decay: weight decay coefficient
        caution: apply the 'Cautious Optimizers' update masking (https://arxiv.org/abs/2411.16085)
"""
def __init__(
self,
params: ParamsT,
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 1e-2,
caution: bool = False,
maximize: bool = False,
foreach: Optional[bool] = None,
capturable: bool = False,
):
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
caution=caution,
foreach=foreach,
maximize=maximize,
capturable=capturable,
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
state_values = list(self.state.values())
step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
if not step_is_tensor:
for s in state_values:
s['step'] = torch.tensor(float(s['step']))
for group in self.param_groups:
group.setdefault('caution', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
state_steps = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('NAdamW does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = torch.tensor(0.)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
state_steps.append(state['step'])
nadamw(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
eps=group['eps'],
caution=group['caution'],
maximize=group['maximize'],
capturable=group['capturable'],
)
return loss
def nadamw(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
foreach: Optional[bool] = None,
capturable: bool = False,
*,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
caution: bool,
maximize: bool,
) -> None:
r"""Functional API that performs NAdamW algorithm computation.
See NAdamW class for details.
"""
if not all(isinstance(t, torch.Tensor) for t in state_steps):
raise RuntimeError(
'API has changed, `state_steps` argument must contain a list of' +
' singleton tensors')
if foreach is None:
try:
# cannot do foreach if this overload doesn't exist when caution enabled
foreach = not caution or 'Scalar' in torch.ops.aten._foreach_maximum_.overloads()
except:
foreach = False
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_nadamw
else:
func = _single_tensor_nadamw
func(
params,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
beta1=beta1,
beta2=beta2,
lr=lr,
weight_decay=weight_decay,
eps=eps,
caution=caution,
maximize=maximize,
capturable=capturable,
)
def _single_tensor_nadamw(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
*,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
caution: bool,
maximize: bool,
capturable: bool
):
for i, param in enumerate(params):
grad = grads[i] if not maximize else -grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step_t = state_steps[i]
# Update step.
step_t += 1
# Perform stepweight decay.
param.mul_(1. - lr * weight_decay)
# Decay the first and second moment running average coefficient.
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if capturable:
step = step_t
# 1 - beta1 ** step can't be captured in a CUDA graph, even if step is a CUDA tensor
# (incurs "RuntimeError: CUDA error: operation not permitted when stream is capturing")
bias_correction1 = 1 - torch.pow(beta1, step)
bias_correction2 = 1 - torch.pow(beta2, step)
step_size = lr / bias_correction1
step_size_neg = step_size.neg()
bias_correction2_sqrt = bias_correction2.sqrt()
# Only difference between NAdamW and AdamW in this implementation.
# The official PyTorch implementation of NAdam uses a different algorithm.
exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1)
denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)
if caution:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
# FIXME not 100% sure if this remains capturable?
mask = (exp_avg * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
exp_avg.mul_(mask)
param.addcdiv_(exp_avg, denom)
else:
step = step_t.item()
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
step_size = lr / bias_correction1
bias_correction2_sqrt = math.sqrt(bias_correction2)
# Apply Nesterov. Only difference between NAdamW and AdamW in this implementation.
# The official PyTorch implementation of NAdam uses a different algorithm.
exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1)
denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)
if caution:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
mask = (exp_avg * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
exp_avg.mul_(mask)
param.addcdiv_(exp_avg, denom, value=-step_size)
def _multi_tensor_nadamw(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
*,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
caution: bool,
maximize: bool,
capturable: bool,
):
if len(params) == 0:
return
if capturable:
assert all(
p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)
), "If capturable=True, params and state_steps must be CUDA tensors."
if maximize:
grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment]
grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads]
exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs]
exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avg_sqs]
params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params]
# update steps
torch._foreach_add_(state_steps, 1)
# Perform stepweight decay
torch._foreach_mul_(params, 1 - lr * weight_decay)
# Decay the first and second moment running average coefficient
torch._foreach_mul_(exp_avgs, beta1)
torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)
torch._foreach_mul_(exp_avg_sqs, beta2)
torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2)
if capturable:
# TODO: use foreach_pow if/when foreach_pow is added
bias_correction1 = [torch.pow(beta1, step) for step in state_steps]
bias_correction2 = [torch.pow(beta2, step) for step in state_steps]
# foreach_sub doesn't allow a scalar as the first arg
torch._foreach_sub_(bias_correction1, 1)
torch._foreach_sub_(bias_correction2, 1)
torch._foreach_neg_(bias_correction1)
torch._foreach_neg_(bias_correction2)
# foreach_div doesn't allow a scalar as the first arg
step_size = torch._foreach_div(bias_correction1, lr)
torch._foreach_reciprocal_(step_size)
torch._foreach_neg_(step_size)
bias_correction2_sqrt = torch._foreach_sqrt(bias_correction2)
# Only difference between NAdamW and AdamW in this implementation.
# The official PyTorch implementation of NAdam uses a different algorithm.
exp_avgs = torch._foreach_mul(exp_avgs, beta1)
torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)
exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
torch._foreach_div_(
exp_avg_sq_sqrt,
torch._foreach_mul(bias_correction2_sqrt, step_size)
)
eps_over_step_size = torch._foreach_div(step_size, eps)
torch._foreach_reciprocal_(eps_over_step_size)
denom = torch._foreach_add(exp_avg_sq_sqrt, eps_over_step_size)
if caution:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
masks = torch._foreach_mul(exp_avgs, grads)
masks = [(m > 0).to(g.dtype) for m, g in zip(masks, grads)] # capturable?
mask_scale = [m.mean() for m in masks]
torch._foreach_maximum_(mask_scale, 1e-3)
torch._foreach_div_(masks, mask_scale)
torch._foreach_mul_(exp_avgs, masks)
torch._foreach_addcdiv_(params, exp_avgs, denom)
else:
bias_correction1 = [1 - beta1 ** step.item() for step in state_steps]
bias_correction2 = [1 - beta2 ** step.item() for step in state_steps]
step_size = [(lr / bc) * -1 for bc in bias_correction1]
bias_correction2_sqrt = [math.sqrt(bc) for bc in bias_correction2]
# Apply Nesterov. Only difference between NAdamW and AdamW in this implementation.
# The official PyTorch implementation of NAdam uses a different algorithm.
exp_avgs = torch._foreach_mul(exp_avgs, beta1)
torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)
exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
denom = torch._foreach_add(exp_avg_sq_sqrt, eps)
if caution:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
masks = torch._foreach_mul(exp_avgs, grads)
masks = [(m > 0).to(g.dtype) for m, g in zip(masks, grads)]
mask_scale = [m.mean() for m in masks]
torch._foreach_maximum_(mask_scale, 1e-3)
torch._foreach_div_(masks, mask_scale)
torch._foreach_mul_(exp_avgs, masks)
torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
| pytorch-image-models/timm/optim/nadamw.py/0 | {
"file_path": "pytorch-image-models/timm/optim/nadamw.py",
"repo_id": "pytorch-image-models",
"token_count": 6905
} |
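A brief construction sketch (illustrative only; the import path is assumed): NAdamW follows the standard `torch.optim` interface, with `caution=True` enabling the masked update described in the comments and `foreach=True` requesting the multi-tensor path.
```python
import torch
import torch.nn as nn
from timm.optim.nadamw import NAdamW  # assumed import path

model = nn.Linear(128, 10)
opt = NAdamW(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-2,
             caution=True, foreach=True)

x, y = torch.randn(32, 128), torch.randint(0, 10, (32,))
loss = nn.functional.cross_entropy(model(x), y)
loss.backward()
opt.step()       # dispatches to _multi_tensor_nadamw when the foreach path is usable
opt.zero_grad()
```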
""" TanH Scheduler
TanH schedule with warmup, cycle/restarts, noise.
Hacked together by / Copyright 2021 Ross Wightman
"""
import logging
import math
import numpy as np
import torch
from typing import List
from .scheduler import Scheduler
_logger = logging.getLogger(__name__)
class TanhLRScheduler(Scheduler):
"""
    Hyperbolic-Tangent decay with restarts.
This is described in the paper https://arxiv.org/abs/1806.01593
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
t_initial: int,
lb: float = -7.,
ub: float = 3.,
lr_min: float = 0.,
cycle_mul: float = 1.,
cycle_decay: float = 1.,
cycle_limit: int = 1,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=False,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True,
) -> None:
super().__init__(
optimizer,
param_group_field="lr",
t_in_epochs=t_in_epochs,
noise_range_t=noise_range_t,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize,
)
assert t_initial > 0
assert lr_min >= 0
assert lb < ub
assert cycle_limit >= 0
assert warmup_t >= 0
assert warmup_lr_init >= 0
self.lb = lb
self.ub = ub
self.t_initial = t_initial
self.lr_min = lr_min
self.cycle_mul = cycle_mul
self.cycle_decay = cycle_decay
self.cycle_limit = cycle_limit
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
if self.warmup_t:
t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t)
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t: int) -> List[float]:
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
if self.cycle_mul != 1:
i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul))
t_i = self.cycle_mul ** i * self.t_initial
t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial
else:
i = t // self.t_initial
t_i = self.t_initial
t_curr = t - (self.t_initial * i)
if i < self.cycle_limit:
gamma = self.cycle_decay ** i
lr_max_values = [v * gamma for v in self.base_values]
tr = t_curr / t_i
lrs = [
self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1. - tr) + self.ub * tr))
for lr_max in lr_max_values
]
else:
lrs = [self.lr_min for _ in self.base_values]
return lrs
def get_cycle_length(self, cycles=0):
cycles = max(1, cycles or self.cycle_limit)
if self.cycle_mul == 1.0:
t = self.t_initial * cycles
else:
t = int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
return t + self.warmup_t if self.warmup_prefix else t
| pytorch-image-models/timm/scheduler/tanh_lr.py/0 | {
"file_path": "pytorch-image-models/timm/scheduler/tanh_lr.py",
"repo_id": "pytorch-image-models",
"token_count": 2000
} |
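A rough usage sketch (assuming the scheduler is importable as shown): timm schedulers are stepped once per epoch via `step(epoch)`, and the warmup/cycle arguments map directly onto the constructor above.
```python
import torch
from timm.scheduler.tanh_lr import TanhLRScheduler  # assumed import path

model = torch.nn.Linear(16, 16)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

scheduler = TanhLRScheduler(
    optimizer,
    t_initial=100,      # length of the first cycle, in epochs
    lr_min=1e-5,
    warmup_t=5,         # 5 warmup epochs
    warmup_lr_init=1e-6,
    cycle_limit=1,
)

num_epochs = scheduler.get_cycle_length()  # 100 here (warmup is not prefixed by default)
for epoch in range(num_epochs):
    # ... train for one epoch ...
    scheduler.step(epoch + 1)  # set the LR for the next epoch
```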
<!---
Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<p align="center">
<!-- Uncomment when CircleCI is set up
<a href="https://circleci.com/gh/huggingface/accelerate"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master"></a>
-->
<a href="https://github.com/huggingface/smolagents/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/github/license/huggingface/smolagents.svg?color=blue"></a>
<a href="https://huggingface.co/docs/smolagents"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/smolagents/index.html.svg?down_color=red&down_message=offline&up_message=online"></a>
<a href="https://github.com/huggingface/smolagents/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/smolagents.svg"></a>
<a href="https://github.com/huggingface/smolagents/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
</p>
<h3 align="center">
<div style="display:flex;flex-direction:row;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot.png" alt="Hugging Face mascot as James Bond" width=100px>
<p>smolagents - a smol library to build great agents!</p>
</div>
</h3>
`smolagents` is a library that enables you to run powerful agents in a few lines of code. It offers:
✨ **Simplicity**: the logic for agents fits in 1,000 lines of code (see [agents.py](https://github.com/huggingface/smolagents/blob/main/src/smolagents/agents.py)). We kept abstractions to their minimal shape above raw code!
🧑💻 **First-class support for Code Agents**. Our [`CodeAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.CodeAgent) writes its actions in code (as opposed to "agents being used to write code"). To make it secure, we support executing in sandboxed environments via [E2B](https://e2b.dev/).
🤗 **Hub integrations**: you can [share/pull tools to/from the Hub](https://huggingface.co/docs/smolagents/reference/tools#smolagents.Tool.from_hub), and more is to come!
🌐 **Model-agnostic**: smolagents supports any LLM. It can be a local `transformers` or `ollama` model, one of [many providers on the Hub](https://huggingface.co/blog/inference-providers), or any model from OpenAI, Anthropic and many others via our [LiteLLM](https://www.litellm.ai/) integration.
👁️ **Modality-agnostic**: Agents support text, vision, video, even audio inputs! Cf [this tutorial](https://huggingface.co/docs/smolagents/examples/web_browser) for vision.
🛠️ **Tool-agnostic**: you can use tools from [LangChain](https://huggingface.co/docs/smolagents/reference/tools#smolagents.Tool.from_langchain), [Anthropic's MCP](https://huggingface.co/docs/smolagents/reference/tools#smolagents.ToolCollection.from_mcp), you can even use a [Hub Space](https://huggingface.co/docs/smolagents/reference/tools#smolagents.Tool.from_space) as a tool.
Full documentation can be found [here](https://huggingface.co/docs/smolagents/index).
> [!NOTE]
> Check out our [launch blog post](https://huggingface.co/blog/smolagents) to learn more about `smolagents`!
## Table of Contents
- [Quick demo](#quick-demo)
- [Command Line Interface](#command-line-interface)
- [Code agents](#code-agents)
- [How smol is this library?](#how-smol-is-this-library)
- [How strong are open models for agentic workflows?](#how-strong-are-open-models-for-agentic-workflows)
- [Contribute](#contribute)
- [Cite smolagents](#cite-smolagents)
## Quick demo
First install the package.
```bash
pip install smolagents
```
Then define your agent, give it the tools it needs and run it!
```py
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel
model = HfApiModel()
agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model)
agent.run("How many seconds would it take for a leopard at full speed to run through Pont des Arts?")
```
https://github.com/user-attachments/assets/cd0226e2-7479-4102-aea0-57c22ca47884
Our library is LLM-agnostic: you could switch the example above to any inference provider.
<details>
<summary> <b>HfApiModel, gateway for 4 inference providers</b></summary>
```py
from smolagents import HfApiModel
model = HfApiModel(
model_id="deepseek-ai/DeepSeek-R1",
provider="together",
)
```
</details>
<details>
<summary> <b>LiteLLM to access 100+ LLMs</b></summary>
```py
from smolagents import LiteLLMModel
model = LiteLLMModel(
"anthropic/claude-3-5-sonnet-latest",
temperature=0.2,
api_key=os.environ["ANTHROPIC_API_KEY"]
)
```
</details>
<details>
<summary> <b>OpenAI-compatible servers</b></summary>
```py
import os
from smolagents import OpenAIServerModel
model = OpenAIServerModel(
model_id="deepseek-ai/DeepSeek-R1",
api_base="https://api.together.xyz/v1/", # Leave this blank to query OpenAI servers.
api_key=os.environ["TOGETHER_API_KEY"], # Switch to the API key for the server you're targeting.
)
```
</details>
<details>
<summary> <b>Local `transformers` model</b></summary>
```py
from smolagents import TransformersModel
model = TransformersModel(
model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
max_new_tokens=4096,
device_map="auto"
)
```
</details>
<details>
<summary> <b>Azure models</b></summary>
```py
import os
from smolagents import AzureOpenAIServerModel
model = AzureOpenAIServerModel(
model_id = os.environ.get("AZURE_OPENAI_MODEL"),
azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
api_version=os.environ.get("OPENAI_API_VERSION")
)
```
</details>
## Command Line Interface
You can run agents from the CLI using two commands: `smolagent` and `webagent`. `smolagent` is a generalist command to run a multi-step `CodeAgent` that can be equipped with various tools, while `webagent` is a specific web-browsing agent using [helium](https://github.com/mherrmann/helium).
**Web Browser Agent in CLI**
`webagent` allows users to automate web browsing tasks. It uses the [helium](https://github.com/mherrmann/helium) library to interact with web pages and uses defined tools to browse the web. Read more about this agent [here](https://github.com/huggingface/smolagents/blob/main/src/smolagents/vision_web_browser.py).
Run the following command to get started:
```bash
webagent {YOUR_PROMPT_HERE} --model "LiteLLMModel" --model-id "gpt-4o"
```
For instance:
```bash
webagent --prompt "go to xyz.com/women, get to sale section, click the first clothing item you see. Get the product details, and the price, return them. note that I'm shopping from France"
```
We redacted the website here, modify it with the website of your choice.
**CodeAgent in CLI**
Use `smolagent` to run a multi-step agent with [tools](https://huggingface.co/docs/smolagents/en/reference/tools). It uses a web search tool by default.
You can easily get started with `$ smolagent {YOUR_PROMPT_HERE}`. You can customize this as follows (more details [here](https://github.com/huggingface/smolagents/blob/main/src/smolagents/cli.py)).
```bash
smolagent {YOUR_PROMPT_HERE} --model-type "HfApiModel" --model-id "Qwen/Qwen2.5-Coder-32B-Instruct" --imports "pandas numpy" --tools "web_search translation"
```
For instance:
```bash
smolagent "Plan a trip to Tokyo, Kyoto and Osaka between Mar 28 and Apr 7. Allocate time according to number of public attraction in each, and optimize for distance and travel time. Bring all the public transportation options."
```
## Code agents?
In our [`CodeAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.CodeAgent), the LLM engine writes its actions in code. This approach is demonstrated to work better than the current industry practice of letting the LLM output a dictionary of the tools it wants to call: it [uses 30% fewer steps](https://huggingface.co/papers/2402.01030) (thus 30% fewer LLM calls) and [reaches higher performance on difficult benchmarks](https://huggingface.co/papers/2411.01747). Head to [our high-level intro to agents](https://huggingface.co/docs/smolagents/conceptual_guides/intro_agents) to learn more on that.
Especially, since code execution can be a security concern (arbitrary code execution!), we provide options at runtime:
- a secure python interpreter to run code more safely in your environment (more secure than raw code execution but still risky)
- a sandboxed environment using [E2B](https://e2b.dev/) (removes the risk to your own system).
On top of this [`CodeAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.CodeAgent) class, we still support the standard [`ToolCallingAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.ToolCallingAgent) that writes actions as JSON/text blobs. But we recommend always using `CodeAgent`.
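For illustration, both agent classes share the same construction pattern, so switching between them is a one-line change (sketch, reusing the web search tool from the quick demo):
```py
from smolagents import CodeAgent, ToolCallingAgent, DuckDuckGoSearchTool, HfApiModel

model = HfApiModel()
tools = [DuckDuckGoSearchTool()]

code_agent = CodeAgent(tools=tools, model=model)         # actions written as Python snippets
json_agent = ToolCallingAgent(tools=tools, model=model)  # actions written as JSON tool calls
```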
## How smol is this library?
We strived to keep abstractions to a strict minimum: the main code in `agents.py` has <1,000 lines of code.
Still, we implement several types of agents: `CodeAgent` writes its actions as Python code snippets, and the more classic `ToolCallingAgent` leverages built-in tool calling methods. We also have multi-agent hierarchies, import from tool collections, remote code execution, vision models...
By the way, why use a framework at all? Well, because a big part of this stuff is non-trivial. For instance, the code agent has to keep a consistent format for code throughout its system prompt, its parser, the execution. So our framework handles this complexity for you. But of course we still encourage you to hack into the source code and use only the bits that you need, to the exclusion of everything else!
## How strong are open models for agentic workflows?
We've created [`CodeAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.CodeAgent) instances with some leading models, and compared them on [this benchmark](https://huggingface.co/datasets/m-ric/agents_medium_benchmark_2) that gathers questions from a few different benchmarks to propose a varied blend of challenges.
[Find the benchmarking code here](https://github.com/huggingface/smolagents/blob/main/examples/benchmark.ipynb) for more detail on the agentic setup used, and see a comparison of using LLMs as code agents versus a vanilla setup (spoiler: code agents work better).
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/benchmark_code_agents.jpeg" alt="benchmark of different models on agentic workflows. Open model DeepSeek-R1 beats closed-source models." width=60% max-width=500px>
</p>
This comparison shows that open-source models can now take on the best closed models!
## Contribute
To contribute, follow our [contribution guide](https://github.com/huggingface/smolagents/blob/main/CONTRIBUTING.md).
At any moment, feel welcome to open an issue, citing your exact error traces and package versions if it's a bug.
It's often even better to open a PR with your proposed fixes/changes!
To install dev dependencies, run:
```
pip install -e ".[dev]"
```
When making changes to the codebase, please check that they follow the repo's code quality requirements. To check the code quality of the source code, run:
```
make quality
```
If the checks fail, you can run the formatter with:
```
make style
```
And commit the changes.
To run tests locally, run this command:
```bash
make test
```
## Cite smolagents
If you use `smolagents` in your publication, please cite it by using the following BibTeX entry.
```bibtex
@Misc{smolagents,
title = {`smolagents`: a smol library to build great agentic systems.},
author = {Aymeric Roucher and Albert Villanova del Moral and Thomas Wolf and Leandro von Werra and Erik Kaunismäki},
howpublished = {\url{https://github.com/huggingface/smolagents}},
year = {2025}
}
```
| smolagents/README.md/0 | {
"file_path": "smolagents/README.md",
"repo_id": "smolagents",
"token_count": 4021
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Inspecting runs with OpenTelemetry
[[open-in-colab]]
> [!TIP]
> If you're new to building agents, make sure to first read the [intro to agents](../conceptual_guides/intro_agents) and the [guided tour of smolagents](../guided_tour).
### Why log your agent runs?
Agent runs are complicated to debug.
Validating that a run went properly is hard, since agent workflows are [unpredictable by design](../conceptual_guides/intro_agents) (if they were predictable, you'd just be using good old code).
And inspecting a run is hard as well: multi-step agents tend to quickly fill a console with logs, and most of the errors are just "LLM dumb" kind of errors, from which the LLM auto-corrects in the next step by writing better code or tool calls.
So using instrumentation to record agent runs is necessary in production for later inspection and monitoring!
We've adopted the [OpenTelemetry](https://opentelemetry.io/) standard for instrumenting agent runs.
This means that you can just run some instrumentation code, then run your agents normally, and everything gets logged into your platform.
Here's how it then looks like on the platform:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/inspect_run_phoenix.gif"/>
</div>
### Setting up telemetry with Arize AI Phoenix
First install the required packages. Here we install [Phoenix by Arize AI](https://github.com/Arize-ai/phoenix) because that's a good solution to collect and inspect the logs, but there are other OpenTelemetry-compatible platforms that you could use for this collection & inspection part.
```shell
pip install smolagents
pip install arize-phoenix opentelemetry-sdk opentelemetry-exporter-otlp openinference-instrumentation-smolagents
```
Then run the collector in the background.
```shell
python -m phoenix.server.main serve
```
Finally, set up `SmolagentsInstrumentor` to trace your agents and send the traces to Phoenix at the endpoint defined below.
```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
endpoint = "http://0.0.0.0:6006/v1/traces"
trace_provider = TracerProvider()
trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint)))
SmolagentsInstrumentor().instrument(tracer_provider=trace_provider)
```
Then you can run your agents!
```py
from smolagents import (
CodeAgent,
ToolCallingAgent,
DuckDuckGoSearchTool,
VisitWebpageTool,
HfApiModel,
)
model = HfApiModel()
search_agent = ToolCallingAgent(
tools=[DuckDuckGoSearchTool(), VisitWebpageTool()],
model=model,
name="search_agent",
description="This is an agent that can do web search.",
)
manager_agent = CodeAgent(
tools=[],
model=model,
managed_agents=[search_agent],
)
manager_agent.run(
"If the US keeps its 2024 growth rate, how many years will it take for the GDP to double?"
)
```
Voilà!
You can then navigate to `http://0.0.0.0:6006/projects/` to inspect your run!
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/inspect_run_phoenix.png">
You can see that the CodeAgent called its managed ToolCallingAgent (by the way, the managed agent could have been a CodeAgent as well) to ask it to run the web search for the U.S. 2024 growth rate. Then the managed agent returned its report and the manager agent acted upon it to calculate the economy doubling time! Sweet, isn't it? | smolagents/docs/source/en/tutorials/inspect_runs.md/0 | {
"file_path": "smolagents/docs/source/en/tutorials/inspect_runs.md",
"repo_id": "smolagents",
"token_count": 1324
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Secure code execution
[[open-in-colab]]
> [!TIP]
> If you're new to building agents, make sure to first read the [intro to agents](../conceptual_guides/intro_agents) and the [guided tour of smolagents](../guided_tour).
### Code agents
[Multiple](https://huggingface.co/papers/2402.01030) [research](https://huggingface.co/papers/2411.01747) [papers](https://huggingface.co/papers/2401.00812) have shown that having the LLM write its actions (tool calls) in code works much better than the current standard format for tool calling, which across the industry is some variant of "writing tool names and arguments as JSON".
Why is code better? Because we crafted our programming languages specifically to express the actions performed by a computer. If JSON snippets were a better way, this package would have been written in JSON snippets and the devil would be laughing at us.
Code is simply a better way to express actions on a computer. It is better in terms of:
- **Composability:** can you nest JSON actions within each other, or define a set of JSON actions to reuse later, the way you can simply define a Python function?
- **Object management:** how would you store the output of an action like `generate_image` in JSON?
- **Generality:** code is built to express anything you can have a computer do.
- **Representation in LLM training corpora:** why not take advantage of the blessing that high-quality code examples are already included in LLM training data?
This is illustrated in the image below, taken from [Executable Code Actions Elicit Better LLM Agents](https://huggingface.co/papers/2402.01030).
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/code_vs_json_actions.png">
This is why we put the emphasis on code agents, in this case Python agents, which meant putting more effort into building secure Python interpreters.
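As a rough, self-contained illustration of the composability point above (the tool functions here are hypothetical stand-ins, not real smolagents tools):
```py
# Hypothetical tool functions, stubbed out purely for illustration.
def generate_image(prompt: str) -> str:
    return f"<image generated for: {prompt}>"

def add_caption(image: str, caption: str) -> str:
    return f"{image} [caption: {caption}]"

# In code, actions compose naturally: outputs become inputs, and the whole
# sequence can be wrapped in a reusable function.
def illustrate(topic: str) -> str:
    image = generate_image(f"A diagram explaining {topic}")
    return add_caption(image, caption=topic)

print(illustrate("photosynthesis"))
# The JSON-tool-call equivalent needs one blob per call, plus an external
# convention for storing `image` and passing it to the next call.
```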
### Local Python interpreter
By default, `CodeAgent` runs LLM-generated code in your environment.
This execution is not done by the vanilla Python interpreter: we have re-built a more secure `LocalPythonInterpreter` from the ground up.
This interpreter is designed for security:
- restricting imports to a list explicitly passed by the user
- capping the number of operations to prevent infinite loops and resource bloating
- it will not perform any operation that is not pre-defined
We have used it in many use cases and have never observed any damage to the environment.
However, this solution is not watertight: one can imagine situations where an LLM fine-tuned for malicious actions could still harm your environment. For instance, if you have allowed an innocuous package like `Pillow` to process images, the LLM could save thousands of images to bloat your hard drive.
That is certainly unlikely if you chose the LLM engine yourself, but it could happen.
So if you want to be extra cautious, you can use the remote code execution option described below.
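Before moving to the remote option, here is a minimal sketch of the default local execution path, with the import allow-list passed explicitly (the task string is just an example):
```py
from smolagents import CodeAgent, HfApiModel

agent = CodeAgent(
    tools=[],
    model=HfApiModel(),
    additional_authorized_imports=["numpy"],  # only these imports are allowed in generated code
)
agent.run("Use numpy to compute the mean of the first 100 square numbers.")
```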
### E2B code executor
For maximum security, you can use our integration with E2B to run code in a sandboxed environment. This is a remote execution service that runs your code in an isolated container, making it impossible for the code to affect your local environment.
For this, you will need to set up your E2B account and set your `E2B_API_KEY` in your environment variables. Head to [E2B's quickstart documentation](https://e2b.dev/docs/quickstart) for more information.
Then you can install it with `pip install e2b-code-interpreter python-dotenv`.
Now you're set!
To set the code executor to E2B, simply pass the flag `use_e2b_executor=True` when initializing your `CodeAgent`.
Note that you should add all the tools' dependencies in `additional_authorized_imports`, so that the executor installs them.
```py
from smolagents import CodeAgent, VisitWebpageTool, HfApiModel
agent = CodeAgent(
tools = [VisitWebpageTool()],
model=HfApiModel(),
additional_authorized_imports=["requests", "markdownify"],
use_e2b_executor=True
)
agent.run("What was Abraham Lincoln's preferred pet?")
```
E2B code execution does not currently work with multi-agents - because having an agent call inside a code blob that must be executed remotely is a mess. But we are working on adding it!
| smolagents/docs/source/hi/tutorials/secure_code_execution.md/0 | {
"file_path": "smolagents/docs/source/hi/tutorials/secure_code_execution.md",
"repo_id": "smolagents",
"token_count": 5833
} |
import json
import os
import shutil
import textwrap
from pathlib import Path
# import tqdm.asyncio
from smolagents.utils import AgentError
def serialize_agent_error(obj):
if isinstance(obj, AgentError):
return {"error_type": obj.__class__.__name__, "message": obj.message}
else:
return str(obj)
def get_image_description(file_name: str, question: str, visual_inspection_tool) -> str:
prompt = f"""Write a caption of 5 sentences for this image. Pay special attention to any details that might be useful for someone answering the following question:
{question}. But do not try to answer the question directly!
Do not add any information that is not present in the image."""
return visual_inspection_tool(image_path=file_name, question=prompt)
def get_document_description(file_path: str, question: str, document_inspection_tool) -> str:
prompt = f"""Write a caption of 5 sentences for this document. Pay special attention to any details that might be useful for someone answering the following question:
{question}. But do not try to answer the question directly!
Do not add any information that is not present in the document."""
return document_inspection_tool.forward_initial_exam_mode(file_path=file_path, question=prompt)
def get_single_file_description(file_path: str, question: str, visual_inspection_tool, document_inspection_tool):
file_extension = file_path.split(".")[-1]
if file_extension in ["png", "jpg", "jpeg"]:
file_description = f" - Attached image: {file_path}"
file_description += (
f"\n -> Image description: {get_image_description(file_path, question, visual_inspection_tool)}"
)
return file_description
elif file_extension in ["pdf", "xls", "xlsx", "docx", "doc", "xml"]:
file_description = f" - Attached document: {file_path}"
image_path = file_path.split(".")[0] + ".png"
if os.path.exists(image_path):
description = get_image_description(image_path, question, visual_inspection_tool)
else:
description = get_document_description(file_path, question, document_inspection_tool)
file_description += f"\n -> File description: {description}"
return file_description
elif file_extension in ["mp3", "m4a", "wav"]:
return f" - Attached audio: {file_path}"
else:
return f" - Attached file: {file_path}"
def get_zip_description(file_path: str, question: str, visual_inspection_tool, document_inspection_tool):
folder_path = file_path.replace(".zip", "")
os.makedirs(folder_path, exist_ok=True)
shutil.unpack_archive(file_path, folder_path)
prompt_use_files = ""
for root, dirs, files in os.walk(folder_path):
for file in files:
file_path = os.path.join(root, file)
prompt_use_files += "\n" + textwrap.indent(
get_single_file_description(file_path, question, visual_inspection_tool, document_inspection_tool),
prefix=" ",
)
return prompt_use_files
def get_tasks_to_run(data, total: int, base_filename: Path, tasks_ids: list[int]):
f = base_filename.parent / f"{base_filename.stem}_answers.jsonl"
done = set()
if f.exists():
with open(f, encoding="utf-8") as fh:
done = {json.loads(line)["task_id"] for line in fh if line.strip()}
tasks = []
for i in range(total):
task_id = int(data[i]["task_id"])
if task_id not in done:
if tasks_ids is not None:
if task_id in tasks_ids:
tasks.append(data[i])
else:
tasks.append(data[i])
return tasks
| smolagents/examples/open_deep_research/scripts/run_agents.py/0 | {
"file_path": "smolagents/examples/open_deep_research/scripts/run_agents.py",
"repo_id": "smolagents",
"token_count": 1425
} |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mimetypes
import os
import re
import shutil
from typing import Optional
from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
from smolagents.agents import ActionStep, MultiStepAgent
from smolagents.memory import MemoryStep
from smolagents.utils import _is_package_available
def pull_messages_from_step(
step_log: MemoryStep,
):
"""Extract ChatMessage objects from agent steps with proper nesting"""
import gradio as gr
if isinstance(step_log, ActionStep):
# Output the step number
step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
# First yield the thought/reasoning from the LLM
if hasattr(step_log, "model_output") and step_log.model_output is not None:
# Clean up the LLM output
model_output = step_log.model_output.strip()
# Remove any trailing <end_code> and extra backticks, handling multiple possible formats
model_output = re.sub(r"```\s*<end_code>", "```", model_output) # handles ```<end_code>
model_output = re.sub(r"<end_code>\s*```", "```", model_output) # handles <end_code>```
model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output) # handles ```\n<end_code>
model_output = model_output.strip()
yield gr.ChatMessage(role="assistant", content=model_output)
# For tool calls, create a parent message
if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
first_tool_call = step_log.tool_calls[0]
used_code = first_tool_call.name == "python_interpreter"
parent_id = f"call_{len(step_log.tool_calls)}"
# Tool call becomes the parent message with timing info
# First we will handle arguments based on type
args = first_tool_call.arguments
if isinstance(args, dict):
content = str(args.get("answer", str(args)))
else:
content = str(args).strip()
if used_code:
# Clean up the content by removing any end code tags
content = re.sub(r"```.*?\n", "", content) # Remove existing code blocks
content = re.sub(r"\s*<end_code>\s*", "", content) # Remove end_code tags
content = content.strip()
if not content.startswith("```python"):
content = f"```python\n{content}\n```"
parent_message_tool = gr.ChatMessage(
role="assistant",
content=content,
metadata={
"title": f"🛠️ Used tool {first_tool_call.name}",
"id": parent_id,
"status": "pending",
},
)
yield parent_message_tool
# Nesting execution logs under the tool call if they exist
if hasattr(step_log, "observations") and (
step_log.observations is not None and step_log.observations.strip()
): # Only yield execution logs if there's actual content
log_content = step_log.observations.strip()
if log_content:
log_content = re.sub(r"^Execution logs:\s*", "", log_content)
yield gr.ChatMessage(
role="assistant",
content=f"{log_content}",
metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
)
# Nesting any errors under the tool call
if hasattr(step_log, "error") and step_log.error is not None:
yield gr.ChatMessage(
role="assistant",
content=str(step_log.error),
metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
)
# Update parent message metadata to done status without yielding a new message
parent_message_tool.metadata["status"] = "done"
# Handle standalone errors but not from tool calls
elif hasattr(step_log, "error") and step_log.error is not None:
yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
# Calculate duration and token information
step_footnote = f"{step_number}"
if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
token_str = (
f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
)
step_footnote += token_str
if hasattr(step_log, "duration"):
step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
step_footnote += step_duration
step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
yield gr.ChatMessage(role="assistant", content="-----")
def stream_to_gradio(
agent,
task: str,
reset_agent_memory: bool = False,
additional_args: Optional[dict] = None,
):
"""Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
if not _is_package_available("gradio"):
raise ModuleNotFoundError(
"Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
)
import gradio as gr
total_input_tokens = 0
total_output_tokens = 0
for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
# Track tokens if model provides them
if hasattr(agent.model, "last_input_token_count"):
total_input_tokens += agent.model.last_input_token_count
total_output_tokens += agent.model.last_output_token_count
if isinstance(step_log, ActionStep):
step_log.input_token_count = agent.model.last_input_token_count
step_log.output_token_count = agent.model.last_output_token_count
for message in pull_messages_from_step(
step_log,
):
yield message
final_answer = step_log # Last log is the run's final_answer
final_answer = handle_agent_output_types(final_answer)
if isinstance(final_answer, AgentText):
yield gr.ChatMessage(
role="assistant",
content=f"**Final answer:**\n{final_answer.to_string()}\n",
)
elif isinstance(final_answer, AgentImage):
yield gr.ChatMessage(
role="assistant",
content={"path": final_answer.to_string(), "mime_type": "image/png"},
)
elif isinstance(final_answer, AgentAudio):
yield gr.ChatMessage(
role="assistant",
content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
)
else:
yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
class GradioUI:
"""A one-line interface to launch your agent in Gradio"""
def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
if not _is_package_available("gradio"):
raise ModuleNotFoundError(
"Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
)
self.agent = agent
self.file_upload_folder = file_upload_folder
if self.file_upload_folder is not None:
if not os.path.exists(file_upload_folder):
os.mkdir(file_upload_folder)
def interact_with_agent(self, prompt, messages):
import gradio as gr
messages.append(gr.ChatMessage(role="user", content=prompt))
yield messages
for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
messages.append(msg)
yield messages
yield messages
def upload_file(
self,
file,
file_uploads_log,
allowed_file_types=[
"application/pdf",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"text/plain",
],
):
"""
Handle file uploads, default allowed types are .pdf, .docx, and .txt
"""
import gradio as gr
if file is None:
return gr.Textbox("No file uploaded", visible=True), file_uploads_log
try:
mime_type, _ = mimetypes.guess_type(file.name)
except Exception as e:
return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log
if mime_type not in allowed_file_types:
return gr.Textbox("File type disallowed", visible=True), file_uploads_log
# Sanitize file name
original_name = os.path.basename(file.name)
sanitized_name = re.sub(
r"[^\w\-.]", "_", original_name
) # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores
type_to_ext = {}
for ext, t in mimetypes.types_map.items():
if t not in type_to_ext:
type_to_ext[t] = ext
# Ensure the extension correlates to the mime type
sanitized_name = sanitized_name.split(".")[:-1]
sanitized_name.append("" + type_to_ext[mime_type])
sanitized_name = "".join(sanitized_name)
# Save the uploaded file to the specified folder
file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
shutil.copy(file.name, file_path)
return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
def log_user_message(self, text_input, file_uploads_log):
return (
text_input
+ (
f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
if len(file_uploads_log) > 0
else ""
),
"",
)
def launch(self, **kwargs):
import gradio as gr
with gr.Blocks(fill_height=True) as demo:
stored_messages = gr.State([])
file_uploads_log = gr.State([])
chatbot = gr.Chatbot(
label="Agent",
type="messages",
avatar_images=(
None,
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
),
resizeable=True,
scale=1,
)
# If an upload folder is provided, enable the upload feature
if self.file_upload_folder is not None:
upload_file = gr.File(label="Upload a file")
upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
upload_file.change(
self.upload_file,
[upload_file, file_uploads_log],
[upload_status, file_uploads_log],
)
text_input = gr.Textbox(lines=1, label="Chat Message")
text_input.submit(
self.log_user_message,
[text_input, file_uploads_log],
[stored_messages, text_input],
).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])
demo.launch(debug=True, share=True, **kwargs)
__all__ = ["stream_to_gradio", "GradioUI"]
| smolagents/src/smolagents/gradio_ui.py/0 | {
"file_path": "smolagents/src/smolagents/gradio_ui.py",
"repo_id": "smolagents",
"token_count": 5557
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
from smolagents.agent_types import _AGENT_TYPE_MAPPING
from smolagents.default_tools import DuckDuckGoSearchTool, PythonInterpreterTool, SpeechToTextTool, VisitWebpageTool
from .test_tools import ToolTesterMixin
class DefaultToolTests(unittest.TestCase):
def test_visit_webpage(self):
arguments = {"url": "https://en.wikipedia.org/wiki/United_States_Secretary_of_Homeland_Security"}
result = VisitWebpageTool()(arguments)
assert isinstance(result, str)
assert "* [About Wikipedia](/wiki/Wikipedia:About)" in result # Proper wikipedia pages have an About
def test_ddgs_with_kwargs(self):
result = DuckDuckGoSearchTool(timeout=20)("DeepSeek parent company")
assert isinstance(result, str)
class PythonInterpreterToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = PythonInterpreterTool(authorized_imports=["numpy"])
self.tool.setup()
def test_exact_match_arg(self):
result = self.tool("(2 / 2) * 4")
self.assertEqual(result, "Stdout:\n\nOutput: 4.0")
def test_exact_match_kwarg(self):
result = self.tool(code="(2 / 2) * 4")
self.assertEqual(result, "Stdout:\n\nOutput: 4.0")
def test_agent_type_output(self):
inputs = ["2 * 2"]
output = self.tool(*inputs, sanitize_inputs_outputs=True)
output_type = _AGENT_TYPE_MAPPING[self.tool.output_type]
self.assertTrue(isinstance(output, output_type))
def test_agent_types_inputs(self):
inputs = ["2 * 2"]
_inputs = []
for _input, expected_input in zip(inputs, self.tool.inputs.values()):
input_type = expected_input["type"]
if isinstance(input_type, list):
_inputs.append([_AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(_AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
output = self.tool(*inputs, sanitize_inputs_outputs=True)
output_type = _AGENT_TYPE_MAPPING[self.tool.output_type]
self.assertTrue(isinstance(output, output_type))
def test_imports_work(self):
result = self.tool("import numpy as np")
assert "import from numpy is not allowed" not in result.lower()
def test_unauthorized_imports_fail(self):
with pytest.raises(Exception) as e:
self.tool("import sympy as sp")
assert "sympy" in str(e).lower()
class TestSpeechToTextTool:
def test_new_instance(self):
from transformers.models.whisper import WhisperForConditionalGeneration, WhisperProcessor
tool = SpeechToTextTool()
assert tool is not None
assert tool.pre_processor_class == WhisperProcessor
assert tool.model_class == WhisperForConditionalGeneration
| smolagents/tests/test_default_tools.py/0 | {
"file_path": "smolagents/tests/test_default_tools.py",
"repo_id": "smolagents",
"token_count": 1333
} |
[package]
name = "text-generation-client"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[dependencies]
async-trait = "^0.1"
base64 = { workspace = true }
futures = "^0.3"
grpc-metadata = { path = "../grpc-metadata" }
prost = "^0.12"
thiserror = "^1.0"
tokio = { version = "^1.32", features = ["sync"] }
tonic = "^0.10"
tower = "^0.4"
tracing = "^0.1"
[build-dependencies]
tonic-build = "0.10.1"
prost-build = "0.12.1"
| text-generation-inference/backends/client/Cargo.toml/0 | {
"file_path": "text-generation-inference/backends/client/Cargo.toml",
"repo_id": "text-generation-inference",
"token_count": 202
} |
set(SPDLOG_USE_FMT ON)
set(SPDLOG_BUILD_SHARED OFF)
set(SPDLOG_FMT_EXTERNAL OFF)
# Define the compile-time log level (SPDLOG_ACTIVE_LEVEL) based on the build type
if (${CMAKE_BUILD_TYPE} STREQUAL "Debug")
    add_compile_definitions(SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_TRACE)
else ()
    add_compile_definitions(SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG)
endif ()
fetchcontent_declare(
spdlog
# DOWNLOAD_EXTRACT_TIMESTAMP
URL https://github.com/gabime/spdlog/archive/refs/tags/v1.15.0.tar.gz
)
fetchcontent_makeavailable(spdlog)
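# Downstream targets can then link against the fetched library, e.g.:
#   target_link_libraries(my_backend PRIVATE spdlog::spdlog)
# (target name assumed from spdlog's exported CMake config)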
| text-generation-inference/backends/trtllm/cmake/spdlog.cmake/0 | {
"file_path": "text-generation-inference/backends/trtllm/cmake/spdlog.cmake",
"repo_id": "text-generation-inference",
"token_count": 245
} |
# Legacy warning ⚠️
The inference clients from [huggingface_hub](https://huggingface.co/docs/huggingface_hub/guides/inference) are recommended over `text_generation`.
# Text Generation
The Hugging Face Text Generation Python library provides a convenient way of interfacing with a
`text-generation-inference` instance running on
[Hugging Face Inference Endpoints](https://huggingface.co/inference-endpoints) or on the Hugging Face Hub.
## Get Started
### Install
```shell
pip install text-generation
```
### Inference API Usage
```python
from text_generation import InferenceAPIClient
client = InferenceAPIClient("bigscience/bloomz")
text = client.generate("Why is the sky blue?").generated_text
print(text)
# ' Rayleigh scattering'
# Token Streaming
text = ""
for response in client.generate_stream("Why is the sky blue?"):
if not response.token.special:
text += response.token.text
print(text)
# ' Rayleigh scattering'
```
or with the asynchronous client:
```python
from text_generation import InferenceAPIAsyncClient
client = InferenceAPIAsyncClient("bigscience/bloomz")
response = await client.generate("Why is the sky blue?")
print(response.generated_text)
# ' Rayleigh scattering'
# Token Streaming
text = ""
async for response in client.generate_stream("Why is the sky blue?"):
if not response.token.special:
text += response.token.text
print(text)
# ' Rayleigh scattering'
```
Check all models currently deployed on the Hugging Face Inference API with `Text Generation` support:
```python
from text_generation.inference_api import deployed_models
print(deployed_models())
```
### Hugging Face Inference Endpoint usage
```python
from text_generation import Client
endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud"
client = Client(endpoint_url)
text = client.generate("Why is the sky blue?").generated_text
print(text)
# ' Rayleigh scattering'
# Token Streaming
text = ""
for response in client.generate_stream("Why is the sky blue?"):
if not response.token.special:
text += response.token.text
print(text)
# ' Rayleigh scattering'
```
or with the asynchronous client:
```python
from text_generation import AsyncClient
endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud"
client = AsyncClient(endpoint_url)
response = await client.generate("Why is the sky blue?")
print(response.generated_text)
# ' Rayleigh scattering'
# Token Streaming
text = ""
async for response in client.generate_stream("Why is the sky blue?"):
if not response.token.special:
text += response.token.text
print(text)
# ' Rayleigh scattering'
```
### Types
```python
# enum for grammar type
class GrammarType(Enum):
Json = "json"
Regex = "regex"
# Grammar type and value
class Grammar:
# Grammar type
type: GrammarType
# Grammar value
value: Union[str, dict]
class Parameters:
# Activate logits sampling
do_sample: bool
# Maximum number of generated tokens
max_new_tokens: int
# The parameter for repetition penalty. 1.0 means no penalty.
# See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
repetition_penalty: Optional[float]
    # The parameter for frequency penalty. 0.0 means no penalty.
# Penalize new tokens based on their existing frequency in the text so far,
# decreasing the model's likelihood to repeat the same line verbatim.
frequency_penalty: Optional[float]
# Whether to prepend the prompt to the generated text
return_full_text: bool
    # Stop generating tokens if a member of `stop` is generated
stop: List[str]
# Random sampling seed
seed: Optional[int]
# The value used to module the logits distribution.
temperature: Optional[float]
# The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_k: Optional[int]
# If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
# higher are kept for generation.
top_p: Optional[float]
# truncate inputs tokens to the given size
truncate: Optional[int]
# Typical Decoding mass
# See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
typical_p: Optional[float]
    # Generate best_of sequences and return the one with the highest token logprobs
best_of: Optional[int]
# Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
watermark: bool
# Get generation details
details: bool
# Get decoder input token logprobs and ids
decoder_input_details: bool
# Return the N most likely tokens at each step
top_n_tokens: Optional[int]
# grammar to use for generation
grammar: Optional[Grammar]
class Request:
# Prompt
inputs: str
# Generation parameters
parameters: Optional[Parameters]
# Whether to stream output tokens
stream: bool
# Decoder input tokens
class InputToken:
# Token ID from the model tokenizer
id: int
# Token text
text: str
# Logprob
# Optional since the logprob of the first token cannot be computed
logprob: Optional[float]
# Generated tokens
class Token:
# Token ID from the model tokenizer
id: int
# Token text
text: str
# Logprob
logprob: Optional[float]
# Is the token a special token
# Can be used to ignore tokens when concatenating
special: bool
# Generation finish reason
class FinishReason(Enum):
# number of generated tokens == `max_new_tokens`
Length = "length"
# the model generated its end of sequence token
EndOfSequenceToken = "eos_token"
# the model generated a text included in `stop_sequences`
StopSequence = "stop_sequence"
# Additional sequences when using the `best_of` parameter
class BestOfSequence:
# Generated text
generated_text: str
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int]
# Decoder input tokens, empty if decoder_input_details is False
prefill: List[InputToken]
# Generated tokens
tokens: List[Token]
# Most likely tokens
top_tokens: Optional[List[List[Token]]]
# `generate` details
class Details:
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int]
# Decoder input tokens, empty if decoder_input_details is False
prefill: List[InputToken]
# Generated tokens
tokens: List[Token]
# Most likely tokens
top_tokens: Optional[List[List[Token]]]
# Additional sequences when using the `best_of` parameter
best_of_sequences: Optional[List[BestOfSequence]]
# `generate` return value
class Response:
# Generated text
generated_text: str
# Generation details
details: Details
# `generate_stream` details
class StreamDetails:
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int]
# `generate_stream` return value
class StreamResponse:
# Generated token
token: Token
# Most likely tokens
top_tokens: Optional[List[Token]]
# Complete generated text
# Only available when the generation is finished
generated_text: Optional[str]
# Generation details
# Only available when the generation is finished
details: Optional[StreamDetails]
# Inference API currently deployed model
class DeployedModel:
model_id: str
sha: str
```
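These types mirror the keyword arguments accepted by the clients. As a rough sketch (parameter names are assumed to match the `Parameters` fields above, and the endpoint URL is a placeholder):

```python
from text_generation import Client

client = Client("https://YOUR_ENDPOINT.endpoints.huggingface.cloud")

# Sampling options map onto the Parameters fields documented above
response = client.generate(
    "Why is the sky blue?",
    max_new_tokens=32,
    temperature=0.7,
    top_p=0.95,
)
print(response.generated_text)
# Generation metadata (finish reason, generated token count, seed, ...) is available on response.details
```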
| text-generation-inference/clients/python/README.md/0 | {
"file_path": "text-generation-inference/clients/python/README.md",
"repo_id": "text-generation-inference",
"token_count": 2491
} |
{
"openapi": "3.0.3",
"info": {
"title": "Text Generation Inference",
"description": "Text Generation Webserver",
"contact": {
"name": "Olivier Dehaene"
},
"license": {
"name": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0"
},
"version": "3.1.1-dev0"
},
"paths": {
"/": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Generate tokens if `stream == false` or a stream of token if `stream == true`",
"operationId": "compat_generate",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CompatGenerateRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Generated Text",
"content": {
"application/json": {
"schema": {
"type": "array",
"items": {
"$ref": "#/components/schemas/GenerateResponse"
}
}
},
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/StreamResponse"
}
}
}
},
"422": {
"description": "Input validation error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Input validation error"
}
}
}
},
"424": {
"description": "Generation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Request failed during generation"
}
}
}
},
"429": {
"description": "Model is overloaded",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Model is overloaded"
}
}
}
},
"500": {
"description": "Incomplete generation",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Incomplete generation"
}
}
}
}
}
}
},
"/chat_tokenize": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Template and tokenize ChatRequest",
"operationId": "get_chat_tokenize",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ChatRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Templated and tokenized ChatRequest",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ChatTokenizeResponse"
}
}
}
},
"404": {
"description": "Failed to tokenize ChatRequest",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
}
}
}
}
}
}
},
"/generate": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Generate tokens",
"operationId": "generate",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/GenerateRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Generated Text",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/GenerateResponse"
}
}
}
},
"422": {
"description": "Input validation error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Input validation error"
}
}
}
},
"424": {
"description": "Generation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Request failed during generation"
}
}
}
},
"429": {
"description": "Model is overloaded",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Model is overloaded"
}
}
}
},
"500": {
"description": "Incomplete generation",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Incomplete generation"
}
}
}
}
}
}
},
"/generate_stream": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Generate a stream of token using Server-Sent Events",
"operationId": "generate_stream",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/GenerateRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Generated Text",
"content": {
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/StreamResponse"
}
}
}
},
"422": {
"description": "Input validation error",
"content": {
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Input validation error"
}
}
}
},
"424": {
"description": "Generation Error",
"content": {
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Request failed during generation"
}
}
}
},
"429": {
"description": "Model is overloaded",
"content": {
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Model is overloaded"
}
}
}
},
"500": {
"description": "Incomplete generation",
"content": {
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Incomplete generation"
}
}
}
}
}
}
},
"/health": {
"get": {
"tags": [
"Text Generation Inference"
],
"summary": "Health check method",
"operationId": "health",
"responses": {
"200": {
"description": "Everything is working fine"
},
"503": {
"description": "Text generation inference is down",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "unhealthy",
"error_type": "healthcheck"
}
}
}
}
}
}
},
"/info": {
"get": {
"tags": [
"Text Generation Inference"
],
"summary": "Text Generation Inference endpoint info",
"operationId": "get_model_info",
"responses": {
"200": {
"description": "Served model info",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Info"
}
}
}
}
}
}
},
"/invocations": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Generate tokens from Sagemaker request",
"operationId": "sagemaker_compatibility",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/SagemakerRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Generated Chat Completion",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/SagemakerResponse"
}
},
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/SagemakerStreamResponse"
}
}
}
},
"422": {
"description": "Input validation error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Input validation error",
"error_type": "validation"
}
}
}
},
"424": {
"description": "Generation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Request failed during generation",
"error_type": "generation"
}
}
}
},
"429": {
"description": "Model is overloaded",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Model is overloaded",
"error_type": "overloaded"
}
}
}
},
"500": {
"description": "Incomplete generation",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Incomplete generation",
"error_type": "incomplete_generation"
}
}
}
}
}
}
},
"/metrics": {
"get": {
"tags": [
"Text Generation Inference"
],
"summary": "Prometheus metrics scrape endpoint",
"operationId": "metrics",
"responses": {
"200": {
"description": "Prometheus Metrics",
"content": {
"text/plain": {
"schema": {
"type": "string"
}
}
}
}
}
}
},
"/tokenize": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Tokenize inputs",
"operationId": "tokenize",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/GenerateRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Tokenized ids",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/TokenizeResponse"
}
}
}
},
"404": {
"description": "No tokenizer found",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "No fast tokenizer available"
}
}
}
}
}
}
},
"/v1/chat/completions": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Generate tokens",
"operationId": "chat_completions",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ChatRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Generated Chat Completion",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ChatCompletion"
}
},
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/ChatCompletionChunk"
}
}
}
},
"422": {
"description": "Input validation error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Input validation error"
}
}
}
},
"424": {
"description": "Generation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Request failed during generation"
}
}
}
},
"429": {
"description": "Model is overloaded",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Model is overloaded"
}
}
}
},
"500": {
"description": "Incomplete generation",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Incomplete generation"
}
}
}
}
}
}
},
"/v1/completions": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Generate tokens",
"operationId": "completions",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CompletionRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Generated Chat Completion",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CompletionFinal"
}
},
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/Chunk"
}
}
}
},
"422": {
"description": "Input validation error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Input validation error"
}
}
}
},
"424": {
"description": "Generation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Request failed during generation"
}
}
}
},
"429": {
"description": "Model is overloaded",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Model is overloaded"
}
}
}
},
"500": {
"description": "Incomplete generation",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Incomplete generation"
}
}
}
}
}
}
},
"/v1/models": {
"get": {
"tags": [
"Text Generation Inference"
],
"summary": "Get model info",
"operationId": "openai_get_model_info",
"responses": {
"200": {
"description": "Served model info",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ModelInfo"
}
}
}
},
"404": {
"description": "Model not found",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
}
}
}
}
}
}
}
},
"components": {
"schemas": {
"BestOfSequence": {
"type": "object",
"required": [
"generated_text",
"finish_reason",
"generated_tokens",
"prefill",
"tokens"
],
"properties": {
"finish_reason": {
"$ref": "#/components/schemas/FinishReason"
},
"generated_text": {
"type": "string",
"example": "test"
},
"generated_tokens": {
"type": "integer",
"format": "int32",
"example": 1,
"minimum": 0
},
"prefill": {
"type": "array",
"items": {
"$ref": "#/components/schemas/PrefillToken"
}
},
"seed": {
"type": "integer",
"format": "int64",
"example": 42,
"nullable": true,
"minimum": 0
},
"tokens": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Token"
}
},
"top_tokens": {
"type": "array",
"items": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Token"
}
}
}
}
},
"ChatCompletion": {
"type": "object",
"required": [
"id",
"created",
"model",
"system_fingerprint",
"choices",
"usage"
],
"properties": {
"choices": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ChatCompletionComplete"
}
},
"created": {
"type": "integer",
"format": "int64",
"example": "1706270835",
"minimum": 0
},
"id": {
"type": "string"
},
"model": {
"type": "string",
"example": "mistralai/Mistral-7B-Instruct-v0.2"
},
"system_fingerprint": {
"type": "string"
},
"usage": {
"$ref": "#/components/schemas/Usage"
}
}
},
"ChatCompletionChoice": {
"type": "object",
"required": [
"index",
"delta"
],
"properties": {
"delta": {
"$ref": "#/components/schemas/ChatCompletionDelta"
},
"finish_reason": {
"type": "string",
"nullable": true
},
"index": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"logprobs": {
"allOf": [
{
"$ref": "#/components/schemas/ChatCompletionLogprobs"
}
],
"nullable": true
}
}
},
"ChatCompletionChunk": {
"type": "object",
"required": [
"id",
"created",
"model",
"system_fingerprint",
"choices"
],
"properties": {
"choices": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ChatCompletionChoice"
}
},
"created": {
"type": "integer",
"format": "int64",
"example": "1706270978",
"minimum": 0
},
"id": {
"type": "string"
},
"model": {
"type": "string",
"example": "mistralai/Mistral-7B-Instruct-v0.2"
},
"system_fingerprint": {
"type": "string"
},
"usage": {
"allOf": [
{
"$ref": "#/components/schemas/Usage"
}
],
"nullable": true
}
}
},
"ChatCompletionComplete": {
"type": "object",
"required": [
"index",
"message",
"finish_reason"
],
"properties": {
"finish_reason": {
"type": "string"
},
"index": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"logprobs": {
"allOf": [
{
"$ref": "#/components/schemas/ChatCompletionLogprobs"
}
],
"nullable": true
},
"message": {
"$ref": "#/components/schemas/OutputMessage"
}
}
},
"ChatCompletionDelta": {
"oneOf": [
{
"$ref": "#/components/schemas/TextMessage"
},
{
"$ref": "#/components/schemas/ToolCallDelta"
}
]
},
"ChatCompletionLogprob": {
"type": "object",
"required": [
"token",
"logprob",
"top_logprobs"
],
"properties": {
"logprob": {
"type": "number",
"format": "float"
},
"token": {
"type": "string"
},
"top_logprobs": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ChatCompletionTopLogprob"
}
}
}
},
"ChatCompletionLogprobs": {
"type": "object",
"required": [
"content"
],
"properties": {
"content": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ChatCompletionLogprob"
}
}
}
},
"ChatCompletionTopLogprob": {
"type": "object",
"required": [
"token",
"logprob"
],
"properties": {
"logprob": {
"type": "number",
"format": "float"
},
"token": {
"type": "string"
}
}
},
"ChatRequest": {
"type": "object",
"required": [
"messages"
],
"properties": {
"frequency_penalty": {
"type": "number",
"format": "float",
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.",
"example": "1.0",
"nullable": true
},
"logit_bias": {
"type": "array",
"items": {
"type": "number",
"format": "float"
},
"description": "UNUSED\nModify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens\n(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,\nbut values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should\nresult in a ban or exclusive selection of the relevant token.",
"nullable": true
},
"logprobs": {
"type": "boolean",
"description": "Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each\noutput token returned in the content of message.",
"example": "false",
"nullable": true
},
"max_tokens": {
"type": "integer",
"format": "int32",
"description": "The maximum number of tokens that can be generated in the chat completion.",
"default": "1024",
"example": "32",
"nullable": true,
"minimum": 0
},
"messages": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
"description": "A list of messages comprising the conversation so far.",
"example": "[{\"role\": \"user\", \"content\": \"What is Deep Learning?\"}]"
},
"model": {
"type": "string",
"description": "[UNUSED] ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.",
"example": "mistralai/Mistral-7B-Instruct-v0.2",
"nullable": true
},
"n": {
"type": "integer",
"format": "int32",
"description": "UNUSED\nHow many chat completion choices to generate for each input message. Note that you will be charged based on the\nnumber of generated tokens across all of the choices. Keep n as 1 to minimize costs.",
"example": "2",
"nullable": true,
"minimum": 0
},
"presence_penalty": {
"type": "number",
"format": "float",
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\nincreasing the model's likelihood to talk about new topics",
"example": 0.1,
"nullable": true
},
"response_format": {
"allOf": [
{
"$ref": "#/components/schemas/GrammarType"
}
],
"default": "null",
"nullable": true
},
"seed": {
"type": "integer",
"format": "int64",
"example": 42,
"nullable": true,
"minimum": 0
},
"stop": {
"type": "array",
"items": {
"type": "string"
},
"description": "Up to 4 sequences where the API will stop generating further tokens.",
"example": "null",
"nullable": true
},
"stream": {
"type": "boolean"
},
"stream_options": {
"allOf": [
{
"$ref": "#/components/schemas/StreamOptions"
}
],
"nullable": true
},
"temperature": {
"type": "number",
"format": "float",
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.",
"example": 1.0,
"nullable": true
},
"tool_choice": {
"allOf": [
{
"$ref": "#/components/schemas/ToolChoice"
}
],
"default": "auto",
"nullable": true
},
"tool_prompt": {
"type": "string",
"description": "A prompt to be appended before the tools",
"example": "Given the functions available, please respond with a JSON for a function call with its proper arguments that best answers the given prompt. Respond in the format {name: function name, parameters: dictionary of argument name and its value}.Do not use variables.",
"nullable": true
},
"tools": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Tool"
},
"description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of\nfunctions the model may generate JSON inputs for.",
"example": "null",
"nullable": true
},
"top_logprobs": {
"type": "integer",
"format": "int32",
"description": "An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with\nan associated log probability. logprobs must be set to true if this parameter is used.",
"example": "5",
"nullable": true,
"minimum": 0
},
"top_p": {
"type": "number",
"format": "float",
"description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
"example": 0.95,
"nullable": true
}
}
},
"ChatTokenizeResponse": {
"type": "object",
"required": [
"tokenize_response",
"templated_text"
],
"properties": {
"templated_text": {
"type": "string"
},
"tokenize_response": {
"$ref": "#/components/schemas/TokenizeResponse"
}
}
},
"Chunk": {
"type": "object",
"required": [
"id",
"created",
"choices",
"model",
"system_fingerprint"
],
"properties": {
"choices": {
"type": "array",
"items": {
"$ref": "#/components/schemas/CompletionComplete"
}
},
"created": {
"type": "integer",
"format": "int64",
"minimum": 0
},
"id": {
"type": "string"
},
"model": {
"type": "string"
},
"system_fingerprint": {
"type": "string"
}
}
},
"CompatGenerateRequest": {
"type": "object",
"required": [
"inputs"
],
"properties": {
"inputs": {
"type": "string",
"example": "My name is Olivier and I"
},
"parameters": {
"$ref": "#/components/schemas/GenerateParameters"
},
"stream": {
"type": "boolean",
"default": "false"
}
}
},
"Completion": {
"oneOf": [
{
"allOf": [
{
"$ref": "#/components/schemas/Chunk"
},
{
"type": "object",
"required": [
"object"
],
"properties": {
"object": {
"type": "string",
"enum": [
"text_completion"
]
}
}
}
]
},
{
"allOf": [
{
"$ref": "#/components/schemas/CompletionFinal"
},
{
"type": "object",
"required": [
"object"
],
"properties": {
"object": {
"type": "string",
"enum": [
"text_completion"
]
}
}
}
]
}
],
"discriminator": {
"propertyName": "object"
}
},
"CompletionComplete": {
"type": "object",
"required": [
"index",
"text",
"finish_reason"
],
"properties": {
"finish_reason": {
"type": "string"
},
"index": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"logprobs": {
"type": "array",
"items": {
"type": "number",
"format": "float"
},
"nullable": true
},
"text": {
"type": "string"
}
}
},
"CompletionFinal": {
"type": "object",
"required": [
"id",
"created",
"model",
"system_fingerprint",
"choices",
"usage"
],
"properties": {
"choices": {
"type": "array",
"items": {
"$ref": "#/components/schemas/CompletionComplete"
}
},
"created": {
"type": "integer",
"format": "int64",
"example": "1706270835",
"minimum": 0
},
"id": {
"type": "string"
},
"model": {
"type": "string",
"example": "mistralai/Mistral-7B-Instruct-v0.2"
},
"system_fingerprint": {
"type": "string"
},
"usage": {
"$ref": "#/components/schemas/Usage"
}
}
},
"CompletionRequest": {
"type": "object",
"required": [
"prompt"
],
"properties": {
"frequency_penalty": {
"type": "number",
"format": "float",
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.",
"example": "1.0",
"nullable": true
},
"max_tokens": {
"type": "integer",
"format": "int32",
"description": "The maximum number of tokens that can be generated in the chat completion.",
"default": "1024",
"example": "32",
"nullable": true,
"minimum": 0
},
"model": {
"type": "string",
"description": "UNUSED\nID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.",
"example": "mistralai/Mistral-7B-Instruct-v0.2",
"nullable": true
},
"prompt": {
"$ref": "#/components/schemas/Prompt"
},
"repetition_penalty": {
"type": "number",
"format": "float",
"nullable": true
},
"seed": {
"type": "integer",
"format": "int64",
"example": 42,
"nullable": true,
"minimum": 0
},
"stop": {
"type": "array",
"items": {
"type": "string"
},
"description": "Up to 4 sequences where the API will stop generating further tokens.",
"example": "null",
"nullable": true
},
"stream": {
"type": "boolean"
},
"suffix": {
"type": "string",
"description": "The text to append to the prompt. This is useful for completing sentences or generating a paragraph of text.\nplease see the completion_template field in the model's tokenizer_config.json file for completion template.",
"nullable": true
},
"temperature": {
"type": "number",
"format": "float",
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.",
"example": 1.0,
"nullable": true
},
"top_p": {
"type": "number",
"format": "float",
"description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
"example": 0.95,
"nullable": true
}
}
},
"DeltaToolCall": {
"type": "object",
"required": [
"index",
"id",
"type",
"function"
],
"properties": {
"function": {
"$ref": "#/components/schemas/Function"
},
"id": {
"type": "string"
},
"index": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"type": {
"type": "string"
}
}
},
"Details": {
"type": "object",
"required": [
"finish_reason",
"generated_tokens",
"prefill",
"tokens"
],
"properties": {
"best_of_sequences": {
"type": "array",
"items": {
"$ref": "#/components/schemas/BestOfSequence"
},
"nullable": true
},
"finish_reason": {
"$ref": "#/components/schemas/FinishReason"
},
"generated_tokens": {
"type": "integer",
"format": "int32",
"example": 1,
"minimum": 0
},
"prefill": {
"type": "array",
"items": {
"$ref": "#/components/schemas/PrefillToken"
}
},
"seed": {
"type": "integer",
"format": "int64",
"example": 42,
"nullable": true,
"minimum": 0
},
"tokens": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Token"
}
},
"top_tokens": {
"type": "array",
"items": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Token"
}
}
}
}
},
"ErrorResponse": {
"type": "object",
"required": [
"error",
"error_type"
],
"properties": {
"error": {
"type": "string"
},
"error_type": {
"type": "string"
}
}
},
"FinishReason": {
"type": "string",
"enum": [
"length",
"eos_token",
"stop_sequence"
],
"example": "Length"
},
"Function": {
"type": "object",
"required": [
"arguments"
],
"properties": {
"arguments": {
"type": "string"
},
"name": {
"type": "string",
"nullable": true
}
}
},
"FunctionDefinition": {
"type": "object",
"required": [
"name",
"arguments"
],
"properties": {
"arguments": {},
"description": {
"type": "string",
"nullable": true
},
"name": {
"type": "string"
}
}
},
"FunctionName": {
"type": "object",
"required": [
"name"
],
"properties": {
"name": {
"type": "string"
}
}
},
"GenerateParameters": {
"type": "object",
"properties": {
"adapter_id": {
"type": "string",
"description": "Lora adapter id",
"default": "null",
"example": "null",
"nullable": true
},
"best_of": {
"type": "integer",
"description": "Generate best_of sequences and return the one if the highest token logprobs.",
"default": "null",
"example": 1,
"nullable": true,
"minimum": 0,
"exclusiveMinimum": 0
},
"decoder_input_details": {
"type": "boolean",
"description": "Whether to return decoder input token logprobs and ids.",
"default": "false"
},
"details": {
"type": "boolean",
"description": "Whether to return generation details.",
"default": "true"
},
"do_sample": {
"type": "boolean",
"description": "Activate logits sampling.",
"default": "false",
"example": true
},
"frequency_penalty": {
"type": "number",
"format": "float",
"description": "The parameter for frequency penalty. 1.0 means no penalty\nPenalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.",
"default": "null",
"example": 0.1,
"nullable": true,
"exclusiveMinimum": -2
},
"grammar": {
"allOf": [
{
"$ref": "#/components/schemas/GrammarType"
}
],
"default": "null",
"nullable": true
},
"max_new_tokens": {
"type": "integer",
"format": "int32",
"description": "Maximum number of tokens to generate.",
"default": "1024",
"example": "20",
"nullable": true,
"minimum": 0
},
"repetition_penalty": {
"type": "number",
"format": "float",
"description": "The parameter for repetition penalty. 1.0 means no penalty.\nSee [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.",
"default": "null",
"example": 1.03,
"nullable": true,
"exclusiveMinimum": 0
},
"return_full_text": {
"type": "boolean",
"description": "Whether to prepend the prompt to the generated text",
"default": "null",
"example": false,
"nullable": true
},
"seed": {
"type": "integer",
"format": "int64",
"description": "Random sampling seed.",
"default": "null",
"example": "null",
"nullable": true,
"minimum": 0,
"exclusiveMinimum": 0
},
"stop": {
"type": "array",
"items": {
"type": "string"
},
"description": "Stop generating tokens if a member of `stop` is generated.",
"example": [
"photographer"
],
"maxItems": 4
},
"temperature": {
"type": "number",
"format": "float",
"description": "The value used to module the logits distribution.",
"default": "null",
"example": 0.5,
"nullable": true,
"exclusiveMinimum": 0
},
"top_k": {
"type": "integer",
"format": "int32",
"description": "The number of highest probability vocabulary tokens to keep for top-k-filtering.",
"default": "null",
"example": 10,
"nullable": true,
"exclusiveMinimum": 0
},
"top_n_tokens": {
"type": "integer",
"format": "int32",
"description": "The number of highest probability vocabulary tokens to keep for top-n-filtering.",
"default": "null",
"example": 5,
"nullable": true,
"minimum": 0,
"exclusiveMinimum": 0
},
"top_p": {
"type": "number",
"format": "float",
"description": "Top-p value for nucleus sampling.",
"default": "null",
"example": 0.95,
"nullable": true,
"maximum": 1,
"exclusiveMinimum": 0
},
"truncate": {
"type": "integer",
"description": "Truncate inputs tokens to the given size.",
"default": "null",
"example": "null",
"nullable": true,
"minimum": 0
},
"typical_p": {
"type": "number",
"format": "float",
"description": "Typical Decoding mass\nSee [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.",
"default": "null",
"example": 0.95,
"nullable": true,
"maximum": 1,
"exclusiveMinimum": 0
},
"watermark": {
"type": "boolean",
"description": "Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226).",
"default": "false",
"example": true
}
}
},
"GenerateRequest": {
"type": "object",
"required": [
"inputs"
],
"properties": {
"inputs": {
"type": "string",
"example": "My name is Olivier and I"
},
"parameters": {
"$ref": "#/components/schemas/GenerateParameters"
}
}
},
"GenerateResponse": {
"type": "object",
"required": [
"generated_text"
],
"properties": {
"details": {
"allOf": [
{
"$ref": "#/components/schemas/Details"
}
],
"nullable": true
},
"generated_text": {
"type": "string",
"example": "test"
}
}
},
"GrammarType": {
"oneOf": [
{
"type": "object",
"required": [
"type",
"value"
],
"properties": {
"type": {
"type": "string",
"enum": [
"json"
]
},
"value": {
"description": "A string that represents a [JSON Schema](https://json-schema.org/).\n\nJSON Schema is a declarative language that allows to annotate JSON documents\nwith types and descriptions."
}
}
},
{
"type": "object",
"required": [
"type",
"value"
],
"properties": {
"type": {
"type": "string",
"enum": [
"regex"
]
},
"value": {
"type": "string"
}
}
}
],
"discriminator": {
"propertyName": "type"
}
},
"Info": {
"type": "object",
"required": [
"model_id",
"max_concurrent_requests",
"max_best_of",
"max_stop_sequences",
"max_input_tokens",
"max_total_tokens",
"validation_workers",
"max_client_batch_size",
"router",
"version"
],
"properties": {
"docker_label": {
"type": "string",
"example": "null",
"nullable": true
},
"max_best_of": {
"type": "integer",
"example": "2",
"minimum": 0
},
"max_client_batch_size": {
"type": "integer",
"example": "32",
"minimum": 0
},
"max_concurrent_requests": {
"type": "integer",
"description": "Router Parameters",
"example": "128",
"minimum": 0
},
"max_input_tokens": {
"type": "integer",
"example": "1024",
"minimum": 0
},
"max_stop_sequences": {
"type": "integer",
"example": "4",
"minimum": 0
},
"max_total_tokens": {
"type": "integer",
"example": "2048",
"minimum": 0
},
"model_id": {
"type": "string",
"description": "Model info",
"example": "bigscience/blomm-560m"
},
"model_pipeline_tag": {
"type": "string",
"example": "text-generation",
"nullable": true
},
"model_sha": {
"type": "string",
"example": "e985a63cdc139290c5f700ff1929f0b5942cced2",
"nullable": true
},
"router": {
"type": "string",
"description": "Router Info",
"example": "text-generation-router"
},
"sha": {
"type": "string",
"example": "null",
"nullable": true
},
"validation_workers": {
"type": "integer",
"example": "2",
"minimum": 0
},
"version": {
"type": "string",
"example": "0.5.0"
}
}
},
"Message": {
"type": "object",
"required": [
"role",
"content"
],
"properties": {
"content": {
"$ref": "#/components/schemas/MessageContent"
},
"name": {
"type": "string",
"example": "\"David\"",
"nullable": true
},
"role": {
"type": "string",
"example": "user"
}
}
},
"MessageChunk": {
"oneOf": [
{
"type": "object",
"required": [
"text",
"type"
],
"properties": {
"text": {
"type": "string"
},
"type": {
"type": "string",
"enum": [
"text"
]
}
}
},
{
"type": "object",
"required": [
"image_url",
"type"
],
"properties": {
"image_url": {
"$ref": "#/components/schemas/Url"
},
"type": {
"type": "string",
"enum": [
"image_url"
]
}
}
}
],
"discriminator": {
"propertyName": "type"
}
},
"MessageContent": {
"oneOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"$ref": "#/components/schemas/MessageChunk"
}
}
]
},
"ModelInfo": {
"type": "object",
"required": [
"id",
"object",
"created",
"owned_by"
],
"properties": {
"created": {
"type": "integer",
"format": "int64",
"example": 1686935002,
"minimum": 0
},
"id": {
"type": "string",
"example": "gpt2"
},
"object": {
"type": "string",
"example": "model"
},
"owned_by": {
"type": "string",
"example": "openai"
}
}
},
"OutputMessage": {
"oneOf": [
{
"$ref": "#/components/schemas/TextMessage"
},
{
"$ref": "#/components/schemas/ToolCallMessage"
}
]
},
"PrefillToken": {
"type": "object",
"required": [
"id",
"text",
"logprob"
],
"properties": {
"id": {
"type": "integer",
"format": "int32",
"example": 0,
"minimum": 0
},
"logprob": {
"type": "number",
"format": "float",
"example": -0.34,
"nullable": true
},
"text": {
"type": "string",
"example": "test"
}
}
},
"Prompt": {
"type": "array",
"items": {
"type": "string"
}
},
"SagemakerRequest": {
"oneOf": [
{
"$ref": "#/components/schemas/CompatGenerateRequest"
},
{
"$ref": "#/components/schemas/ChatRequest"
},
{
"$ref": "#/components/schemas/CompletionRequest"
}
]
},
"SagemakerResponse": {
"oneOf": [
{
"$ref": "#/components/schemas/GenerateResponse"
},
{
"$ref": "#/components/schemas/ChatCompletion"
},
{
"$ref": "#/components/schemas/CompletionFinal"
}
]
},
"SagemakerStreamResponse": {
"oneOf": [
{
"$ref": "#/components/schemas/StreamResponse"
},
{
"$ref": "#/components/schemas/ChatCompletionChunk"
},
{
"$ref": "#/components/schemas/Chunk"
}
]
},
"SimpleToken": {
"type": "object",
"required": [
"id",
"text",
"start",
"stop"
],
"properties": {
"id": {
"type": "integer",
"format": "int32",
"example": 0,
"minimum": 0
},
"start": {
"type": "integer",
"example": 0,
"minimum": 0
},
"stop": {
"type": "integer",
"example": 2,
"minimum": 0
},
"text": {
"type": "string",
"example": "test"
}
}
},
"StreamDetails": {
"type": "object",
"required": [
"finish_reason",
"generated_tokens",
"input_length"
],
"properties": {
"finish_reason": {
"$ref": "#/components/schemas/FinishReason"
},
"generated_tokens": {
"type": "integer",
"format": "int32",
"example": 1,
"minimum": 0
},
"input_length": {
"type": "integer",
"format": "int32",
"example": 1,
"minimum": 0
},
"seed": {
"type": "integer",
"format": "int64",
"example": 42,
"nullable": true,
"minimum": 0
}
}
},
"StreamOptions": {
"type": "object",
"required": [
"include_usage"
],
"properties": {
"include_usage": {
"type": "boolean",
"description": "If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. All other chunks will also include a usage field, but with a null value.",
"example": "true"
}
}
},
"StreamResponse": {
"type": "object",
"required": [
"index",
"token"
],
"properties": {
"details": {
"allOf": [
{
"$ref": "#/components/schemas/StreamDetails"
}
],
"default": "null",
"nullable": true
},
"generated_text": {
"type": "string",
"default": "null",
"example": "test",
"nullable": true
},
"index": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"token": {
"$ref": "#/components/schemas/Token"
},
"top_tokens": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Token"
}
}
}
},
"TextMessage": {
"type": "object",
"required": [
"role",
"content"
],
"properties": {
"content": {
"type": "string",
"example": "My name is David and I"
},
"role": {
"type": "string",
"example": "user"
}
}
},
"Token": {
"type": "object",
"required": [
"id",
"text",
"logprob",
"special"
],
"properties": {
"id": {
"type": "integer",
"format": "int32",
"example": 0,
"minimum": 0
},
"logprob": {
"type": "number",
"format": "float",
"example": -0.34,
"nullable": true
},
"special": {
"type": "boolean",
"example": "false"
},
"text": {
"type": "string",
"example": "test"
}
}
},
"TokenizeResponse": {
"type": "array",
"items": {
"$ref": "#/components/schemas/SimpleToken"
}
},
"Tool": {
"type": "object",
"required": [
"type",
"function"
],
"properties": {
"function": {
"$ref": "#/components/schemas/FunctionDefinition"
},
"type": {
"type": "string",
"example": "function"
}
}
},
"ToolCall": {
"type": "object",
"required": [
"id",
"type",
"function"
],
"properties": {
"function": {
"$ref": "#/components/schemas/FunctionDefinition"
},
"id": {
"type": "string"
},
"type": {
"type": "string"
}
}
},
"ToolCallDelta": {
"type": "object",
"required": [
"role",
"tool_calls"
],
"properties": {
"role": {
"type": "string",
"example": "assistant"
},
"tool_calls": {
"$ref": "#/components/schemas/DeltaToolCall"
}
}
},
"ToolCallMessage": {
"type": "object",
"required": [
"role",
"tool_calls"
],
"properties": {
"role": {
"type": "string",
"example": "assistant"
},
"tool_calls": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ToolCall"
}
}
}
},
"ToolChoice": {
"oneOf": [
{
"type": "string",
"description": "Means the model can pick between generating a message or calling one or more tools.",
"enum": [
"auto"
]
},
{
"type": "string",
"description": "Means the model will not call any tool and instead generates a message.",
"enum": [
"none"
]
},
{
"type": "string",
"description": "Means the model must call one or more tools.",
"enum": [
"required"
]
},
{
"type": "object",
"required": [
"function"
],
"properties": {
"function": {
"$ref": "#/components/schemas/FunctionName"
}
}
}
],
"description": "<https://platform.openai.com/docs/guides/function-calling/configuring-function-calling-behavior-using-the-tool_choice-parameter>"
},
"Url": {
"type": "object",
"required": [
"url"
],
"properties": {
"url": {
"type": "string"
}
}
},
"Usage": {
"type": "object",
"required": [
"prompt_tokens",
"completion_tokens",
"total_tokens"
],
"properties": {
"completion_tokens": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"prompt_tokens": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"total_tokens": {
"type": "integer",
"format": "int32",
"minimum": 0
}
}
}
}
},
"tags": [
{
"name": "Text Generation Inference",
"description": "Hugging Face Text Generation Inference API"
}
]
}
| text-generation-inference/docs/openapi.json/0 | {
"file_path": "text-generation-inference/docs/openapi.json",
"repo_id": "text-generation-inference",
"token_count": 38596
} |
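The include_usage description in the StreamOptions schema above explains that, when the flag is set, the server streams one extra chunk whose usage field carries token statistics for the whole request ahead of the final data: [DONE] message, while every other chunk carries usage set to null. Below is a minimal client-side sketch of reading that chunk from the /v1/chat/completions route documented in this spec; the base URL http://localhost:8080 and the prompt are assumptions for illustration, not values taken from the spec.

# Hedged sketch: stream a chat completion and pick out the trailing usage chunk.
# The server address below is an assumption; point it at your own deployment.
import json

import requests

payload = {
    "messages": [{"role": "user", "content": "Say hello in one short sentence."}],
    "stream": True,
    # Per the StreamOptions schema: ask for an extra usage-bearing chunk.
    "stream_options": {"include_usage": True},
}

with requests.post(
    "http://localhost:8080/v1/chat/completions", json=payload, stream=True, timeout=60
) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines():
        if not line or not line.startswith(b"data:"):
            continue
        data = line[len(b"data:"):].strip()
        if data == b"[DONE]":
            break
        chunk = json.loads(data)
        # Content chunks carry usage = null; the final extra chunk carries the totals.
        if chunk.get("usage"):
            print("usage:", chunk["usage"])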
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 185,
"logprob": -1.546875,
"special": false,
"text": "\n"
},
{
"id": 549,
"logprob": -2.859375,
"special": false,
"text": "The"
},
{
"id": 1727,
"logprob": -2.4375,
"special": false,
"text": " test"
},
{
"id": 3102,
"logprob": -0.83984375,
"special": false,
"text": " request"
},
{
"id": 317,
"logprob": -1.1328125,
"special": false,
"text": " is"
},
{
"id": 254,
"logprob": -1.515625,
"special": false,
"text": " the"
},
{
"id": 1022,
"logprob": -1.15625,
"special": false,
"text": " first"
},
{
"id": 3458,
"logprob": -0.3671875,
"special": false,
"text": " step"
},
{
"id": 279,
"logprob": -0.88671875,
"special": false,
"text": " in"
},
{
"id": 254,
"logprob": -0.69140625,
"special": false,
"text": " the"
}
],
"top_tokens": null
},
"generated_text": "\nThe test request is the first step in the"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 185,
"logprob": -1.546875,
"special": false,
"text": "\n"
},
{
"id": 549,
"logprob": -2.859375,
"special": false,
"text": "The"
},
{
"id": 1727,
"logprob": -2.4375,
"special": false,
"text": " test"
},
{
"id": 3102,
"logprob": -0.83984375,
"special": false,
"text": " request"
},
{
"id": 317,
"logprob": -1.1328125,
"special": false,
"text": " is"
},
{
"id": 254,
"logprob": -1.515625,
"special": false,
"text": " the"
},
{
"id": 1022,
"logprob": -1.15625,
"special": false,
"text": " first"
},
{
"id": 3458,
"logprob": -0.3671875,
"special": false,
"text": " step"
},
{
"id": 279,
"logprob": -0.88671875,
"special": false,
"text": " in"
},
{
"id": 254,
"logprob": -0.69140625,
"special": false,
"text": " the"
}
],
"top_tokens": null
},
"generated_text": "\nThe test request is the first step in the"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 185,
"logprob": -1.546875,
"special": false,
"text": "\n"
},
{
"id": 549,
"logprob": -2.859375,
"special": false,
"text": "The"
},
{
"id": 1727,
"logprob": -2.4375,
"special": false,
"text": " test"
},
{
"id": 3102,
"logprob": -0.83984375,
"special": false,
"text": " request"
},
{
"id": 317,
"logprob": -1.1328125,
"special": false,
"text": " is"
},
{
"id": 254,
"logprob": -1.515625,
"special": false,
"text": " the"
},
{
"id": 1022,
"logprob": -1.15625,
"special": false,
"text": " first"
},
{
"id": 3458,
"logprob": -0.3671875,
"special": false,
"text": " step"
},
{
"id": 279,
"logprob": -0.88671875,
"special": false,
"text": " in"
},
{
"id": 254,
"logprob": -0.69140625,
"special": false,
"text": " the"
}
],
"top_tokens": null
},
"generated_text": "\nThe test request is the first step in the"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 185,
"logprob": -1.546875,
"special": false,
"text": "\n"
},
{
"id": 549,
"logprob": -2.859375,
"special": false,
"text": "The"
},
{
"id": 1727,
"logprob": -2.4375,
"special": false,
"text": " test"
},
{
"id": 3102,
"logprob": -0.83984375,
"special": false,
"text": " request"
},
{
"id": 317,
"logprob": -1.1328125,
"special": false,
"text": " is"
},
{
"id": 254,
"logprob": -1.515625,
"special": false,
"text": " the"
},
{
"id": 1022,
"logprob": -1.15625,
"special": false,
"text": " first"
},
{
"id": 3458,
"logprob": -0.3671875,
"special": false,
"text": " step"
},
{
"id": 279,
"logprob": -0.88671875,
"special": false,
"text": " in"
},
{
"id": 254,
"logprob": -0.69140625,
"special": false,
"text": " the"
}
],
"top_tokens": null
},
"generated_text": "\nThe test request is the first step in the"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_deepseek_v2/test_flash_deepseek_v2_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_deepseek_v2/test_flash_deepseek_v2_load.json",
"repo_id": "text-generation-inference",
"token_count": 4036
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 13,
"logprob": -2.2539062,
"special": false,
"text": "."
},
{
"id": 578,
"logprob": -0.15563965,
"special": false,
"text": " The"
},
{
"id": 3622,
"logprob": -0.8203125,
"special": false,
"text": " server"
},
{
"id": 706,
"logprob": 0.0,
"special": false,
"text": " has"
},
{
"id": 539,
"logprob": 0.0,
"special": false,
"text": " not"
},
{
"id": 3686,
"logprob": 0.0,
"special": false,
"text": " yet"
},
{
"id": 3288,
"logprob": 0.0,
"special": false,
"text": " sent"
},
{
"id": 904,
"logprob": 0.0,
"special": false,
"text": " any"
},
{
"id": 828,
"logprob": 0.0,
"special": false,
"text": " data"
},
{
"id": 382,
"logprob": -1.5517578,
"special": false,
"text": ".\n\n"
}
],
"top_tokens": null
},
"generated_text": "Test request. The server has not yet sent any data.\n\n"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 859
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 12,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 450,
"logprob": -0.26342773,
"special": false,
"text": " The"
},
{
"id": 21282,
"logprob": -0.01838684,
"special": false,
"text": " cow"
},
{
"id": 322,
"logprob": -0.18041992,
"special": false,
"text": " and"
},
{
"id": 521,
"logprob": -0.62841797,
"special": false,
"text": " ch"
},
{
"id": 21475,
"logprob": -0.0037956238,
"special": false,
"text": "icken"
},
{
"id": 526,
"logprob": -0.018737793,
"special": false,
"text": " are"
},
{
"id": 373,
"logprob": -1.0820312,
"special": false,
"text": " on"
},
{
"id": 263,
"logprob": -0.5083008,
"special": false,
"text": " a"
},
{
"id": 25695,
"logprob": -0.07128906,
"special": false,
"text": " beach"
},
{
"id": 29889,
"logprob": -0.12573242,
"special": false,
"text": "."
},
{
"id": 32002,
"logprob": -0.0029792786,
"special": true,
"text": "<end_of_utterance>"
},
{
"id": 2,
"logprob": -0.00024962425,
"special": true,
"text": "</s>"
}
],
"top_tokens": null
},
"generated_text": " The cow and chicken are on a beach."
}
| text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics_two_images.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics_two_images.json",
"repo_id": "text-generation-inference",
"token_count": 1028
} |
[
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "In a small town, a chicken named Cluck",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1738753835,
"id": "",
"model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
"object": "chat.completion",
"system_fingerprint": "3.1.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 50,
"total_tokens": 60
}
},
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"message": {
"content": "In a small town, a chicken named Cluck",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1738753835,
"id": "",
"model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
"object": "chat.completion",
"system_fingerprint": "3.1.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 50,
"total_tokens": 60
}
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_mllama/test_mllama_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mllama/test_mllama_load.json",
"repo_id": "text-generation-inference",
"token_count": 664
} |
import pytest
@pytest.fixture(scope="module")
def compressed_tensors_w8a8_int_dynamic_weight_handle(launcher):
with launcher(
"danieldk/Qwen2.5-1.5B-Instruct-w8a8-int-dynamic-weight",
num_shard=2,
quantize="compressed-tensors",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def compressed_tensors_w8a8_int_dynamic_weight(
compressed_tensors_w8a8_int_dynamic_weight_handle,
):
await compressed_tensors_w8a8_int_dynamic_weight_handle.health(300)
return compressed_tensors_w8a8_int_dynamic_weight_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_compressed_tensors_w8a8_int_dynamic_weight(
compressed_tensors_w8a8_int_dynamic_weight, response_snapshot
):
response = await compressed_tensors_w8a8_int_dynamic_weight.generate(
"What is deep learning?",
        # prefer a longer response than the default and allow the LLM to end generation on its own
max_new_tokens=1000,
decoder_input_details=True,
)
assert (
response.generated_text
== " Deep learning is a subset of machine learning that uses neural networks to learn from data. It is a type of artificial intelligence that can learn from and make predictions on large amounts of data. Deep learning is used in a variety of applications, including image and speech recognition, natural language processing, and autonomous vehicles. It is a rapidly growing field with many potential applications in the future."
)
assert response.details.generated_tokens == 76
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_compressed_tensors_w8a8_int_dynamic_weight_all_params(
compressed_tensors_w8a8_int_dynamic_weight, response_snapshot
):
response = await compressed_tensors_w8a8_int_dynamic_weight.generate(
"What is deep learning",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "What is deep learning?\nDeep Learning (DL), or artificial neural networks"
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_compressed_tensors_w8a8_int_dynamic_weight_load(
compressed_tensors_w8a8_int_dynamic_weight, generate_load, response_snapshot
):
responses = await generate_load(
compressed_tensors_w8a8_int_dynamic_weight,
"What is deep learning?",
max_new_tokens=10,
n=4,
)
assert (
responses[0].generated_text
== " Deep learning is a subset of machine learning that uses"
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_compressed_tensors_w8a8_int_dynamic_weight.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_compressed_tensors_w8a8_int_dynamic_weight.py",
"repo_id": "text-generation-inference",
"token_count": 1234
} |
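The tests above drive a launched server through an async client fixture whose implementation is outside this excerpt. For orientation, a rough plain-HTTP equivalent of a single generate call is sketched below; the base URL is an assumption (the launcher fixture picks the real address), and the parameter names mirror the keyword arguments used in the test.

# Hedged sketch: the raw HTTP shape behind a client.generate(...) call.
# http://localhost:8080 is an assumed address, not one taken from the tests.
import requests

payload = {
    "inputs": "What is deep learning?",
    "parameters": {
        "max_new_tokens": 10,
        "details": True,
        "decoder_input_details": True,
    },
}

resp = requests.post("http://localhost:8080/generate", json=payload, timeout=60)
resp.raise_for_status()
body = resp.json()
print(body["generated_text"])
print(body["details"]["generated_tokens"])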
import pytest
@pytest.fixture(scope="module")
def flash_llama_fp8_handle(launcher):
with launcher("meta-llama/Meta-Llama-3-8B", num_shard=2, quantize="fp8") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_fp8(flash_llama_fp8_handle):
await flash_llama_fp8_handle.health(300)
return flash_llama_fp8_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_fp8(flash_llama_fp8, response_snapshot):
response = await flash_llama_fp8.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.generated_text == " for the 2019-2020 school year"
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_fp8_all_params(flash_llama_fp8, response_snapshot):
response = await flash_llama_fp8.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_fp8_load(flash_llama_fp8, generate_load, response_snapshot):
responses = await generate_load(
flash_llama_fp8, "Test request", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert responses[0].generated_text == " for the 2019-2020 school year"
assert all(
[r.generated_text == responses[0].generated_text for r in responses]
), f"Different messages : {[r.generated_text for r in responses]}"
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_llama_fp8.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_llama_fp8.py",
"repo_id": "text-generation-inference",
"token_count": 802
} |
import pytest
@pytest.fixture(scope="module")
def flash_phi_handle(launcher):
with launcher("microsoft/phi-2", num_shard=1) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_phi(flash_phi_handle):
await flash_phi_handle.health(300)
return flash_phi_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_phi(flash_phi, response_snapshot):
response = await flash_phi.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response.generated_text == ': {request}")\n response = self'
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_phi_all_params(flash_phi, response_snapshot):
response = await flash_phi.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["network"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 6
assert response.generated_text == "Test request to send data over a network"
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_phi_load(flash_phi, generate_load, response_snapshot):
responses = await generate_load(flash_phi, "Test request", max_new_tokens=10, n=4)
assert len(responses) == 4
assert all(
[r.generated_text == responses[0].generated_text for r in responses]
), f"{[r.generated_text for r in responses]}"
assert responses[0].generated_text == ': {request}")\n response = self'
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_phi.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_phi.py",
"repo_id": "text-generation-inference",
"token_count": 749
} |
import pytest
@pytest.fixture(scope="module")
def fused_kernel_mamba_handle(launcher):
with launcher("state-spaces/mamba-130m-hf", num_shard=1) as handle:
yield handle
@pytest.fixture(scope="module")
async def fused_kernel_mamba(fused_kernel_mamba_handle):
await fused_kernel_mamba_handle.health(300)
return fused_kernel_mamba_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_mamba(fused_kernel_mamba, response_snapshot):
response = await fused_kernel_mamba.generate(
"What is Deep Learning?", max_new_tokens=10
)
assert response.details.generated_tokens == 10
assert response.generated_text == "\n\nDeep learning is a new type of machine"
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_mamba_all_params(fused_kernel_mamba, response_snapshot):
response = await fused_kernel_mamba.generate(
"blue, red, yellow, ",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "blue, red, yellow, \nand blue colors. A number of different color"
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_mamba_load(
fused_kernel_mamba, generate_load, generous_response_snapshot
):
responses = await generate_load(
fused_kernel_mamba, "What is Deep Learning?", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert responses[0].generated_text == "\n\nDeep learning is a new type of machine"
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == generous_response_snapshot
| text-generation-inference/integration-tests/models/test_mamba.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_mamba.py",
"repo_id": "text-generation-inference",
"token_count": 825
} |
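Several of the load tests above rely on a generate_load helper that issues n identical requests concurrently and returns the list of responses; its definition is not part of this excerpt. A minimal stand-in, under the same assumptions as the previous sketch (a reachable /generate route at an assumed local address), could look like this:

# Hypothetical stand-in for the generate_load fixture: fan out n identical
# /generate requests in parallel and collect the decoded JSON bodies.
from concurrent.futures import ThreadPoolExecutor

import requests

BASE_URL = "http://localhost:8080"  # assumption; the real launcher picks the address

def generate_once(prompt: str, max_new_tokens: int) -> dict:
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": max_new_tokens}}
    resp = requests.post(f"{BASE_URL}/generate", json=payload, timeout=60)
    resp.raise_for_status()
    return resp.json()

def generate_load(prompt: str, max_new_tokens: int, n: int) -> list[dict]:
    with ThreadPoolExecutor(max_workers=n) as pool:
        futures = [pool.submit(generate_once, prompt, max_new_tokens) for _ in range(n)]
        return [f.result() for f in futures]

responses = generate_load("What is Deep Learning?", max_new_tokens=10, n=4)
# The tests above assert that all n generations are identical.
assert all(r["generated_text"] == responses[0]["generated_text"] for r in responses)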
use std::fmt;
use std::process::Command;
pub(crate) struct Env {
cargo_target: &'static str,
cargo_version: &'static str,
git_sha: &'static str,
docker_label: &'static str,
nvidia_env: String,
xpu_env: String,
}
impl Env {
pub fn new() -> Self {
let nvidia_env = nvidia_smi();
let xpu_env = xpu_smi();
Self {
nvidia_env: nvidia_env.unwrap_or("N/A".to_string()),
xpu_env: xpu_env.unwrap_or("N/A".to_string()),
cargo_target: env!("VERGEN_CARGO_TARGET_TRIPLE"),
cargo_version: env!("VERGEN_RUSTC_SEMVER"),
git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("N/A"),
docker_label: option_env!("DOCKER_LABEL").unwrap_or("N/A"),
}
}
}
impl fmt::Display for Env {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Runtime environment:")?;
writeln!(f, "Target: {}", self.cargo_target)?;
writeln!(f, "Cargo version: {}", self.cargo_version)?;
writeln!(f, "Commit sha: {}", self.git_sha)?;
writeln!(f, "Docker label: {}", self.docker_label)?;
writeln!(f, "nvidia-smi:\n{}", self.nvidia_env)?;
write!(f, "xpu-smi:\n{}", self.xpu_env)?;
Ok(())
}
}
fn nvidia_smi() -> Option<String> {
let output = Command::new("nvidia-smi").output().ok()?;
let nvidia_smi = String::from_utf8(output.stdout).ok()?;
let output = nvidia_smi.replace('\n', "\n ");
Some(output.trim().to_string())
}
fn xpu_smi() -> Option<String> {
let output = Command::new("xpu-smi").arg("discovery").output().ok()?;
let xpu_smi = String::from_utf8(output.stdout).ok()?;
let output = xpu_smi.replace('\n', "\n ");
Some(output.trim().to_string())
}
| text-generation-inference/launcher/src/env_runtime.rs/0 | {
"file_path": "text-generation-inference/launcher/src/env_runtime.rs",
"repo_id": "text-generation-inference",
"token_count": 861
} |
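nvidia_smi() and xpu_smi() above share one pattern: run the external tool, capture stdout, trim it, and indent every continuation line so the multi-line output nests under its heading in the Display impl. The short Python sketch below only illustrates that pattern; it is not part of the launcher, and the three-space indent is an arbitrary choice.

# Illustration only: capture a diagnostic command's output and indent its
# continuation lines so it nests under a report heading.
import subprocess

def tool_report(*cmd: str) -> str:
    try:
        out = subprocess.run(cmd, capture_output=True, text=True, check=True).stdout
    except (OSError, subprocess.CalledProcessError):
        return "N/A"
    return out.strip().replace("\n", "\n   ")

print("Runtime environment:")
print("nvidia-smi:\n" + tool_report("nvidia-smi"))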
{
lib,
mkShell,
black,
cmake,
isort,
ninja,
which,
cudaPackages,
openssl,
pkg-config,
poetry,
protobuf,
python3,
pyright,
redocly,
ruff,
rust-bin,
server,
# Enable dependencies for building CUDA packages. Useful for e.g.
# developing marlin/moe-kernels in-place.
withCuda ? false,
}:
mkShell {
nativeBuildInputs =
[
black
isort
pkg-config
poetry
(rust-bin.stable.latest.default.override {
extensions = [
"rust-analyzer"
"rust-src"
];
})
protobuf
pyright
redocly
ruff
]
++ (lib.optionals withCuda [
cmake
ninja
which
# For most Torch-based extensions, setting CUDA_HOME is enough, but
# some custom CMake builds (e.g. vLLM) also need to have nvcc in PATH.
cudaPackages.cuda_nvcc
]);
buildInputs =
[
openssl.dev
]
++ (with python3.pkgs; [
venvShellHook
docker
pip
ipdb
click
pytest
pytest-asyncio
syrupy
])
++ (lib.optionals withCuda (
with cudaPackages;
[
cuda_cccl
cuda_cudart
cuda_nvrtc
cuda_nvtx
cuda_profiler_api
cudnn
libcublas
libcusolver
libcusparse
]
));
inputsFrom = [ server ];
env = lib.optionalAttrs withCuda {
CUDA_HOME = "${lib.getDev cudaPackages.cuda_nvcc}";
TORCH_CUDA_ARCH_LIST = lib.concatStringsSep ";" python3.pkgs.torch.cudaCapabilities;
};
venvDir = "./.venv";
postVenvCreation = ''
unset SOURCE_DATE_EPOCH
( cd server ; python -m pip install --no-dependencies -e . )
( cd clients/python ; python -m pip install --no-dependencies -e . )
'';
postShellHook =
''
unset SOURCE_DATE_EPOCH
export PATH=${cudaPackages.backendStdenv.cc}/bin:$PATH:~/.cargo/bin
''
# At various points in time, the latest gcc supported by CUDA differs
# from the default version in nixpkgs. A lot of the dependencies in
# the impure environment pull in the default gcc from nixpkgs, so we
# end up with the CUDA-supported gcc and the nixpkgs default gcc in
    # the path. To ensure that we can build CUDA kernels, put the CUDA
    # toolchain first in the path. It's a hack, but it works.
+ lib.optionalString withCuda ''
export PATH=${cudaPackages.backendStdenv.cc}/bin:$PATH
'';
}
| text-generation-inference/nix/impure-shell.nix/0 | {
"file_path": "text-generation-inference/nix/impure-shell.nix",
"repo_id": "text-generation-inference",
"token_count": 1104
} |
/// HTTP Server logic
use crate::config::Config;
use crate::infer::{Backend, Infer, InferError, InferResponse, InferStreamResponse};
#[cfg(feature = "kserve")]
use crate::kserve::{
kerve_server_metadata, kserve_health_live, kserve_health_ready, kserve_model_infer,
kserve_model_metadata, kserve_model_metadata_ready,
};
use crate::sagemaker::{
sagemaker_compatibility, SagemakerRequest, SagemakerResponse, SagemakerStreamResponse,
__path_sagemaker_compatibility,
};
use crate::validation::ValidationError;
use crate::vertex::vertex_compatibility;
use crate::ChatTokenizeResponse;
use crate::{
usage_stats, BestOfSequence, Details, ErrorResponse, FinishReason, FunctionName,
GenerateParameters, GenerateRequest, GenerateResponse, GrammarType, HubModelInfo,
HubProcessorConfig, HubTokenizerConfig, Info, Message, MessageChunk, MessageContent,
OutputMessage, PrefillToken, SimpleToken, StreamDetails, StreamOptions, StreamResponse,
TextMessage, Token, TokenizeResponse, Tokenizer, ToolCallDelta, ToolCallMessage, Url, Usage,
Validation,
};
use crate::{
ChatCompletion, ChatCompletionChoice, ChatCompletionChunk, ChatCompletionComplete,
ChatCompletionDelta, ChatCompletionLogprob, ChatCompletionLogprobs, ChatCompletionTopLogprob,
ChatRequest, Chunk, CompatGenerateRequest, Completion, CompletionComplete, CompletionFinal,
CompletionRequest, CompletionType, DeltaToolCall, Function, Prompt, Tool,
};
use crate::{FunctionDefinition, HubPreprocessorConfig, ToolCall, ToolChoice};
use crate::{ModelInfo, ModelsInfo};
use async_stream::__private::AsyncStream;
use axum::extract::{DefaultBodyLimit, Extension};
use axum::http::{HeaderMap, HeaderValue, Method, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::{http, Json, Router};
use axum_tracing_opentelemetry::middleware::OtelAxumLayer;
use futures::stream::StreamExt;
use futures::stream::{FuturesOrdered, FuturesUnordered};
use futures::Stream;
use futures::TryStreamExt;
use hf_hub::api::tokio::{Api, ApiBuilder, ApiRepo};
use hf_hub::{Cache, Repo, RepoType};
use http::header::AUTHORIZATION;
use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle};
use pyo3::prelude::*;
use pyo3::types::IntoPyDict;
use regex::Regex;
use serde_json::Value;
use std::convert::Infallible;
use std::fs::File;
use std::io::BufReader;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use tokio::select;
use tokio::signal;
use tokio::sync::oneshot;
use tokio::time::Instant;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;
fn encoding_to_tokens(encoding: &tokenizers::Encoding, input: &str) -> Vec<SimpleToken> {
let offsets = encoding.get_offsets();
let input_ids = encoding.get_ids();
if offsets.len() == input_ids.len() {
input_ids
.iter()
.zip(offsets)
.map(|(&id, &(start, stop))| {
let text = input
.chars()
.skip(start)
.take(stop - start)
.collect::<String>();
SimpleToken {
id,
text,
start,
stop,
}
})
.collect()
} else {
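        // Fallback when offsets and ids do not line up: keep the ids but emit empty text spans.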
encoding
.get_ids()
.iter()
.map(|&id| SimpleToken {
id,
text: "".to_string(),
start: 0,
stop: 0,
})
.collect()
}
}
/// Generate tokens if `stream == false` or a stream of tokens if `stream == true`
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/",
request_body = CompatGenerateRequest,
responses(
(status = 200, description = "Generated Text",
content(
("application/json" = Vec<GenerateResponse>),
("text/event-stream" = StreamResponse),
)),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"})),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"})),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"})),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"})),
)
)]
#[instrument(skip(infer, req))]
pub(crate) async fn compat_generate(
Extension(default_return_full_text): Extension<bool>,
infer: Extension<Infer>,
compute_type: Extension<ComputeType>,
Json(mut req): Json<CompatGenerateRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
// default return_full_text given the pipeline_tag
if req.parameters.return_full_text.is_none() {
req.parameters.return_full_text = Some(default_return_full_text)
}
// switch on stream
if req.stream {
Ok(generate_stream(infer, compute_type, Json(req.into()))
.await
.into_response())
} else {
let (headers, Json(generation)) = generate(infer, compute_type, Json(req.into())).await?;
// wrap generation inside a Vec to match api-inference
Ok((headers, Json(vec![generation])).into_response())
}
}
/// Text Generation Inference endpoint info
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/info",
responses((status = 200, description = "Served model info", body = Info))
)]
#[instrument]
async fn get_model_info(info: Extension<Info>) -> Json<Info> {
Json(info.0)
}
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/v1/models",
responses(
(status = 200, description = "Served model info", body = ModelInfo),
(status = 404, description = "Model not found", body = ErrorResponse),
)
)]
#[instrument(skip(info))]
/// Get model info
async fn openai_get_model_info(info: Extension<Info>) -> Json<ModelsInfo> {
Json(ModelsInfo {
data: vec![ModelInfo {
id: info.0.model_id.clone(),
object: "model".to_string(),
created: 0, // TODO: determine how to get this
owned_by: info.0.model_id.clone(),
}],
..Default::default()
})
}
/// Template and tokenize ChatRequest
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/chat_tokenize",
request_body = ChatRequest,
responses(
(status = 200, description = "Templated and tokenized ChatRequest", body = ChatTokenizeResponse),
(status = 404, description = "Failed to tokenize ChatRequest", body = ErrorResponse),
)
)]
async fn get_chat_tokenize(
Extension(infer): Extension<Infer>,
Json(chat): Json<ChatRequest>,
) -> Result<(HeaderMap, Json<ChatTokenizeResponse>), (StatusCode, Json<ErrorResponse>)> {
metrics::counter!("tgi_request_count").increment(1);
let generate_request: GenerateRequest = chat.try_into_generate(&infer)?.0;
let input = generate_request.inputs.clone();
let encoding = infer.tokenize(generate_request).await?;
let tokens = encoding_to_tokens(&encoding, &input);
let resp = ChatTokenizeResponse {
tokenize_response: TokenizeResponse(tokens),
templated_text: input,
};
Ok((HeaderMap::new(), Json(resp)))
}
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/health",
responses(
(status = 200, description = "Everything is working fine"),
(status = 503, description = "Text generation inference is down", body = ErrorResponse,
example = json ! ({"error": "unhealthy", "error_type": "healthcheck"})),
)
)]
#[instrument(skip(infer))]
/// Health check method
async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
match infer.health().await {
true => Ok(()),
false => Err((
StatusCode::SERVICE_UNAVAILABLE,
Json(ErrorResponse {
error: "unhealthy".to_string(),
error_type: "healthcheck".to_string(),
}),
)),
}
}
/// Generate tokens
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/generate",
request_body = GenerateRequest,
responses(
(status = 200, description = "Generated Text", body = GenerateResponse),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"})),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"})),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"})),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"})),
)
)]
#[instrument(
skip_all,
fields(
parameters = ? req.parameters,
total_time,
validation_time,
queue_time,
inference_time,
time_per_token,
seed,
)
)]
async fn generate(
infer: Extension<Infer>,
Extension(ComputeType(compute_type)): Extension<ComputeType>,
Json(req): Json<GenerateRequest>,
) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
let span = tracing::Span::current();
let (headers, _, response) =
generate_internal(infer, ComputeType(compute_type), Json(req), span).await?;
Ok((headers, response))
}
pub(crate) async fn generate_internal(
infer: Extension<Infer>,
ComputeType(compute_type): ComputeType,
Json(req): Json<GenerateRequest>,
span: tracing::Span,
) -> Result<(HeaderMap, u32, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
let start_time = Instant::now();
metrics::counter!("tgi_request_count").increment(1);
    // Do not log ultra-long inputs, like image payloads.
tracing::debug!(
"Input: {}",
&req.inputs.chars().take(1000).collect::<String>()
);
let compute_characters = req.inputs.chars().count();
let mut add_prompt = None;
if req.parameters.return_full_text.unwrap_or(false) {
add_prompt = Some(req.inputs.clone());
}
let details: bool = req.parameters.details || req.parameters.decoder_input_details;
// Inference
let (response, best_of_responses) = match req.parameters.best_of {
Some(best_of) if best_of > 1 => {
let (response, best_of_responses) = infer.generate_best_of(req, best_of).await?;
(response, Some(best_of_responses))
}
_ => (infer.generate(req).await?, None),
};
// Token details
let input_length = response._input_length;
let details = match details {
true => {
// convert best_of_responses
let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| {
responses
.into_iter()
.map(|response: InferResponse| {
// Add prompt if return_full_text
let mut output_text = response.generated_text.text;
if let Some(prompt) = &add_prompt {
output_text = prompt.clone() + &output_text;
}
BestOfSequence {
generated_text: output_text,
finish_reason: response.generated_text.finish_reason,
generated_tokens: response.generated_text.generated_tokens,
prefill: response.prefill,
tokens: response.tokens,
top_tokens: response.top_tokens,
seed: response.generated_text.seed,
}
})
.collect()
});
Some(Details {
finish_reason: response.generated_text.finish_reason,
generated_tokens: response.generated_text.generated_tokens,
prefill: response.prefill,
tokens: response.tokens,
seed: response.generated_text.seed,
best_of_sequences,
top_tokens: response.top_tokens,
})
}
false => None,
};
// Timings
let total_time = start_time.elapsed();
let validation_time = response.queued - start_time;
let queue_time = response.start - response.queued;
let inference_time = Instant::now() - response.start;
let time_per_token = inference_time / response.generated_text.generated_tokens;
// Tracing metadata
span.record("total_time", format!("{total_time:?}"));
span.record("validation_time", format!("{validation_time:?}"));
span.record("queue_time", format!("{queue_time:?}"));
span.record("inference_time", format!("{inference_time:?}"));
span.record("time_per_token", format!("{time_per_token:?}"));
span.record("seed", format!("{:?}", response.generated_text.seed));
// Headers
let mut headers = HeaderMap::new();
headers.insert("x-compute-type", compute_type.parse().unwrap());
headers.insert(
"x-compute-time",
total_time.as_secs_f64().to_string().parse().unwrap(),
);
headers.insert(
"x-compute-characters",
compute_characters.to_string().parse().unwrap(),
);
headers.insert(
"x-total-time",
total_time.as_millis().to_string().parse().unwrap(),
);
headers.insert(
"x-validation-time",
validation_time.as_millis().to_string().parse().unwrap(),
);
headers.insert(
"x-queue-time",
queue_time.as_millis().to_string().parse().unwrap(),
);
headers.insert(
"x-inference-time",
inference_time.as_millis().to_string().parse().unwrap(),
);
headers.insert(
"x-time-per-token",
time_per_token.as_millis().to_string().parse().unwrap(),
);
headers.insert("x-prompt-tokens", input_length.into());
headers.insert(
"x-generated-tokens",
response.generated_text.generated_tokens.into(),
);
// Metrics
metrics::counter!("tgi_request_success").increment(1);
metrics::histogram!("tgi_request_duration").record(total_time.as_secs_f64());
metrics::histogram!("tgi_request_validation_duration").record(validation_time.as_secs_f64());
metrics::histogram!("tgi_request_queue_duration").record(queue_time.as_secs_f64());
metrics::histogram!("tgi_request_inference_duration").record(inference_time.as_secs_f64());
metrics::histogram!("tgi_request_mean_time_per_token_duration")
.record(time_per_token.as_secs_f64());
metrics::histogram!("tgi_request_generated_tokens")
.record(response.generated_text.generated_tokens as f64);
// Send response
let mut output_text = response.generated_text.text;
if let Some(prompt) = add_prompt {
output_text = prompt + &output_text;
}
tracing::debug!("Output: {}", output_text);
tracing::info!("Success");
let response = GenerateResponse {
generated_text: output_text,
details,
};
Ok((headers, input_length, Json(response)))
}
/// Generate a stream of token using Server-Sent Events
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/generate_stream",
request_body = GenerateRequest,
responses(
(status = 200, description = "Generated Text", body = StreamResponse,
content_type = "text/event-stream"),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"}),
content_type = "text/event-stream"),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"}),
content_type = "text/event-stream"),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"}),
content_type = "text/event-stream"),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"}),
content_type = "text/event-stream"),
)
)]
#[instrument(
skip_all,
fields(
parameters = ? req.parameters,
total_time,
validation_time,
queue_time,
inference_time,
time_per_token,
seed,
)
)]
async fn generate_stream(
Extension(infer): Extension<Infer>,
Extension(compute_type): Extension<ComputeType>,
Json(req): Json<GenerateRequest>,
) -> (
HeaderMap,
Sse<impl Stream<Item = Result<Event, Infallible>>>,
) {
let span = tracing::Span::current();
let (headers, response_stream) =
generate_stream_internal(infer, compute_type, Json(req), span).await;
let response_stream = async_stream::stream! {
let mut response_stream = Box::pin(response_stream);
while let Some(raw_event) = response_stream.next().await {
yield Ok(raw_event.map_or_else(Event::from, |token| {
Event::default()
.json_data(token)
.unwrap_or_else(|e| InferError::StreamSerializationError(e.to_string()).into())
}));
}
};
let sse = Sse::new(response_stream).keep_alive(KeepAlive::default());
(headers, sse)
}
async fn generate_stream_internal(
infer: Infer,
ComputeType(compute_type): ComputeType,
Json(req): Json<GenerateRequest>,
span: tracing::Span,
) -> (
HeaderMap,
impl Stream<Item = Result<StreamResponse, InferError>>,
) {
let start_time = Instant::now();
metrics::counter!("tgi_request_count").increment(1);
tracing::debug!("Input: {}", req.inputs);
let compute_characters = req.inputs.chars().count();
let mut headers = HeaderMap::new();
headers.insert("x-compute-type", compute_type.parse().unwrap());
headers.insert(
"x-compute-characters",
compute_characters.to_string().parse().unwrap(),
);
headers.insert("X-Accel-Buffering", "no".parse().unwrap());
let stream = async_stream::stream! {
// Inference
let mut end_reached = false;
let mut error = false;
let mut add_prompt = None;
if req.parameters.return_full_text.unwrap_or(false) {
add_prompt = Some(req.inputs.clone());
}
let details = req.parameters.details;
let best_of = req.parameters.best_of.unwrap_or(1);
if best_of != 1 {
let err = InferError::from(ValidationError::BestOfStream);
metrics::counter!("tgi_request_failure", "err" => "validation").increment(1);
tracing::error!("{err}");
yield Err(err);
} else if req.parameters.decoder_input_details {
let err = InferError::from(ValidationError::PrefillDetailsStream);
metrics::counter!("tgi_request_failure", "err" => "validation").increment(1);
tracing::error!("{err}");
yield Err(err);
} else {
match infer.generate_stream(req).instrument(info_span!(parent: &span, "async_stream")).await {
// Keep permit as long as generate_stream lives
Ok((_permit, input_length, response_stream)) => {
let mut index = 0;
let mut response_stream = Box::pin(response_stream);
// Server-Sent Event stream
while let Some(response) = response_stream.next().await {
index += 1;
match response {
Ok(response) => {
match response {
// Prefill is ignored
InferStreamResponse::Prefill(_) => {}
// Yield event for every new token
InferStreamResponse::Intermediate{
token,
top_tokens,
} => {
tracing::debug!(parent: &span, "Token: {:?}", token);
// StreamResponse
let stream_token = StreamResponse {
index,
token,
top_tokens,
generated_text: None,
details: None,
};
yield Ok(stream_token);
}
// Yield event for last token and compute timings
InferStreamResponse::End {
token,
generated_text,
start,
queued,
top_tokens,
} => {
// Token details
let details = match details {
true => Some(StreamDetails {
finish_reason: generated_text.finish_reason,
generated_tokens: generated_text.generated_tokens,
seed: generated_text.seed,
input_length,
}),
false => None,
};
// Timings
let total_time = start_time.elapsed();
let validation_time = queued - start_time;
let queue_time = start - queued;
let inference_time = Instant::now() - start;
let time_per_token = inference_time / generated_text.generated_tokens;
// Tracing metadata
span.record("total_time", format!("{total_time:?}"));
span.record("validation_time", format!("{validation_time:?}"));
span.record("queue_time", format!("{queue_time:?}"));
span.record("inference_time", format!("{inference_time:?}"));
span.record("time_per_token", format!("{time_per_token:?}"));
span.record("seed", format!("{:?}", generated_text.seed));
// Metrics
metrics::counter!("tgi_request_success").increment(1);
metrics::histogram!("tgi_request_duration").record(total_time.as_secs_f64());
metrics::histogram!("tgi_request_validation_duration").record(validation_time.as_secs_f64());
metrics::histogram!("tgi_request_queue_duration").record(queue_time.as_secs_f64());
metrics::histogram!("tgi_request_inference_duration").record(inference_time.as_secs_f64());
metrics::histogram!("tgi_request_mean_time_per_token_duration").record(time_per_token.as_secs_f64());
metrics::histogram!("tgi_request_generated_tokens").record(generated_text.generated_tokens as f64);
// StreamResponse
end_reached = true;
let mut output_text = generated_text.text;
if let Some(prompt) = add_prompt {
output_text = prompt + &output_text;
}
tracing::debug!(parent: &span, "Output: {}", output_text);
tracing::info!(parent: &span, "Success");
let stream_token = StreamResponse {
index,
token,
top_tokens,
generated_text: Some(output_text),
details
};
yield Ok(stream_token);
break;
}
}
}
// yield error
Err(err) => {
error = true;
yield Err(err);
break;
}
}
}
},
// yield error
Err(err) => {
error = true;
yield Err(err);
}
}
// Check if generation reached the end
// Skip if we already sent an error
if !end_reached && !error {
let err = InferError::IncompleteGenerationStream;
metrics::counter!("tgi_request_failure", "err" => "incomplete").increment(1);
tracing::error!("{err}");
yield Err(err);
}
}
};
(headers, stream)
}
/// Generate tokens
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/v1/completions",
request_body = CompletionRequest,
responses(
(status = 200, description = "Generated Chat Completion",
content(
("application/json" = CompletionFinal),
("text/event-stream" = Chunk),
)),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"})),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"})),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"})),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"})),
)
)]
#[instrument(
skip_all,
fields(
// parameters = ? req.parameters,
total_time,
validation_time,
queue_time,
inference_time,
time_per_token,
seed,
)
)]
pub(crate) async fn completions(
Extension(infer): Extension<Infer>,
Extension(compute_type): Extension<ComputeType>,
Extension(info): Extension<Info>,
Json(req): Json<CompletionRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
let span = tracing::Span::current();
metrics::counter!("tgi_request_count").increment(1);
let CompletionRequest {
model,
max_tokens,
seed,
stop,
stream,
temperature,
..
} = req;
let max_new_tokens = max_tokens;
let stop = stop.unwrap_or_default();
// enable greedy only when temperature is 0
let (do_sample, temperature) = match temperature {
Some(temperature) if temperature == 0.0 => (false, None),
other => (true, other),
};
// if suffix is present throw an error
if req.suffix.is_some() {
metrics::counter!("tgi_request_failure", "err" => "validation").increment(1);
return Err((
StatusCode::UNPROCESSABLE_ENTITY,
Json(ErrorResponse {
error: "Suffix is not supported and can be achieved by preprocessing the prompt."
.to_string(),
error_type: "suffix not supported".to_string(),
}),
));
}
if req.prompt.0.len() > info.max_client_batch_size {
metrics::counter!("tgi_request_failure", "err" => "validation").increment(1);
return Err((
StatusCode::UNPROCESSABLE_ENTITY,
Json(ErrorResponse {
error: format!(
"Number of prompts exceeds the maximum allowed batch size of {}",
info.max_client_batch_size
),
error_type: "batch size exceeded".to_string(),
}),
));
}
let generate_requests: Vec<GenerateRequest> = req
.prompt
.0
.iter()
.map(|prompt| GenerateRequest {
inputs: prompt.to_string(),
add_special_tokens: true,
parameters: GenerateParameters {
best_of: None,
temperature,
repetition_penalty: req.repetition_penalty,
frequency_penalty: req.frequency_penalty,
top_k: None,
top_p: req.top_p,
typical_p: None,
do_sample,
max_new_tokens,
return_full_text: None,
stop: stop.clone(),
truncate: None,
watermark: false,
details: true,
decoder_input_details: !stream,
seed,
top_n_tokens: None,
grammar: None,
adapter_id: model.as_ref().filter(|m| *m != "tgi").map(String::from),
},
})
.collect();
let mut x_compute_type = None;
let mut x_compute_characters = 0u32;
let mut x_accel_buffering = None;
if stream {
let mut response_streams = FuturesOrdered::new();
for (index, generate_request) in generate_requests.into_iter().enumerate() {
let model_id = info.model_id.clone();
let system_fingerprint =
format!("{}-{}", info.version, info.docker_label.unwrap_or("native"));
let infer_clone = infer.clone();
let compute_type_clone = compute_type.clone();
let span_clone = span.clone();
// Create a future for each generate_stream_internal call.
let generate_future = async move {
let (header_tx, header_rx) = oneshot::channel();
let (sse_tx, sse_rx) = tokio::sync::mpsc::unbounded_channel();
tokio::spawn(async move {
let (headers, response_stream) = generate_stream_internal(
infer_clone.clone(),
compute_type_clone.clone(),
Json(generate_request),
span_clone.clone(),
)
.await;
let response_stream = async_stream::stream! {
let mut response_stream = Box::pin(response_stream);
while let Some(stream_token) = response_stream.next().await {
match stream_token {
Ok(stream_token) => {
let event = Event::default();
let current_time = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_else(|_| std::time::Duration::from_secs(0))
.as_secs();
let message = match stream_token.details {
Some(details) => {
let completion_tokens = details.generated_tokens;
let prompt_tokens = details.input_length;
let total_tokens = prompt_tokens + completion_tokens;
Completion::Final(CompletionFinal {
id: String::new(),
created: current_time,
model: model_id.clone(),
system_fingerprint: system_fingerprint.clone(),
choices: vec![CompletionComplete {
finish_reason: details.finish_reason.to_string(),
index: index as u32,
logprobs: None,
text: stream_token.token.text,
}],
usage: Usage {
prompt_tokens,
completion_tokens,
total_tokens,
},
})
}
None => Completion::Chunk(Chunk {
id: String::new(),
created: current_time,
choices: vec![CompletionComplete {
finish_reason: String::new(),
index: index as u32,
logprobs: None,
text: stream_token.token.text,
}],
model: model_id.clone(),
system_fingerprint: system_fingerprint.clone(),
}),
};
let event = event
.json_data(message)
.unwrap_or_else(|_e| Event::default());
yield Ok(event);
}
Err(err) => yield Ok(err.into_openai_event()),
}
}
};
                // send the headers and don't wait for the response
let _ = header_tx.send(headers);
                // pin the stream and emit messages to the sse_tx
let mut sse = Box::pin(response_stream);
while let Some(event) = sse.next().await {
if sse_tx.send(event).is_err() {
tracing::error!("Failed to send event. Receiver dropped.");
break;
}
}
});
(header_rx, sse_rx)
};
response_streams.push_back(generate_future);
}
let mut all_rxs = vec![];
while let Some((header_rx, sse_rx)) = response_streams.next().await {
all_rxs.push(sse_rx);
// get the headers from the first response of each stream
let headers = header_rx.await.map_err(|e| {
tracing::error!("Failed to get headers: {:?}", e);
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: "Failed to get headers".to_string(),
error_type: "headers".to_string(),
}),
)
})?;
if x_compute_type.is_none() {
x_compute_type = headers
.get("x-compute-type")
.and_then(|v| v.to_str().ok())
.map(|v| v.to_string());
x_accel_buffering = headers
.get("x-accel-buffering")
.and_then(|v| v.to_str().ok())
.map(|v| v.to_string());
}
x_compute_characters += headers
.get("x-compute-characters")
.and_then(|v| v.to_str().ok())
.and_then(|v| v.parse().ok())
.unwrap_or(0);
}
let mut headers = HeaderMap::new();
if let Some(x_compute_type) = x_compute_type {
headers.insert("x-compute-type", x_compute_type.parse().unwrap());
}
headers.insert("x-compute-characters", x_compute_characters.into());
if let Some(x_accel_buffering) = x_accel_buffering {
headers.insert("x-accel-buffering", x_accel_buffering.parse().unwrap());
}
// now sink the sse streams into a single stream and remove the ones that are done
let stream: AsyncStream<Result<Event, Infallible>, _> = async_stream::stream! {
loop {
let mut i = 0;
while i < all_rxs.len() {
let rx = &mut all_rxs[i];
select! {
Some(event) = rx.recv() => {
yield event;
}
else => {
all_rxs.remove(i);
continue; // skip the increment to handle the next element at the same index
}
}
i += 1; // only increment when no element was removed
}
if all_rxs.is_empty() {
break;
}
}
};
let stream = stream.chain(futures::stream::once(async {
Ok(Event::default().data("[DONE]"))
}));
let sse = Sse::new(stream).keep_alive(KeepAlive::default());
Ok((headers, sse).into_response())
} else {
let current_time = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_else(|_| std::time::Duration::from_secs(0))
.as_secs();
let responses = FuturesUnordered::new();
for (index, generate_request) in generate_requests.into_iter().enumerate() {
let infer_clone = infer.clone();
let compute_type_clone = compute_type.clone();
let span_clone = span.clone();
let response_future = async move {
let result = generate_internal(
Extension(infer_clone),
compute_type_clone,
Json(generate_request),
span_clone,
)
.await;
result.map(|(headers, input_length, generation)| {
(index, headers, input_length, generation)
})
};
responses.push(response_future);
}
let generate_responses = responses.try_collect::<Vec<_>>().await?;
let mut prompt_tokens = 0u32;
let mut completion_tokens = 0u32;
let mut total_tokens = 0u32;
let mut x_compute_time = 0u32;
let mut x_total_time = 0u32;
let mut x_validation_time = 0u32;
let mut x_queue_time = 0u32;
let mut x_inference_time = 0u32;
let mut x_time_per_token = 0u32;
let mut x_prompt_tokens = 0u32;
let mut x_generated_tokens = 0u32;
let choices = generate_responses
.into_iter()
.map(|(index, headers, input_length, Json(generation))| {
let details = generation.details.ok_or((
// this should never happen but handle if details are missing unexpectedly
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: "No details in generation".to_string(),
error_type: "no details".to_string(),
}),
))?;
if x_compute_type.is_none() {
x_compute_type = headers
.get("x-compute-type")
.and_then(|v| v.to_str().ok())
.map(|v| v.to_string());
}
// accumulate headers and usage from each response
x_compute_time += headers
.get("x-compute-time")
.and_then(|v| v.to_str().ok()?.parse().ok())
.unwrap_or(0);
x_compute_characters += headers
.get("x-compute-characters")
.and_then(|v| v.to_str().ok()?.parse().ok())
.unwrap_or(0);
x_total_time += headers
.get("x-total-time")
.and_then(|v| v.to_str().ok()?.parse().ok())
.unwrap_or(0);
x_validation_time += headers
.get("x-validation-time")
.and_then(|v| v.to_str().ok()?.parse().ok())
.unwrap_or(0);
x_queue_time += headers
.get("x-queue-time")
.and_then(|v| v.to_str().ok()?.parse().ok())
.unwrap_or(0);
x_inference_time += headers
.get("x-inference-time")
.and_then(|v| v.to_str().ok()?.parse().ok())
.unwrap_or(0);
x_time_per_token += headers
.get("x-time-per-token")
.and_then(|v| v.to_str().ok()?.parse().ok())
.unwrap_or(0);
x_prompt_tokens += headers
.get("x-prompt-tokens")
.and_then(|v| v.to_str().ok()?.parse().ok())
.unwrap_or(0);
x_generated_tokens += headers
.get("x-generated-tokens")
.and_then(|v| v.to_str().ok()?.parse().ok())
.unwrap_or(0);
prompt_tokens += input_length;
completion_tokens += details.generated_tokens;
total_tokens += input_length + details.generated_tokens;
Ok(CompletionComplete {
finish_reason: details.finish_reason.format(true),
index: index as u32,
logprobs: None,
text: generation.generated_text,
})
})
.collect::<Result<Vec<_>, _>>()
.map_err(|(status, Json(err))| (status, Json(err)))?;
let response = Completion::Final(CompletionFinal {
id: "".to_string(),
created: current_time,
model: info.model_id.clone(),
system_fingerprint: format!(
"{}-{}",
info.version,
info.docker_label.unwrap_or("native")
),
choices,
usage: Usage {
prompt_tokens,
completion_tokens,
total_tokens,
},
});
// headers similar to `generate` but aggregated
let mut headers = HeaderMap::new();
if let Some(x_compute_type) = x_compute_type {
headers.insert("x-compute-type", x_compute_type.parse().unwrap());
}
headers.insert("x-compute-characters", x_compute_characters.into());
headers.insert("x-total-time", x_total_time.into());
headers.insert("x-validation-time", x_validation_time.into());
headers.insert("x-queue-time", x_queue_time.into());
headers.insert("x-inference-time", x_inference_time.into());
headers.insert("x-time-per-token", x_time_per_token.into());
headers.insert("x-prompt-tokens", x_prompt_tokens.into());
headers.insert("x-generated-tokens", x_generated_tokens.into());
if let Some(x_accel_buffering) = x_accel_buffering {
headers.insert("x-accel-buffering", x_accel_buffering.parse().unwrap());
}
Ok((headers, Json(response)).into_response())
}
}
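// Illustrative helper (not part of the original router): the repeated header
// accumulation above follows a single pattern, sketched here as a standalone
// function over the already-imported `HeaderMap` type.
#[allow(dead_code)]
fn header_value_u32(headers: &HeaderMap, name: &str) -> u32 {
    headers
        .get(name)
        .and_then(|v| v.to_str().ok()?.parse().ok())
        .unwrap_or(0)
}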
/// State machine used by the streaming tool-call path in `chat_completions` below.
enum StreamState {
    /// Accumulate tokens until the emitted JSON reveals which function was chosen.
    Buffering,
    /// A "no_tool" choice was detected: skip the JSON scaffolding up to the "content" key.
    BufferTrailing,
    /// Stream tokens as regular content; optionally drop the closing quote of the JSON string.
    Content { skip_close_quote: bool },
}
/// Convert a StreamResponse into an Event to be sent over SSE
fn create_event_from_stream_token(
stream_token: &StreamResponse,
logprobs: bool,
stream_options: Option<StreamOptions>,
inner_using_tools: bool,
system_fingerprint: String,
model_id: String,
) -> Event {
let event = Event::default();
let current_time = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_else(|_| std::time::Duration::from_secs(0))
.as_secs();
let logprobs = logprobs.then(|| {
ChatCompletionLogprobs::from((stream_token.token.clone(), stream_token.top_tokens.clone()))
});
// replace the content with the tool calls if grammar is present
let (content, tool_calls) = if inner_using_tools {
(None, Some(vec![stream_token.token.text.clone()]))
} else {
let content = if !stream_token.token.special {
Some(stream_token.token.text.clone())
} else {
None
};
(content, None)
};
let (usage, finish_reason) = match &stream_token.details {
Some(details) => {
let usage = if stream_options
.as_ref()
.map(|s| s.include_usage)
.unwrap_or(false)
{
let completion_tokens = details.generated_tokens;
let prompt_tokens = details.input_length;
let total_tokens = prompt_tokens + completion_tokens;
Some(Usage {
completion_tokens,
prompt_tokens,
total_tokens,
})
} else {
None
};
(usage, Some(details.finish_reason.format(true)))
}
None => (None, None),
};
let chat_complete = CompletionType::ChatCompletionChunk(ChatCompletionChunk::new(
model_id.clone(),
system_fingerprint.clone(),
content,
tool_calls,
current_time,
logprobs,
finish_reason,
usage,
));
event.json_data(chat_complete).unwrap_or_else(|e| {
println!("Failed to serialize ChatCompletionChunk: {:?}", e);
Event::default()
})
}
/// Generate tokens
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/v1/chat/completions",
request_body = ChatRequest,
responses(
(status = 200, description = "Generated Chat Completion",
content(
("application/json" = ChatCompletion),
("text/event-stream" = ChatCompletionChunk),
)),
(status = 424, description = "Generation Error", body = ErrorResponse,
example = json ! ({"error": "Request failed during generation"})),
(status = 429, description = "Model is overloaded", body = ErrorResponse,
example = json ! ({"error": "Model is overloaded"})),
(status = 422, description = "Input validation error", body = ErrorResponse,
example = json ! ({"error": "Input validation error"})),
(status = 500, description = "Incomplete generation", body = ErrorResponse,
example = json ! ({"error": "Incomplete generation"})),
)
)]
#[instrument(
skip_all,
fields(
// parameters = ? req.parameters,
total_time,
validation_time,
queue_time,
inference_time,
time_per_token,
seed,
)
)]
pub(crate) async fn chat_completions(
Extension(infer): Extension<Infer>,
Extension(compute_type): Extension<ComputeType>,
Extension(info): Extension<Info>,
Json(chat): Json<ChatRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
let span = tracing::Span::current();
metrics::counter!("tgi_request_count").increment(1);
let ChatRequest {
model,
stream,
stream_options,
logprobs,
..
} = chat.clone();
let (generate_request, using_tools): (GenerateRequest, bool) =
chat.try_into_generate(&infer)?;
let logprobs = logprobs.unwrap_or_default();
// extract model id from request if specified
let model_id = match model.as_deref() {
Some("tgi") | None => info.model_id.clone(),
Some(m_id) => m_id.to_string(),
};
let system_fingerprint = format!("{}-{}", info.version, info.docker_label.unwrap_or("native"));
// switch on stream
if stream {
let (headers, response_stream) =
generate_stream_internal(infer, compute_type, Json(generate_request), span).await;
// regex to match any function name
let function_regex = match Regex::new(r#"\{"function":\{"_name":"([^"]+)""#) {
Ok(regex) => regex,
Err(e) => {
return Err((
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: format!("Failed to compile regex: {}", e),
error_type: "regex".to_string(),
}),
))
}
};
let response_stream = async_stream::stream! {
let mut response_stream = Box::pin(response_stream);
let mut buffer = Vec::new();
let mut json_buffer = String::new();
let mut state = if using_tools {
StreamState::Buffering
} else {
StreamState::Content {
skip_close_quote: false,
}
};
let mut response_as_tool = using_tools;
while let Some(result) = response_stream.next().await {
match result{
Ok(stream_token) => {
let token_text = &stream_token.token.text.clone();
match state {
StreamState::Buffering => {
json_buffer.push_str(&token_text.replace(" ", ""));
buffer.push(stream_token);
if let Some(captures) = function_regex.captures(&json_buffer) {
let function_name = captures[1].to_string();
if function_name == "no_tool" {
state = StreamState::BufferTrailing;
response_as_tool = false;
buffer.clear();
json_buffer.clear();
} else {
state = StreamState::Content {
skip_close_quote: false,
};
// send all the buffered messages
for stream_token in &buffer {
let event = create_event_from_stream_token(
stream_token,
logprobs,
stream_options.clone(),
response_as_tool,
system_fingerprint.clone(),
model_id.clone(),
);
yield Ok::<Event, Infallible>(event);
}
}
}
}
// if we skipped sending the buffer we need to avoid sending the following json key and quotes
StreamState::BufferTrailing => {
let infix_text = "\"content\":\"";
json_buffer.push_str(&token_text.replace(" ", ""));
// keep capturing until we find the infix text
match json_buffer.find(infix_text) {
Some(content_key_index) => {
json_buffer =
json_buffer[content_key_index + infix_text.len()..].to_string();
}
None => {
continue;
}
}
// if there is leftover text after removing the infix text, we need to send it
if !json_buffer.is_empty() {
let event = Event::default();
let current_time = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_else(|_| std::time::Duration::from_secs(0))
.as_secs();
let chat_complete =
CompletionType::ChatCompletionChunk(ChatCompletionChunk::new(
model_id.clone(),
system_fingerprint.clone(),
Some(json_buffer.clone()),
None,
current_time,
None,
None,
None,
));
yield Ok(event.json_data(chat_complete).unwrap_or_else(|e| {
InferError::StreamSerializationError(e.to_string()).into()
}));
}
// cleanup the buffers
buffer.clear();
json_buffer.clear();
state = StreamState::Content {
skip_close_quote: true,
};
}
StreamState::Content { skip_close_quote } => {
if skip_close_quote && token_text.contains('"') {
break;
}
// send the content
let event = create_event_from_stream_token(
&stream_token,
logprobs,
stream_options.clone(),
response_as_tool,
system_fingerprint.clone(),
model_id.clone(),
);
yield Ok::<Event, Infallible>(event);
}
}
}
Err(err) => yield Ok(err.into_openai_event())
}
}
yield Ok::<Event, Infallible>(Event::default().data("[DONE]"));
};
let sse = Sse::new(response_stream).keep_alive(KeepAlive::default());
Ok((headers, sse).into_response())
} else {
let (headers, input_length, Json(generation)) =
generate_internal(Extension(infer), compute_type, Json(generate_request), span).await?;
let current_time = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_else(|_| std::time::Duration::from_secs(0))
.as_secs();
let (tool_calls, output) = if using_tools {
let gen_text_value: Value =
serde_json::from_str(&generation.generated_text).map_err(|e| {
InferError::ToolError(format!(
"Failed to parse generated text: {} {:?}",
e, generation.generated_text
))
})?;
let function = gen_text_value.get("function").ok_or(InferError::ToolError(
"No function found in generated text".to_string(),
))?;
let name = function
.get("_name")
.and_then(Value::as_str)
.ok_or(InferError::ToolError(
"No _name found in generated text".to_string(),
))?
.to_string();
let mut arguments = function.clone();
if let Value::Object(ref mut props) = arguments {
props.remove("_name");
}
match name.as_str() {
"no_tool" => {
// parse the content message
let content_message = arguments
.get("content")
.and_then(Value::as_str)
.ok_or_else(|| {
InferError::ToolError(
"No `content` found in generated text".to_string(),
)
})?
.to_string();
(None, Some(content_message))
}
_ => {
let tool_calls = vec![ToolCall {
id: "0".to_string(),
r#type: "function".to_string(),
function: FunctionDefinition {
description: None,
name,
arguments,
},
}];
(Some(tool_calls), None)
}
}
} else {
(None, Some(generation.generated_text))
};
// build the complete response object with the full text
let response = CompletionType::ChatCompletion(ChatCompletion::new(
model_id,
system_fingerprint,
output,
current_time,
generation.details.unwrap(),
logprobs,
tool_calls,
input_length,
));
// wrap generation inside a Vec to match api-inference
Ok((headers, Json(response)).into_response())
}
}
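// For reference: the non-streaming tool path above expects the model to emit a JSON
// object shaped like {"function": {"_name": "<tool name>", ...arguments}}. A "_name"
// of "no_tool" is unwrapped into its "content" field and returned as plain text
// instead of a tool call.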
/// Tokenize inputs
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/tokenize",
request_body = GenerateRequest,
responses(
(status = 200, description = "Tokenized ids", body = TokenizeResponse),
(status = 404, description = "No tokenizer found", body = ErrorResponse,
example = json ! ({"error": "No fast tokenizer available"})),
)
)]
#[instrument(skip_all)]
async fn tokenize(
Extension(infer): Extension<Infer>,
Json(req): Json<GenerateRequest>,
) -> Result<Json<TokenizeResponse>, (StatusCode, Json<ErrorResponse>)> {
let input = req.inputs.clone();
let encoding = infer.tokenize(req).await?;
let tokens = encoding_to_tokens(&encoding, &input);
Ok(Json(TokenizeResponse(tokens)))
}
/// Prometheus metrics scrape endpoint
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/metrics",
responses((status = 200, description = "Prometheus Metrics", body = String))
)]
async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
prom_handle.render()
}
#[derive(Clone, Debug)]
pub(crate) struct ComputeType(String);
// OpenAPI documentation
#[derive(OpenApi)]
#[openapi(
paths(
health,
get_model_info,
compat_generate,
generate,
generate_stream,
chat_completions,
completions,
tokenize,
metrics,
openai_get_model_info,
sagemaker_compatibility,
get_chat_tokenize,
),
components(
schemas(
Info,
CompatGenerateRequest,
SagemakerRequest,
GenerateRequest,
GrammarType,
ChatRequest,
Message,
MessageContent,
MessageChunk,
Url,
FunctionName,
OutputMessage,
TextMessage,
ToolCallMessage,
ToolCallDelta,
ChatCompletionComplete,
ChatCompletionChoice,
ChatCompletionDelta,
ChatCompletionChunk,
ChatCompletionLogprob,
ChatCompletionLogprobs,
ChatCompletionTopLogprob,
ChatCompletion,
CompletionRequest,
CompletionComplete,
SagemakerResponse,
SagemakerStreamResponse,
Chunk,
Completion,
CompletionFinal,
Prompt,
GenerateParameters,
PrefillToken,
Token,
GenerateResponse,
TokenizeResponse,
SimpleToken,
BestOfSequence,
Details,
FinishReason,
StreamResponse,
StreamDetails,
ErrorResponse,
GrammarType,
Usage,
StreamOptions,
DeltaToolCall,
Tool,
ToolCall,
Function,
FunctionDefinition,
ToolChoice,
ModelInfo,
ChatTokenizeResponse,
)
),
tags(
(name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
),
info(
title = "Text Generation Inference",
license(
name = "Apache 2.0",
url = "https://www.apache.org/licenses/LICENSE-2.0"
)
)
)]
pub struct ApiDoc;
pub fn schema() -> ApiDoc {
ApiDoc
}
pub fn py_resolve_tokenizer(
py: pyo3::Python,
tokenizer_name: &str,
revision: Option<&str>,
trust_remote_code: bool,
) -> pyo3::PyResult<()> {
let transformers = py.import_bound("transformers")?;
let auto = transformers.getattr("AutoTokenizer")?;
let from_pretrained = auto.getattr("from_pretrained")?;
let args = (tokenizer_name,);
let kwargs = if let Some(rev) = &revision {
[
("revision", rev.to_string().into_py(py)),
("trust_remote_code", trust_remote_code.into_py(py)),
]
.into_py_dict_bound(py)
} else {
[("trust_remote_code", trust_remote_code.into_py(py))].into_py_dict_bound(py)
};
let tokenizer = from_pretrained.call(args, Some(&kwargs))?;
let save = tokenizer.getattr("save_pretrained")?;
let args = ("out".to_string(),);
save.call1(args)?;
Ok(())
}
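// Illustrative usage sketch (not part of the original file), mirroring the call
// sites further below; the model id is hypothetical:
//
//     pyo3::Python::with_gil(|py| -> pyo3::PyResult<()> {
//         py_resolve_tokenizer(py, "some-org/some-model", Some("main"), false)
//     })?;
//
// On success the tokenizer files are written under `./out`, from which
// `out/tokenizer.json` can later be loaded as a Rust tokenizer.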
pub fn legacy_tokenizer_handle(config_filename: Option<&PathBuf>) -> Option<()> {
// XXX Legacy case for FasterDecoding/medusa-vicuna-7b-v1.3
// and state-spaces/mamba-130m
tracing::warn!("Odd tokenizer detected, falling back on legacy tokenization");
#[derive(serde::Deserialize)]
struct FallbackConfig {
base_model_name_or_path: Option<String>,
model_type: Option<String>,
ssm_config: Option<serde_json::Value>,
}
config_filename.and_then(|filename| {
std::fs::read_to_string(filename)
.ok()
.as_ref()
.and_then(|c| {
let config: Result<FallbackConfig, _> = serde_json::from_str(c);
if let Ok(config) = config {
if config.model_type.is_none() {
if let Some(base) = config.base_model_name_or_path {
pyo3::Python::with_gil(|py| -> PyResult<()> {
py_resolve_tokenizer(py, &base, Some("main"), false)
})
.ok()?;
}
}
if config.ssm_config.is_some() {
// XXX Legacy mamba
pyo3::Python::with_gil(|py| -> PyResult<()> {
py_resolve_tokenizer(py, "EleutherAI/gpt-neox-20b", Some("main"), false)
})
.ok()?;
}
}
Some(())
})
})
}
/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
backend: impl Backend + Send + Sync + 'static,
max_concurrent_requests: usize,
max_best_of: usize,
max_stop_sequences: usize,
max_top_n_tokens: u32,
max_input_tokens: usize,
max_total_tokens: usize,
validation_workers: usize,
api_key: Option<String>,
tokenizer_name: String,
tokenizer_config_path: Option<String>,
revision: Option<String>,
trust_remote_code: bool,
hostname: String,
port: u16,
cors_allow_origin: Option<Vec<String>>,
ngrok: bool,
_ngrok_authtoken: Option<String>,
_ngrok_edge: Option<String>,
disable_grammar_support: bool,
max_client_batch_size: usize,
usage_stats_level: usage_stats::UsageStatsLevel,
payload_limit: usize,
) -> Result<(), WebServerError> {
// CORS allowed origins
// map to go inside the option and then map to parse from String to HeaderValue
// Finally, convert to AllowOrigin
let allow_origin: Option<AllowOrigin> = cors_allow_origin.map(|cors_allow_origin| {
AllowOrigin::list(
cors_allow_origin
.iter()
.map(|origin| origin.parse::<HeaderValue>().unwrap()),
)
});
// Parse Huggingface hub token
let authorization_token = std::env::var("HF_TOKEN")
.or_else(|_| std::env::var("HUGGING_FACE_HUB_TOKEN"))
.ok();
// Tokenizer instance
// This will only be used to validate payloads
let local_path = Path::new(&tokenizer_name);
// Shared API builder initialization
let api_builder = || {
let mut builder = ApiBuilder::new()
.with_progress(false)
.with_token(authorization_token);
if let Ok(cache_dir) = std::env::var("HUGGINGFACE_HUB_CACHE") {
builder = builder.with_cache_dir(cache_dir.into());
}
builder
};
// Decide if we need to use the API based on the revision and local path
let use_api = revision.is_some() || !local_path.exists() || !local_path.is_dir();
// Initialize API if needed
#[derive(Clone)]
enum Type {
Api(Api),
Cache(Cache),
None,
}
let api = if use_api {
if std::env::var("HF_HUB_OFFLINE") == Ok("1".to_string()) {
let cache = std::env::var("HUGGINGFACE_HUB_CACHE")
.map_err(|_| ())
.map(|cache_dir| Cache::new(cache_dir.into()))
.unwrap_or_else(|_| Cache::default());
tracing::warn!("Offline mode active using cache defaults");
Type::Cache(cache)
} else {
tracing::info!("Using the Hugging Face API");
match api_builder().build() {
Ok(api) => Type::Api(api),
Err(_) => {
tracing::warn!("Unable to build the Hugging Face API");
Type::None
}
}
}
} else {
Type::None
};
// Load tokenizer and model info
let (
config_filename,
tokenizer_config_filename,
preprocessor_config_filename,
processor_config_filename,
model_info,
) = match api {
Type::None => (
Some(local_path.join("config.json")),
Some(local_path.join("tokenizer_config.json")),
Some(local_path.join("preprocessor_config.json")),
Some(local_path.join("processor_config.json")),
None,
),
Type::Api(api) => {
let api_repo = api.repo(Repo::with_revision(
tokenizer_name.to_string(),
RepoType::Model,
revision.clone().unwrap_or_else(|| "main".to_string()),
));
let config_filename = api_repo.get("config.json").await.ok();
let tokenizer_config_filename = api_repo.get("tokenizer_config.json").await.ok();
let preprocessor_config_filename = api_repo.get("preprocessor_config.json").await.ok();
let processor_config_filename = api_repo.get("processor_config.json").await.ok();
let model_info = if let Some(model_info) = get_hub_model_info(&api_repo).await {
Some(model_info)
} else {
tracing::warn!("Could not retrieve model info from the Hugging Face hub.");
None
};
(
config_filename,
tokenizer_config_filename,
preprocessor_config_filename,
processor_config_filename,
model_info,
)
}
Type::Cache(cache) => {
let repo = cache.repo(Repo::with_revision(
tokenizer_name.to_string(),
RepoType::Model,
revision.clone().unwrap_or_else(|| "main".to_string()),
));
(
repo.get("config.json"),
repo.get("tokenizer_config.json"),
repo.get("preprocessor_config.json"),
repo.get("processor_config.json"),
None,
)
}
};
// Read the JSON contents of the file as an instance of 'HubTokenizerConfig'.
let tokenizer_config: Option<HubTokenizerConfig> = if let Some(filename) = tokenizer_config_path
{
HubTokenizerConfig::from_file(filename)
} else {
tokenizer_config_filename.and_then(HubTokenizerConfig::from_file)
};
let tokenizer_config = tokenizer_config.unwrap_or_else(|| {
tracing::warn!("Could not find tokenizer config locally and no API specified");
HubTokenizerConfig::default()
});
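    // Tokenizer resolution strategy: first try to materialize the tokenizer through the
    // Python `AutoTokenizer` (which saves it under `./out`), fall back to the legacy
    // handler for odd configs, then prefer the serialized `out/tokenizer.json` as a Rust
    // tokenizer and otherwise keep a Python-backed tokenizer wrapper.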
let tokenizer: Result<Tokenizer, WebServerError> = {
use pyo3::prelude::*;
Python::with_gil(|py| -> PyResult<()> {
py_resolve_tokenizer(py, &tokenizer_name, revision.as_deref(), trust_remote_code)?;
Ok(())
})
.inspect_err(|err| {
tracing::error!("Failed to import python tokenizer {err}");
})
.or_else(|err| {
let out = legacy_tokenizer_handle(config_filename.as_ref());
out.ok_or(err)
})
.map_err(|_| WebServerError::Tokenizer("Unable to load tokenizer.".to_string()))?;
let filename = "out/tokenizer.json";
if let Ok(tok) = tokenizers::Tokenizer::from_file(filename) {
Ok(Tokenizer::Rust(tok))
} else {
Ok(Tokenizer::Python {
tokenizer_name: tokenizer_name.clone(),
revision: revision.clone(),
trust_remote_code,
})
}
};
let config: Option<Config> = config_filename.and_then(|filename| {
std::fs::read_to_string(filename)
.ok()
.as_ref()
.and_then(|c| {
let config: Result<Config, _> = serde_json::from_str(c);
if let Err(err) = &config {
tracing::warn!("Could not parse config {err:?}");
}
config.ok()
})
});
let model_info = model_info.unwrap_or_else(|| HubModelInfo {
model_id: tokenizer_name.to_string(),
sha: None,
pipeline_tag: None,
});
let processor_config = processor_config_filename
.and_then(HubProcessorConfig::from_file)
.unwrap_or_default();
let preprocessor_config: Option<HubPreprocessorConfig> =
preprocessor_config_filename.and_then(HubPreprocessorConfig::from_file);
tracing::info!("Using config {config:?}");
// Only send usage stats when TGI is run in container and the function returns Some
let is_container = matches!(usage_stats::is_container(), Ok(true));
let user_agent = match (usage_stats_level, is_container) {
(usage_stats::UsageStatsLevel::On | usage_stats::UsageStatsLevel::NoStack, true) => {
let reduced_args = usage_stats::Args::new(
config.clone(),
tokenizer_config.tokenizer_class.clone(),
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_tokens,
max_total_tokens,
// waiting_served_ratio,
// max_batch_prefill_tokens,
// max_batch_total_tokens,
// max_waiting_tokens,
// max_batch_size,
revision.clone(),
validation_workers,
disable_grammar_support,
max_client_batch_size,
usage_stats_level,
backend.name(),
);
Some(usage_stats::UserAgent::new(reduced_args))
}
_ => None,
};
let stop_usage_thread = Arc::new(AtomicBool::new(false));
let stop_usage_thread_clone = stop_usage_thread.clone();
if let Some(ua) = user_agent.clone() {
let start_event =
usage_stats::UsageStatsEvent::new(ua.clone(), usage_stats::EventType::Start, None);
tokio::spawn(async move {
// send start event
start_event.send().await;
let mut last_report = Instant::now();
while !stop_usage_thread_clone.load(Ordering::Relaxed) {
if last_report.elapsed() > Duration::from_secs(900) {
let report_event = usage_stats::UsageStatsEvent::new(
ua.clone(),
usage_stats::EventType::Ping,
None,
);
report_event.send().await;
last_report = Instant::now();
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
});
};
let compat_return_full_text = match &model_info.pipeline_tag {
None => {
tracing::warn!("no pipeline tag found for model {tokenizer_name}");
true
}
Some(pipeline_tag) => pipeline_tag.as_str() == "text-generation",
};
let result = start(
backend,
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_tokens,
max_total_tokens,
validation_workers,
api_key,
config,
(tokenizer?, tokenizer_config),
(preprocessor_config, processor_config),
hostname,
port,
ngrok,
_ngrok_authtoken,
_ngrok_edge,
disable_grammar_support,
max_client_batch_size,
model_info,
compat_return_full_text,
allow_origin,
payload_limit,
)
.await;
if let Some(ua) = user_agent {
stop_usage_thread.store(true, Ordering::Relaxed);
match result {
Ok(_) => {
let stop_event = usage_stats::UsageStatsEvent::new(
ua.clone(),
usage_stats::EventType::Stop,
None,
);
stop_event.send().await;
Ok(())
}
Err(e) => {
let description = match usage_stats_level {
usage_stats::UsageStatsLevel::On => Some(e.to_string()),
                    usage_stats::UsageStatsLevel::NoStack => Some("unknown_error".to_string()),
_ => None,
};
let event = usage_stats::UsageStatsEvent::new(
ua.clone(),
usage_stats::EventType::Error,
description,
);
event.send().await;
Err(e)
}
}
} else {
result
}
}
#[allow(clippy::too_many_arguments)]
async fn start(
backend: impl Backend + Send + Sync + 'static,
max_concurrent_requests: usize,
max_best_of: usize,
max_stop_sequences: usize,
max_top_n_tokens: u32,
max_input_tokens: usize,
max_total_tokens: usize,
validation_workers: usize,
api_key: Option<String>,
config: Option<Config>,
(tokenizer, tokenizer_config): (Tokenizer, HubTokenizerConfig),
(preprocessor_config, processor_config): (Option<HubPreprocessorConfig>, HubProcessorConfig),
hostname: String,
port: u16,
ngrok: bool,
_ngrok_authtoken: Option<String>,
_ngrok_edge: Option<String>,
disable_grammar_support: bool,
max_client_batch_size: usize,
model_info: HubModelInfo,
compat_return_full_text: bool,
allow_origin: Option<AllowOrigin>,
payload_limit: usize,
) -> Result<(), WebServerError> {
// Determine the server port based on the feature and environment variable.
let port = if cfg!(feature = "google") {
std::env::var("AIP_HTTP_PORT")
.map(|aip_http_port| aip_http_port.parse::<u16>().unwrap_or(port))
.unwrap_or(port)
} else {
port
};
let addr = match hostname.parse() {
Ok(ip) => SocketAddr::new(ip, port),
Err(_) => {
tracing::warn!("Invalid hostname, defaulting to 0.0.0.0");
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port)
}
};
// Create state
let validation = Validation::new(
validation_workers,
tokenizer,
config,
preprocessor_config,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_tokens,
max_total_tokens,
disable_grammar_support,
);
let infer = Infer::new(
backend,
validation,
max_concurrent_requests,
tokenizer_config,
processor_config,
);
// Duration buckets
let duration_matcher = Matcher::Suffix(String::from("duration"));
let n_duration_buckets = 35;
let mut duration_buckets = Vec::with_capacity(n_duration_buckets);
// Minimum duration in seconds
let mut value = 0.0001;
for _ in 0..n_duration_buckets {
// geometric sequence
value *= 1.5;
duration_buckets.push(value);
}
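    // With 35 buckets and a 1.5x ratio this covers roughly 0.15 ms up to ~146 s.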
// Input Length buckets
let input_length_matcher = Matcher::Full(String::from("tgi_request_input_length"));
let input_length_buckets: Vec<f64> = (0..100)
.map(|x| (max_input_tokens as f64 / 100.0) * (x + 1) as f64)
.collect();
// Generated tokens buckets
let generated_tokens_matcher = Matcher::Full(String::from("tgi_request_generated_tokens"));
let generated_tokens_buckets: Vec<f64> = (0..100)
.map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
.collect();
    // Max new tokens buckets
let max_new_tokens_matcher = Matcher::Full(String::from("tgi_request_max_new_tokens"));
let max_new_tokens_buckets: Vec<f64> = (0..100)
.map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
.collect();
// Batch size buckets
let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size"));
let batch_size_buckets: Vec<f64> = (0..1024).map(|x| (x + 1) as f64).collect();
// Speculated tokens buckets
// let skipped_matcher = Matcher::Full(String::from("tgi_request_skipped_tokens"));
// let skipped_buckets: Vec<f64> = (0..shard_info.speculate + 1).map(|x| x as f64).collect();
// Prometheus handler
let builder = PrometheusBuilder::new()
.set_buckets_for_metric(duration_matcher, &duration_buckets)
.unwrap()
.set_buckets_for_metric(input_length_matcher, &input_length_buckets)
.unwrap()
.set_buckets_for_metric(generated_tokens_matcher, &generated_tokens_buckets)
.unwrap()
.set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets)
.unwrap()
.set_buckets_for_metric(batch_size_matcher, &batch_size_buckets)
.unwrap();
// .set_buckets_for_metric(skipped_matcher, &skipped_buckets)
// .unwrap();
// See: https://github.com/metrics-rs/metrics/issues/467#issuecomment-2022755151
let (recorder, _) = builder
.build()
.expect("failed to build prometheus recorder");
let prom_handle = recorder.handle();
metrics::set_global_recorder(recorder).expect("Failed to set global recorder");
// Metrics descriptions
metrics::describe_counter!("tgi_request_success", "Number of successful requests");
metrics::describe_histogram!(
"tgi_request_duration",
metrics::Unit::Seconds,
"Request duration"
);
metrics::describe_histogram!(
"tgi_request_validation_duration",
metrics::Unit::Seconds,
"Request validation duration"
);
metrics::describe_histogram!(
"tgi_request_queue_duration",
metrics::Unit::Seconds,
"Request queue duration"
);
metrics::describe_histogram!(
"tgi_request_inference_duration",
metrics::Unit::Seconds,
"Request inference duration"
);
metrics::describe_histogram!(
"tgi_request_mean_time_per_token_duration",
metrics::Unit::Seconds,
"Mean time per token per request"
);
metrics::describe_histogram!(
"tgi_request_generated_tokens",
metrics::Unit::Count,
"Generated tokens per request"
);
metrics::describe_counter!(
"tgi_batch_inference_count",
metrics::Unit::Count,
"Inference calls per method (prefill or decode)"
);
metrics::describe_counter!(
"tgi_request_count",
metrics::Unit::Count,
"Total number of requests"
);
metrics::describe_counter!(
"tgi_batch_inference_success",
metrics::Unit::Count,
"Number of successful inference calls per method (prefill or decode)"
);
metrics::describe_gauge!(
"tgi_batch_current_size",
metrics::Unit::Count,
"Current batch size"
);
metrics::describe_gauge!("tgi_queue_size", metrics::Unit::Count, "Current queue size");
metrics::describe_gauge!(
"tgi_batch_current_max_tokens",
metrics::Unit::Count,
"Maximum tokens for the current batch"
);
metrics::describe_gauge!(
"tgi_batch_total_tokens",
metrics::Unit::Count,
"Maximum amount of tokens in total."
);
metrics::describe_histogram!(
"tgi_request_max_new_tokens",
metrics::Unit::Count,
"Maximum new tokens per request"
);
metrics::describe_histogram!(
"tgi_batch_inference_duration",
metrics::Unit::Seconds,
"Batch inference duration"
);
metrics::describe_histogram!(
"tgi_batch_forward_duration",
metrics::Unit::Seconds,
"Batch forward duration per method (prefill or decode)"
);
metrics::describe_histogram!(
"tgi_request_skipped_tokens",
metrics::Unit::Count,
"Speculated tokens per request"
);
metrics::describe_histogram!(
"tgi_batch_filter_duration",
metrics::Unit::Seconds,
"Time spent filtering batches and sending generated tokens per method (prefill or decode)"
);
metrics::describe_histogram!(
"tgi_request_queue_duration",
metrics::Unit::Seconds,
"Time spent in the queue per request"
);
metrics::describe_histogram!(
"tgi_request_validation_duration",
metrics::Unit::Seconds,
"Time spent validating the request"
);
metrics::describe_histogram!(
"tgi_request_duration",
metrics::Unit::Seconds,
"Total time spent processing the request"
);
metrics::describe_histogram!(
"tgi_batch_decode_duration",
metrics::Unit::Seconds,
"Time spent decoding a batch per method (prefill or decode)"
);
metrics::describe_histogram!(
"tgi_request_input_length",
metrics::Unit::Count,
"Input token length per request"
);
metrics::describe_histogram!(
"tgi_batch_next_size",
metrics::Unit::Count,
"Batch size of the next batch"
);
// CORS layer
let allow_origin = allow_origin.unwrap_or(AllowOrigin::any());
let cors_layer = CorsLayer::new()
.allow_methods([Method::GET, Method::POST])
.allow_headers([http::header::CONTENT_TYPE])
.allow_origin(allow_origin);
// Endpoint info
let info = Info {
model_id: model_info.model_id,
model_sha: model_info.sha,
// model_dtype: shard_info.dtype,
// model_device_type: shard_info.device_type,
model_pipeline_tag: model_info.pipeline_tag,
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_input_tokens,
max_total_tokens,
// waiting_served_ratio,
// max_batch_total_tokens,
// max_waiting_tokens,
// max_batch_size,
validation_workers,
max_client_batch_size,
router: env!("CARGO_PKG_NAME"),
version: env!("CARGO_PKG_VERSION"),
sha: option_env!("VERGEN_GIT_SHA"),
docker_label: option_env!("DOCKER_LABEL"),
};
#[allow(unused_mut)] // mut is needed for conditional compilation
let mut doc = ApiDoc::openapi();
#[cfg(feature = "google")]
{
use crate::vertex::__path_vertex_compatibility;
use crate::vertex::{VertexInstance, VertexRequest, VertexResponse};
#[derive(OpenApi)]
#[openapi(
paths(vertex_compatibility),
components(schemas(VertexInstance, VertexRequest, VertexResponse))
)]
struct VertexApiDoc;
doc.merge(VertexApiDoc::openapi());
}
#[cfg(feature = "kserve")]
{
use crate::kserve::{
InferenceOutput, InferenceRequest, LiveResponse, MetadataServerResponse, OutputChunk,
ReadyResponse,
};
use crate::kserve::{
__path_kerve_server_metadata, __path_kserve_health_live, __path_kserve_health_ready,
__path_kserve_model_infer, __path_kserve_model_metadata,
__path_kserve_model_metadata_ready,
};
#[derive(OpenApi)]
#[openapi(
paths(
kserve_health_live,
kserve_health_ready,
kerve_server_metadata,
kserve_model_metadata,
kserve_model_metadata_ready,
kserve_model_infer,
),
components(schemas(
InferenceOutput,
InferenceRequest,
LiveResponse,
MetadataServerResponse,
OutputChunk,
ReadyResponse,
))
)]
struct KServeApiDoc;
doc.merge(KServeApiDoc::openapi());
}
// Configure Swagger UI
let swagger_ui = SwaggerUi::new("/docs").url("/api-doc/openapi.json", doc);
// Define base and health routes
let mut base_routes = Router::new()
.route("/", post(compat_generate))
.route("/generate", post(generate))
.route("/generate_stream", post(generate_stream))
.route("/v1/chat/completions", post(chat_completions))
.route("/v1/completions", post(completions))
.route("/vertex", post(vertex_compatibility))
.route("/invocations", post(sagemaker_compatibility))
.route("/tokenize", post(tokenize));
if let Some(api_key) = api_key {
let mut prefix = "Bearer ".to_string();
prefix.push_str(&api_key);
// Leak to allow FnMut
let api_key: &'static str = prefix.leak();
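        // The expected value is the full "Bearer <api_key>" string; the closure below
        // compares it case-insensitively against the raw Authorization header.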
let auth = move |headers: HeaderMap,
request: axum::extract::Request,
next: axum::middleware::Next| async move {
match headers.get(AUTHORIZATION) {
Some(token) => match token.to_str() {
Ok(token_str) if token_str.to_lowercase() == api_key.to_lowercase() => {
let response = next.run(request).await;
Ok(response)
}
_ => Err(StatusCode::UNAUTHORIZED),
},
None => Err(StatusCode::UNAUTHORIZED),
}
};
base_routes = base_routes.layer(axum::middleware::from_fn(auth))
}
let info_routes = Router::new()
.route("/", get(health))
.route("/chat_tokenize", post(get_chat_tokenize))
.route("/info", get(get_model_info))
.route("/health", get(health))
.route("/ping", get(health))
.route("/metrics", get(metrics))
.route("/v1/models", get(openai_get_model_info));
let compute_type =
ComputeType(std::env::var("COMPUTE_TYPE").unwrap_or("gpu+optimized".to_string()));
// Combine routes and layers
let mut app = Router::new()
.merge(swagger_ui)
.merge(base_routes)
.merge(info_routes);
#[cfg(feature = "google")]
{
tracing::info!("Built with `google` feature");
tracing::info!(
"Environment variables `AIP_PREDICT_ROUTE` and `AIP_HEALTH_ROUTE` will be respected."
);
if let Ok(env_predict_route) = std::env::var("AIP_PREDICT_ROUTE") {
app = app.route(&env_predict_route, post(vertex_compatibility));
}
if let Ok(env_health_route) = std::env::var("AIP_HEALTH_ROUTE") {
app = app.route(&env_health_route, get(health));
}
}
#[cfg(feature = "kserve")]
{
tracing::info!("Built with `kserve` feature");
app = app
.route(
"/v2/models/:model_name/versions/:model_version/infer",
post(kserve_model_infer),
)
.route(
"/v2/models/:model_name/versions/:model_version",
get(kserve_model_metadata),
)
.route("/v2/health/ready", get(kserve_health_ready))
.route("/v2/health/live", get(kserve_health_live))
.route("/v2", get(kerve_server_metadata))
.route(
"/v2/models/:model_name/versions/:model_version/ready",
get(kserve_model_metadata_ready),
);
}
// add layers after routes
app = app
.layer(Extension(info))
.layer(Extension(compat_return_full_text))
.layer(Extension(infer))
.layer(Extension(compute_type))
.layer(Extension(prom_handle.clone()))
.layer(OtelAxumLayer::default())
.layer(DefaultBodyLimit::max(payload_limit))
.layer(cors_layer);
tracing::info!("Connected");
if ngrok {
#[cfg(feature = "ngrok")]
{
panic!("ngrok feature is not functional with axum=0.7 and hyper=1, waiting on https://github.com/ngrok/ngrok-rust/pull/137/files to re-enable.");
// Run server
}
#[cfg(not(feature = "ngrok"))]
{
            // The `_ngrok_authtoken` and `_ngrok_edge` arguments are intentionally unused in this build.
panic!("`text-generation-router` was compiled without the `ngrok` feature");
}
} else {
// Run server
let listener = match tokio::net::TcpListener::bind(&addr).await {
Ok(listener) => listener,
Err(e) => {
tracing::error!("Failed to bind to {addr}: {e}");
return Err(WebServerError::Axum(Box::new(e)));
}
};
axum::serve(listener, app)
.with_graceful_shutdown(shutdown_signal())
.await
.map_err(|err| WebServerError::Axum(Box::new(err)))?;
}
Ok(())
}
/// get model info from the Huggingface Hub
pub async fn get_hub_model_info(api: &ApiRepo) -> Option<HubModelInfo> {
let response = api.info_request().send().await.ok()?;
if response.status().is_success() {
let hub_model_info: HubModelInfo =
serde_json::from_str(&response.text().await.ok()?).ok()?;
if let Some(sha) = &hub_model_info.sha {
tracing::info!(
"Serving revision {sha} of model {}",
hub_model_info.model_id
);
}
Some(hub_model_info)
} else {
None
}
}
/// get tokenizer_config from the Huggingface Hub
pub async fn get_tokenizer_config(api_repo: &ApiRepo) -> Option<HubTokenizerConfig> {
let tokenizer_config_filename = api_repo.get("tokenizer_config.json").await.ok()?;
// Open the file in read-only mode with buffer.
let file = File::open(tokenizer_config_filename).ok()?;
let reader = BufReader::new(file);
// Read the JSON contents of the file as an instance of 'HubTokenizerConfig'.
let tokenizer_config: HubTokenizerConfig = serde_json::from_reader(reader)
.map_err(|e| {
tracing::warn!("Unable to parse tokenizer config: {}", e);
e
})
.ok()?;
Some(tokenizer_config)
}
/// Shutdown signal handler
async fn shutdown_signal() {
let ctrl_c = async {
signal::ctrl_c()
.await
.expect("failed to install Ctrl+C handler");
};
#[cfg(unix)]
let terminate = async {
signal::unix::signal(signal::unix::SignalKind::terminate())
.expect("failed to install signal handler")
.recv()
.await;
};
#[cfg(not(unix))]
let terminate = std::future::pending::<()>();
tokio::select! {
_ = ctrl_c => {},
_ = terminate => {},
}
tracing::info!("signal received, starting graceful shutdown");
opentelemetry::global::shutdown_tracer_provider();
}
/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
fn from(err: InferError) -> Self {
let status_code = match err {
InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
InferError::IncompleteGenerationStream => StatusCode::INTERNAL_SERVER_ERROR,
InferError::TemplateError(_) => StatusCode::UNPROCESSABLE_ENTITY,
InferError::MissingTemplateVariable(_) => StatusCode::UNPROCESSABLE_ENTITY,
InferError::ToolError(_) => StatusCode::UNPROCESSABLE_ENTITY,
InferError::StreamSerializationError(_) => StatusCode::INTERNAL_SERVER_ERROR,
};
(
status_code,
Json(ErrorResponse {
error: err.to_string(),
error_type: err.error_type().to_string(),
}),
)
}
}
impl From<InferError> for Event {
fn from(err: InferError) -> Self {
Event::default()
.json_data(ErrorResponse {
error: err.to_string(),
error_type: err.error_type().to_string(),
})
.unwrap()
}
}
#[derive(Debug, Error)]
pub enum WebServerError {
#[error("Axum error: {0}")]
Axum(#[from] axum::BoxError),
#[error("Tokenizer error: {0}")]
Tokenizer(String),
}
| text-generation-inference/router/src/server.rs/0 | {
"file_path": "text-generation-inference/router/src/server.rs",
"repo_id": "text-generation-inference",
"token_count": 48276
} |
commit_rocm := de990cd12537f78f74e40b5c8ee1a62d63d734dd
build-vllm-rocm:
if [ ! -d 'vllm' ]; then \
pip install -U ninja packaging --no-cache-dir && \
git clone https://github.com/mht-sharma/vllm.git vllm; \
fi
cd vllm && git fetch && git checkout $(commit_rocm) && \
PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py build
install-vllm-rocm: build-vllm-rocm
cd vllm && git fetch && git checkout $(commit_rocm) && \
PYTORCH_ROCM_ARCH="gfx90a;gfx942" pip install -e .
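# Typically driven from the server Makefile, e.g. `make -f Makefile-vllm install-vllm-rocm`
# (this assumes a ROCm build of PyTorch is already installed in the environment).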
| text-generation-inference/server/Makefile-vllm/0 | {
"file_path": "text-generation-inference/server/Makefile-vllm",
"repo_id": "text-generation-inference",
"token_count": 221
} |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#ifndef _hip_compat_cuh
#define _hip_compat_cuh
// Workaround for a bug in hipamd, backported from upstream, this is fixed in ROCm 5.6.
__device__ __forceinline__ __half __compat_hrcp(__half x) {
return __half_raw{
static_cast<_Float16>(__builtin_amdgcn_rcph(static_cast<__half_raw>(x).data))};
}
__device__ __forceinline__ __half2 __compat_h2rcp(__half2 x) {
return _Float16_2{
_Float16_2{static_cast<_Float16>(1.0f),
static_cast<_Float16>(1.0f)} / x.data};
}
#define hrcp __compat_hrcp
#define h2rcp __compat_h2rcp
// Automatic conversion of hipblasHgemm doesn't convert half to hipblasHalf.
__host__ __forceinline__ hipblasStatus_t __compat_hipblasHgemm(hipblasHandle_t handle,
hipblasOperation_t transA,
hipblasOperation_t transB,
int m,
int n,
int k,
const half* alpha,
const half* AP,
int lda,
const half* BP,
int ldb,
const half* beta,
half* CP,
int ldc) {
return hipblasHgemm(handle, transA, transB, m, n, k,
reinterpret_cast<const hipblasHalf *>(alpha),
reinterpret_cast<const hipblasHalf *>(AP), lda,
reinterpret_cast<const hipblasHalf *>(BP), ldb,
reinterpret_cast<const hipblasHalf *>(beta),
reinterpret_cast<hipblasHalf *>(CP), ldc);
}
#define hipblasHgemm __compat_hipblasHgemm
// Previous version of PyTorch were converting to rocBLAS instead of hipBLAS.
#define rocblas_handle hipblasHandle_t
#define rocblas_operation_none HIPBLAS_OP_N
#define rocblas_get_stream hipblasGetStream
#define rocblas_set_stream hipblasSetStream
#define rocblas_hgemm __compat_hipblasHgemm
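// Illustrative only (not part of the original header): with the aliases above,
// generic CUDA half2 code such as the helper below compiles unchanged under hipcc.
__device__ __forceinline__ __half2 __compat_example_h2rcp(__half2 x) {
    return h2rcp(x);  // resolves to __compat_h2rcp when built for ROCm
}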
#endif
| text-generation-inference/server/exllama_kernels/exllama_kernels/hip_compat.cuh/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/hip_compat.cuh",
"repo_id": "text-generation-inference",
"token_count": 1710
} |
#ifndef _qdq_3_cuh
#define _qdq_3_cuh
#include "qdq_util.cuh"
#include "../../config.h"
#if QMODE_3BIT == 1
// Permutation:
//
// v9997775 55333111 u8886664 44222000 (u, v lsb)
// vjjjhhhf ffdddbbb uiiiggge eecccaaa
// vtttrrrp ppnnnlll usssqqqo oommmkkk
__forceinline__ __device__ void shuffle_3bit_32
(
uint32_t* q,
int stride
)
{
uint32_t qa = q[0 * stride];
uint32_t qb = q[1 * stride];
uint32_t qc = q[2 * stride];
// qa: aa999888 77766655 54443332 22111000
// qb: lkkkjjji iihhhggg fffeeedd dcccbbba
// qc: vvvuuutt tsssrrrq qqpppooo nnnmmmll
uint32_t qd = qc >> 26;
qc <<= 4;
qc |= qb >> 28;
qb <<= 2;
qb |= qa >> 30;
// qa: ..999888 77766655 54443332 22111000
// qb: ..jjjiii hhhgggff feeedddc ccbbbaaa
// qc: ..tttsss rrrqqqpp pooonnnm mmlllkkk
// qd: vvvuuu
uint32_t za = 0;
uint32_t zb = 0;
uint32_t zc = 0;
for (int i = 0; i < 5; i++) { uint32_t t0 = qa & 0x07; uint32_t t1 = (qa & 0x38) >> 3; qa >>= 6; za |= (t0 << (i * 3)); za |= (t1 << (i * 3 + 16)); }
for (int i = 0; i < 5; i++) { uint32_t t0 = qb & 0x07; uint32_t t1 = (qb & 0x38) >> 3; qb >>= 6; zb |= (t0 << (i * 3)); zb |= (t1 << (i * 3 + 16)); }
for (int i = 0; i < 5; i++) { uint32_t t0 = qc & 0x07; uint32_t t1 = (qc & 0x38) >> 3; qc >>= 6; zc |= (t0 << (i * 3)); zc |= (t1 << (i * 3 + 16)); }
// za: 9997775 55333111 8886664 44222000
// zb: jjjhhhf ffdddbbb iiiggge eecccaaa
// zc: tttrrrp ppnnnlll sssqqqo oommmkkk
// qd: vvvuuu
za |= ((qd & 0x01) >> 0) << 15;
zb |= ((qd & 0x02) >> 1) << 15;
zc |= ((qd & 0x04) >> 2) << 15;
za |= ((qd & 0x08) >> 3) << 31;
zb |= ((qd & 0x10) >> 4) << 31;
zc |= ((qd & 0x20) >> 5) << 31;
// za: v9997775 55333111 u8886664 44222000 (u, v lsb)
// zb: vjjjhhhf ffdddbbb uiiiggge eecccaaa
// zc: vtttrrrp ppnnnlll usssqqqo oommmkkk
q[0 * stride] = za;
q[1 * stride] = zb;
q[2 * stride] = zc;
}
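// The dequantizer below inverts the packing above: each 3-bit code q in [0, 7] maps to
// the signed value q - 4 (range [-4, 3]). OR-ing with c0 = 0x6400 places q, 8*q or 64*q
// in the mantissa of a half equal to 1024, and the y8/y64 scales together with the
// z1/z8/z64 offsets recover q - 4 with one hadd/hfma per packed pair.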
__forceinline__ __device__ void dequant_3bit_32
(
const uint32_t q_0,
const uint32_t q_1,
const uint32_t q_2,
half2 (&dq)[16],
int stride
)
{
const uint32_t c0 = 0x64006400;
const half y8_ = __float2half_rn(1.0f / 8.0f);
const half y64_ = __float2half_rn(1.0f / 64.0f);
const half2 y8 = __halves2half2(y8_, y8_);
const half2 y64 = __halves2half2(y64_, y64_);
const half z1_ = __float2half_rn(-1024.0f - 4.0f);
const half z8_ = __float2half_rn(-1024.0f / 8.0f - 4.0f);
const half z64_ = __float2half_rn(-1024.0f / 64.0f - 4.0f);
const half2 z1 = __halves2half2(z1_, z1_);
const half2 z8 = __halves2half2(z8_, z8_);
const half2 z64 = __halves2half2(z64_, z64_);
uint32_t qa = q_0;
uint32_t qb = q_1;
uint32_t qc = q_2;
half2_uint32 q0((qa & 0x00070007) | c0); // half2(q[ 0], q[ 1]) + 1024
half2_uint32 q1((qa & 0x00380038) | c0); // half2(q[ 2], q[ 3]) * 8 + 1024
qa >>= 6;
half2_uint32 q2((qa & 0x00070007) | c0); // half2(q[ 4], q[ 5]) + 1024
half2_uint32 q3((qa & 0x00380038) | c0); // half2(q[ 6], q[ 7]) * 8 + 1024
half2_uint32 q4((qa & 0x01c001c0) | c0); // half2(q[ 8], q[ 9]) * 64 + 1024
qa >>= 9;
qa &= 0x00010001;
half2_uint32 q5((qb & 0x00070007) | c0); // half2(q[10], q[11]) + 1024
half2_uint32 q6((qb & 0x00380038) | c0); // half2(q[12], q[13]) * 8 + 1024
qb >>= 6;
half2_uint32 q7((qb & 0x00070007) | c0); // half2(q[14], q[15]) + 1024
half2_uint32 q8((qb & 0x00380038) | c0); // half2(q[16], q[17]) * 8 + 1024
half2_uint32 q9((qb & 0x01c001c0) | c0); // half2(q[18], q[19]) * 64 + 1024
qb >>= 8;
qb &= 0x00020002;
half2_uint32 q10((qc & 0x00070007) | c0); // half2(q[20], q[21]) + 1024
half2_uint32 q11((qc & 0x00380038) | c0); // half2(q[22], q[23]) * 8 + 1024
qc >>= 6;
half2_uint32 q12((qc & 0x00070007) | c0); // half2(q[24], q[25]) + 1024
half2_uint32 q13((qc & 0x00380038) | c0); // half2(q[26], q[27]) * 8 + 1024
half2_uint32 q14((qc & 0x01c001c0) | c0); // half2(q[28], q[29]) * 64 + 1024
qc >>= 7;
qc &= 0x00040004;
half2_uint32 q15((qa | qb | qc) | c0);
dq[ 0] = __hadd2( q0.as_half2, z1);
dq[ 1] = __hfma2( q1.as_half2, y8, z8);
dq[ 2] = __hadd2( q2.as_half2, z1);
dq[ 3] = __hfma2( q3.as_half2, y8, z8);
dq[ 4] = __hfma2( q4.as_half2, y64, z64);
dq[ 5] = __hadd2( q5.as_half2, z1);
dq[ 6] = __hfma2( q6.as_half2, y8, z8);
dq[ 7] = __hadd2( q7.as_half2, z1);
dq[ 8] = __hfma2( q8.as_half2, y8, z8);
dq[ 9] = __hfma2( q9.as_half2, y64, z64);
dq[10] = __hadd2(q10.as_half2, z1);
dq[11] = __hfma2(q11.as_half2, y8, z8);
dq[12] = __hadd2(q12.as_half2, z1);
dq[13] = __hfma2(q13.as_half2, y8, z8);
dq[14] = __hfma2(q14.as_half2, y64, z64);
dq[15] = __hadd2(q15.as_half2, z1);
}
#else
__forceinline__ __device__ void shuffle_3bit_32
(
uint32_t* q,
int stride
)
{
}
__forceinline__ __device__ void dequant_3bit_32
(
const uint32_t q_0,
const uint32_t q_1,
const uint32_t q_2,
half2 (&dq)[16],
int stride
)
{
half dqh[32];
for (int i = 0; i < 10; i++) dqh[ i] = dq_ns(exb( q_0, i * 3 , 0x07), 4);
dqh[10 ] = dq_ns(exb(q_1, q_0, 30, 0x07), 4);
for (int i = 0; i < 10; i++) dqh[11 + i] = dq_ns(exb( q_1, i * 3 + 1, 0x07), 4);
dqh[21 ] = dq_ns(exb(q_2, q_1, 31, 0x07), 4);
for (int i = 0; i < 10; i++) dqh[22 + i] = dq_ns(exb( q_2, i * 3 + 2, 0x07), 4);
for (int i = 0; i < 16; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]);
}
#endif
#endif
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_3.cuh/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_3.cuh",
"repo_id": "text-generation-inference",
"token_count": 3335
} |
import pytest
import torch
from copy import copy
from transformers import AutoTokenizer
from text_generation_server.pb import generate_pb2
from text_generation_server.models.causal_lm import CausalLMBatch
from text_generation_server.utils import weight_hub_files, download_weights
from text_generation_server.models.bloom import BloomCausalLMBatch, BLOOMSharded
from text_generation_server.models.custom_modeling.bloom_modeling import (
BloomForCausalLM,
)
@pytest.fixture(scope="session")
def default_bloom():
model_id = "bigscience/bloom-560m"
revision = "main"
filenames = weight_hub_files(model_id, revision, ".safetensors")
download_weights(filenames, model_id, revision)
return BLOOMSharded(
model_id,
model_class=BloomForCausalLM,
)
@pytest.fixture(scope="session")
def bloom_560m_tokenizer():
return AutoTokenizer.from_pretrained("bigscience/bloom-560m", padding_side="left")
@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
return generate_pb2.Request(
id=0,
inputs="Test",
input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]),
prefill_logprobs=True,
truncate=100,
parameters=default_pb_parameters,
stopping_parameters=default_pb_stop_parameters,
)
@pytest.fixture
def default_pb_batch(default_pb_request):
return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)
@pytest.fixture
def default_bloom_batch(default_pb_batch, bloom_560m_tokenizer):
return BloomCausalLMBatch.from_pb(
default_pb_batch, bloom_560m_tokenizer, torch.float32, torch.device("cpu")
)
@pytest.fixture
def default_multi_requests_bloom_batch(default_pb_request, bloom_560m_tokenizer):
req_0 = copy(default_pb_request)
req_0.id = 1
req_1 = default_pb_request
req_1.id = 2
req_1.stopping_parameters.max_new_tokens = 5
batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2)
return BloomCausalLMBatch.from_pb(
batch_pb, bloom_560m_tokenizer, torch.float32, torch.device("cpu")
)
def test_batch_from_pb(default_pb_batch, default_bloom_batch):
batch = default_bloom_batch
assert batch.batch_id == default_pb_batch.id
assert batch.requests == default_pb_batch.requests
assert len(batch.input_ids) == default_pb_batch.size
assert batch.input_ids[0][-1] == 10264
assert torch.all(batch.input_ids[0][:-1] == 3)
assert batch.attention_mask[0][0] == 1
assert torch.all(batch.attention_mask[0][1:] == 0)
assert batch.past_key_values is None
assert all(
[
torch.equal(input_ids, all_input_ids[:, 0])
for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids)
]
)
assert batch.input_lengths == [1]
assert len(batch) == default_pb_batch.size
assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)
assert batch.max_input_length == batch.input_lengths[0]
def test_batch_concatenate_no_prefill(default_bloom_batch):
with pytest.raises(ValueError):
BloomCausalLMBatch.concatenate([default_bloom_batch, default_bloom_batch])
def test_causal_lm_batch_type(default_bloom):
assert default_bloom.batch_type == BloomCausalLMBatch
def test_causal_lm_generate_token(default_bloom, default_bloom_batch):
sequence_length = len(default_bloom_batch.all_input_ids[0])
generations, next_batch, _ = default_bloom.generate_token(default_bloom_batch)
assert len(generations) == len(default_bloom_batch)
assert isinstance(next_batch, CausalLMBatch)
assert not next_batch.keys_head_dim_last
assert len(next_batch.all_input_ids) == len(next_batch)
assert len(next_batch.all_input_ids[0]) == sequence_length + 1
assert len(next_batch.attention_mask[0]) == 11
assert torch.all(next_batch.all_input_ids[0][-2:] == 10264)
assert torch.all(next_batch.all_input_ids[0][:-2] == 3)
assert torch.all(next_batch.attention_mask[0][:2] == 1)
assert torch.all(next_batch.attention_mask[0][2:] == 0)
assert next_batch.input_ids.shape == (len(next_batch), 1)
assert next_batch.input_ids[0, 0] == 10264
assert next_batch.input_lengths == [2]
assert next_batch.max_input_length == next_batch.input_lengths[0]
assert next_batch.past_key_values is not None
assert all(
[p[0].shape == (16, 64, sequence_length) for p in next_batch.past_key_values]
)
assert all(
[p[1].shape == (16, sequence_length, 64) for p in next_batch.past_key_values]
)
assert all([generation.generated_text is None for generation in generations])
assert all([len(generation.prefill_tokens) == 1 for generation in generations])
assert all(
[
token_id.item() == 10264
for generation in generations
for token_id in generation.tokens.token_ids
]
)
assert all(
[
token_text == "Test"
for generation in generations
for token_text in generation.tokens.texts
]
)
assert generations[0].request_id == 0
def test_causal_lm_generate_token_completion(default_bloom, default_bloom_batch):
next_batch = default_bloom_batch
for _ in range(default_bloom_batch.stopping_criterias[0].max_new_tokens - 1):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(default_bloom_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert generations[0].request_id == default_bloom_batch.requests[0].id
assert (
generations[0].generated_text.generated_tokens
== default_bloom_batch.stopping_criterias[0].max_new_tokens
)
def test_causal_lm_generate_token_completion_multi(
default_bloom, default_multi_requests_bloom_batch
):
next_batch = default_multi_requests_bloom_batch
for i in range(
default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 1
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(default_multi_requests_bloom_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert generations[1].generated_text.text == "TestTestTestTestTest"
assert (
generations[1].request_id == default_multi_requests_bloom_batch.requests[1].id
)
assert (
generations[1].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
)
# Copy stopping_criterias before filtering
stopping_criterias = default_multi_requests_bloom_batch.stopping_criterias.copy()
next_batch = next_batch.filter([next_batch.requests[0].id])
for _ in range(
stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert (
generations[0].request_id == default_multi_requests_bloom_batch.requests[0].id
)
assert (
generations[0].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
)
def test_batch_concatenate(
default_bloom, default_bloom_batch, default_multi_requests_bloom_batch
):
next_batch_0 = default_bloom_batch
_, next_batch_0, _ = default_bloom.generate_token(next_batch_0)
_, next_batch_0, _ = default_bloom.generate_token(next_batch_0)
next_batch_1 = default_multi_requests_bloom_batch
_, next_batch_1, _ = default_bloom.generate_token(next_batch_1)
# Clone past_key_values before concatenating to compare after,
# because they are removed from the concatenated batches
next_batch_0_past_key_values = [
(k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values
]
next_batch_1_past_key_values = [
(k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values
]
next_batch = BloomCausalLMBatch.concatenate([next_batch_0, next_batch_1])
assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0])
assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0])
assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1])
assert torch.all(
next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1
)
assert torch.all(
next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1
)
assert torch.all(next_batch.attention_mask[1:, 3:] == 0)
assert next_batch.batch_id == 0
assert torch.all(next_batch.input_ids == 10264)
assert next_batch.input_lengths == [3, 2, 2]
assert next_batch.max_input_length == 3
assert next_batch.requests[0] == next_batch_0.requests[0]
assert next_batch.requests[1:] == list(next_batch_1.requests)
assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers
assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias
assert next_batch.past_key_values is not None
assert all([p[0].shape == (3, 16, 64, 2) for p in next_batch.past_key_values])
assert all([p[1].shape == (3, 16, 2, 64) for p in next_batch.past_key_values])
for i, past in enumerate(next_batch.past_key_values):
assert torch.equal(next_batch_0_past_key_values[i][0][:, :, -2:], past[0][0])
assert torch.equal(
next_batch_1_past_key_values[i][0][:, :, -1:],
past[0][1:, :, :, -1].reshape(-1, 64, 1),
)
assert torch.equal(next_batch_0_past_key_values[i][1][:, -2:, :], past[1][0])
assert torch.equal(
next_batch_1_past_key_values[i][1][:, -1:, :],
past[1][1:, :, -1, :].reshape(-1, 1, 64),
)
for _ in range(
default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 2
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 3
assert generations[2].generated_text.text == "TestTestTestTestTest"
assert (
generations[2].request_id == default_multi_requests_bloom_batch.requests[1].id
)
assert (
generations[2].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
)
next_batch = next_batch.filter(
[next_batch.requests[0].id, next_batch.requests[1].id]
)
for _ in range(
default_bloom_batch.stopping_criterias[0].max_new_tokens
- default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
- 2
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert generations[0].request_id == default_bloom_batch.requests[0].id
assert (
generations[0].generated_text.generated_tokens
== default_bloom_batch.stopping_criterias[0].max_new_tokens
)
next_batch = next_batch.filter([next_batch.requests[1].id])
for _ in range(
default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
- default_bloom_batch.stopping_criterias[0].max_new_tokens
- default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
- 4
):
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_bloom.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert (
generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
)
assert (
generations[0].request_id == default_multi_requests_bloom_batch.requests[0].id
)
assert (
generations[0].generated_text.generated_tokens
== default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
)
| text-generation-inference/server/tests/models/test_bloom.py/0 | {
"file_path": "text-generation-inference/server/tests/models/test_bloom.py",
"repo_id": "text-generation-inference",
"token_count": 5403
} |
# Origin: https://github.com/predibase/lorax
# Path: lorax/server/lorax_server/adapters/weights.py
# License: Apache License Version 2.0, January 2004
from abc import ABC, abstractclassmethod
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Optional, Set, Type
import torch
@dataclass
class AdapterBatchMetadata:
# [batch_size]
adapter_indices: torch.Tensor
# [num_adapters]
adapter_set: Set[int]
# [num_segments + 1]
adapter_segments: torch.Tensor
# [num_segments]
# maps from segment index to adapter index, i.e.:
# segment_indices[s] == adapter_indices[i]
segment_indices: List[int]
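# --- Illustrative sketch (not part of the original file) ---------------------
# The field comments above say that `adapter_segments` holds run boundaries and
# that `segment_indices[s]` is the adapter used throughout segment `s`. The
# hypothetical helper below shows that relationship for a sample
# `adapter_indices` tensor; it is not used anywhere by the server code.
def _example_segments_from_adapter_indices(adapter_indices: torch.Tensor):
    # e.g. adapter_indices = [0, 0, 1, 1, 1, 0]
    boundaries = [0]
    segment_indices = [int(adapter_indices[0])]
    for i in range(1, adapter_indices.shape[0]):
        if adapter_indices[i] != adapter_indices[i - 1]:
            boundaries.append(i)
            segment_indices.append(int(adapter_indices[i]))
    boundaries.append(adapter_indices.shape[0])
    # -> boundaries == [0, 2, 5, 6], segment_indices == [0, 1, 0]
    return torch.tensor(boundaries), segment_indices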
class AdapterWeights(ABC):
@abstractclassmethod
def get_batch_types(cls) -> List[Type["BatchAdapterWeights"]]:
pass
@property
def speculative_tokens(self) -> int:
return 0
class BatchAdapterWeights(ABC):
@abstractclassmethod
def has_adapter(self, adapter_index: int) -> bool:
pass
@abstractclassmethod
def load(
cls,
adapter_weights: Dict[int, AdapterWeights],
meta: "AdapterBatchMetadata",
prefill: bool,
prefill_head_indices: torch.Tensor,
) -> Optional["BatchAdapterWeights"]:
pass
class LayerAdapterWeights:
"""Adapter weights that apply to a particular layer."""
def __init__(self):
self.adapter_weights: Dict[int, AdapterWeights] = {}
def add_adapter(self, adapter_idx: int, weights: AdapterWeights):
self.adapter_weights[adapter_idx] = weights
def remove_adapter(self, adapter_idx: int):
if adapter_idx not in self.adapter_weights:
return
del self.adapter_weights[adapter_idx]
def is_empty(self) -> bool:
return len(self.adapter_weights) == 0
def get_data(
self,
meta: AdapterBatchMetadata,
prefill: bool,
prefill_head_indices: Optional[torch.Tensor],
) -> Dict[str, BatchAdapterWeights]:
# bucket adapters by batch class
adapter_batch_types: Dict[
Type[BatchAdapterWeights], Dict[int, AdapterWeights]
] = defaultdict(dict)
for adapter_index, adapter_weights in self.adapter_weights.items():
for batch_type in adapter_weights.get_batch_types():
adapter_batch_types[batch_type][adapter_index] = adapter_weights
batch_data = {}
for batch_type, adapter_weights in adapter_batch_types.items():
batched_weights = batch_type.load(
adapter_weights, meta, prefill, prefill_head_indices
)
if batched_weights is not None:
batch_data = batched_weights
return batch_data
@dataclass
class AdapterBatchData:
meta: AdapterBatchMetadata
# layer type -> adapter type -> batch weight data
data: Dict[str, Dict[str, BatchAdapterWeights]]
prefill: bool
@staticmethod
def from_meta(
meta: AdapterBatchMetadata,
weights: Dict[str, LayerAdapterWeights],
prefill: bool,
prefill_head_indices: Optional[torch.Tensor],
) -> "AdapterBatchData":
data = {}
for k, v in weights.items():
if v.is_empty():
continue
data[k] = v.get_data(
meta, prefill, prefill_head_indices if k == "lm_head" else None
)
return AdapterBatchData(meta=meta, data=data, prefill=prefill)
def ranks(self) -> Set[int]:
# TODO(travis): refactor to be less coupled to lora implementation
ranks = set()
for lora_data in self.data.values():
if lora_data is None:
continue
for rank_data in lora_data.rank_data.values():
ranks.add(rank_data.rank)
return ranks
def layer_names(self) -> Set[str]:
return set(self.data.keys())
def adapter_keys(self) -> Set[str]:
adapter_keys = set()
for layer_data in self.data.values():
adapter_keys.update(layer_data.keys())
return adapter_keys
@property
def max_rank(self) -> int:
ranks = self.ranks()
return max(ranks) if len(ranks) > 0 else 0
| text-generation-inference/server/text_generation_server/adapters/weights.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/adapters/weights.py",
"repo_id": "text-generation-inference",
"token_count": 1824
} |
from typing import Optional
import torch
import torch.nn as nn
import intel_extension_for_pytorch as ipex
class WQLinear(nn.Module):
def __init__(
self, w_bit, group_size, qweight, qzeros, scales, bias: Optional[torch.Tensor]
):
super().__init__()
if w_bit not in [4]:
raise NotImplementedError("Only 4-bit are supported for now.")
self.in_features = qweight.shape[0]
self.out_features = qweight.shape[1] * 32 // w_bit
self.w_bit = w_bit
self.group_size = group_size if group_size != -1 else self.in_features
        # quick sanity check (make sure alignment)
assert self.in_features % self.group_size == 0
assert self.out_features % (32 // self.w_bit) == 0
self.qweight = qweight
self.qzeros = qzeros
self.scales = scales
self.bias = bias
self.woq_linear = (
ipex.llm.quantization.IPEXWeightOnlyQuantizedLinear.from_weight(
self.qweight,
self.scales,
self.qzeros,
self.in_features,
self.out_features,
bias=self.bias,
group_size=self.group_size,
quant_method=ipex.llm.quantization.QuantMethod.AWQ_GEMM,
dtype=ipex.llm.quantization.QuantDtype.INT4,
)
)
@torch.no_grad()
def forward(self, x):
out_shape = x.shape[:-1] + (self.out_features,)
out = self.woq_linear(x.reshape(-1, x.shape[-1]))
return out.reshape(out_shape)
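# --- Illustrative sketch (not part of the original file) ---------------------
# A hypothetical shape check mirroring the arithmetic in WQLinear.__init__: with
# w_bit == 4, each int32 column of `qweight` packs 32 // 4 == 8 output features,
# so out_features == qweight.shape[1] * 8. The qzeros/scales shapes follow the
# common per-group AWQ layout and are an assumption here; no IPEX kernel is touched.
def _example_awq_packed_shapes(in_features: int = 4096, out_features: int = 11008):
    w_bit, group_size = 4, 128
    qweight = torch.empty(in_features, out_features * w_bit // 32, dtype=torch.int32)
    qzeros = torch.empty(in_features // group_size, out_features * w_bit // 32, dtype=torch.int32)
    scales = torch.empty(in_features // group_size, out_features, dtype=torch.float16)
    assert qweight.shape[1] * 32 // w_bit == out_features
    assert in_features % group_size == 0 and out_features % (32 // w_bit) == 0
    return qweight.shape, qzeros.shape, scales.shape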
| text-generation-inference/server/text_generation_server/layers/awq/quantize/ipex.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/awq/quantize/ipex.py",
"repo_id": "text-generation-inference",
"token_count": 778
} |
import math
import numpy as np
import torch
import torch.nn as nn
import intel_extension_for_pytorch as ipex
class QuantLinear(nn.Module):
def __init__(self, qweight, qzeros, scales, g_idx, bias, bits, groupsize):
super().__init__()
self.register_buffer("qweight", qweight)
self.register_buffer("qzeros", qzeros)
self.register_buffer("scales", scales)
self.register_buffer("g_idx", g_idx)
if bias is not None:
self.register_buffer("bias", bias)
else:
self.bias = None
if bits not in [4]:
raise NotImplementedError("Only 4 bits are supported.")
self.bits = bits
self.maxq = 2**self.bits - 1
self.groupsize = groupsize
self.outfeatures = qweight.shape[1]
self.infeatures = qweight.shape[0] * 32 // bits
self.woq_linear = (
ipex.llm.quantization.IPEXWeightOnlyQuantizedLinear.from_weight(
self.qweight,
self.scales,
self.qzeros,
self.infeatures,
self.outfeatures,
bias=self.bias,
group_size=self.groupsize,
g_idx=g_idx,
quant_method=ipex.llm.quantization.QuantMethod.GPTQ_GEMM,
dtype=ipex.llm.quantization.QuantDtype.INT4,
)
)
@classmethod
def new(cls, bits, groupsize, infeatures, outfeatures, bias):
if bits not in [4]:
raise NotImplementedError("Only 4 bits are supported.")
qweight = torch.zeros((infeatures // 32 * bits, outfeatures), dtype=torch.int32)
qzeros = torch.zeros(
(math.ceil(infeatures / groupsize), outfeatures // 32 * bits),
dtype=torch.int32,
)
scales = torch.zeros(
(math.ceil(infeatures / groupsize), outfeatures), dtype=torch.float16
)
g_idx = torch.tensor(
[i // groupsize for i in range(infeatures)], dtype=torch.int32
)
if bias:
bias = torch.zeros((outfeatures), dtype=torch.float16)
else:
bias = None
return cls(qweight, qzeros, scales, g_idx, bias, bits, groupsize)
def pack(self, linear, scales, zeros, g_idx=None):
self.g_idx = g_idx.clone() if g_idx is not None else self.g_idx
scales = scales.t().contiguous()
zeros = zeros.t().contiguous()
scale_zeros = zeros * scales
self.scales = scales.clone().half()
if linear.bias is not None:
self.bias = linear.bias.clone().half()
intweight = []
for idx in range(self.infeatures):
intweight.append(
torch.round(
(linear.weight.data[:, idx] + scale_zeros[self.g_idx[idx]])
/ self.scales[self.g_idx[idx]]
).to(torch.int)[:, None]
)
intweight = torch.cat(intweight, dim=1)
intweight = intweight.t().contiguous()
intweight = intweight.numpy().astype(np.uint32)
qweight = np.zeros(
(intweight.shape[0] // 32 * self.bits, intweight.shape[1]), dtype=np.uint32
)
i = 0
row = 0
while row < qweight.shape[0]:
if self.bits in [4]:
for j in range(i, i + (32 // self.bits)):
qweight[row] |= intweight[j] << (self.bits * (j - i))
i += 32 // self.bits
row += 1
else:
raise NotImplementedError("Only 4 bits are supported.")
qweight = qweight.astype(np.int32)
self.qweight = torch.from_numpy(qweight)
zeros -= 1
zeros = zeros.numpy().astype(np.uint32)
qzeros = np.zeros(
(zeros.shape[0], zeros.shape[1] // 32 * self.bits), dtype=np.uint32
)
i = 0
col = 0
while col < qzeros.shape[1]:
if self.bits in [4]:
for j in range(i, i + (32 // self.bits)):
qzeros[:, col] |= zeros[:, j] << (self.bits * (j - i))
i += 32 // self.bits
col += 1
else:
raise NotImplementedError("Only 4 bits are supported.")
qzeros = qzeros.astype(np.int32)
self.qzeros = torch.from_numpy(qzeros)
def forward(self, x):
out_shape = x.shape[:-1] + (self.outfeatures,)
out = self.woq_linear(x.reshape(-1, x.shape[-1]))
return out.reshape(out_shape)
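# --- Illustrative sketch (not part of the original file) ---------------------
# A self-contained example of the 4-bit packing loop used in QuantLinear.pack:
# 8 consecutive rows of `intweight` (values in [0, 15]) are OR-ed into a single
# int32 row, each shifted by 4 * its position within the group. The unpacking
# half is hypothetical and only included to show the inverse operation.
def _example_gptq_pack_roundtrip():
    bits = 4
    intweight = (np.arange(32, dtype=np.uint32) % 16).reshape(32, 1)
    qweight = np.zeros((intweight.shape[0] // 32 * bits, intweight.shape[1]), dtype=np.uint32)
    for row in range(qweight.shape[0]):
        for j in range(32 // bits):
            qweight[row] |= intweight[row * (32 // bits) + j] << (bits * j)
    unpacked = np.zeros_like(intweight)
    for row in range(qweight.shape[0]):
        for j in range(32 // bits):
            unpacked[row * (32 // bits) + j] = (qweight[row] >> (bits * j)) & 0xF
    assert np.array_equal(unpacked, intweight)
    return qweight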
| text-generation-inference/server/text_generation_server/layers/gptq/ipex.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/gptq/ipex.py",
"repo_id": "text-generation-inference",
"token_count": 2335
} |
# coding=utf-8
# Copyright 2023, 2024 DeepSeek-AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
def grouped_topk(
hidden_states: torch.Tensor,
gating_output: torch.Tensor,
topk: int,
renormalize: bool,
num_expert_group: int = 0,
topk_group: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
scores = torch.softmax(gating_output, dim=-1)
num_token = scores.shape[0]
group_scores = (
scores.view(num_token, num_expert_group, -1).max(dim=-1).values
) # [n, n_group]
group_idx = torch.topk(group_scores, k=topk_group, dim=-1, sorted=False)[
1
] # [n, top_k_group]
group_mask = torch.zeros_like(group_scores) # [n, n_group]
group_mask.scatter_(1, group_idx, 1) # [n, n_group]
score_mask = (
group_mask.unsqueeze(-1)
.expand(num_token, num_expert_group, scores.shape[-1] // num_expert_group)
.reshape(num_token, -1)
) # [n, e]
tmp_scores = scores.masked_fill(~score_mask.bool(), 0.0) # [n, e]
topk_weights, topk_ids = torch.topk(tmp_scores, k=topk, dim=-1, sorted=False)
if renormalize:
topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
return topk_weights, topk_ids
def fused_topk(
hidden_states: torch.Tensor,
gating_output: torch.Tensor,
topk: int,
renormalize: bool,
) -> Tuple[torch.Tensor, torch.Tensor]:
topk_weights = torch.nn.functional.softmax(
gating_output, dim=1, dtype=torch.float32
)
topk_weights, topk_ids = torch.topk(topk_weights, topk, dim=-1)
if renormalize:
topk_weights /= topk_weights.sum(dim=-1, keepdim=True)
return topk_weights, topk_ids
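# --- Illustrative usage sketch (not part of the original file) ---------------
# Hypothetical shapes: 4 tokens routed over 64 experts arranged in 8 groups,
# keeping the top 2 groups and then the top 6 experts overall, in the style of
# DeepSeek grouped routing. This only demonstrates the expected in/out shapes.
def _example_grouped_topk():
    num_tokens, num_experts = 4, 64
    hidden_states = torch.randn(num_tokens, 16)           # unused by the routing math
    gating_output = torch.randn(num_tokens, num_experts)  # router logits
    topk_weights, topk_ids = grouped_topk(
        hidden_states,
        gating_output,
        topk=6,
        renormalize=True,
        num_expert_group=8,
        topk_group=2,
    )
    assert topk_weights.shape == (num_tokens, 6) and topk_ids.shape == (num_tokens, 6)
    return topk_weights, topk_ids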
| text-generation-inference/server/text_generation_server/layers/moe/fused_moe_ipex.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/moe/fused_moe_ipex.py",
"repo_id": "text-generation-inference",
"token_count": 920
} |
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple
from text_generation_server.layers.attention import (
paged_attention,
attention,
Seqlen,
)
from text_generation_server.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
get_linear,
TensorParallelMultiAdapterLinear,
TensorParallelAdapterRowLinear,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.layers.rotary import PositionRotaryEmbedding
from text_generation_server.layers.layernorm import (
FastRMSNorm,
)
from text_generation_server.utils.weights import UnquantizedWeight
class Gemma2Config(PretrainedConfig):
def __init__(
self,
vocab_size=256128,
hidden_size=3072,
intermediate_size=24576,
num_hidden_layers=28,
num_attention_heads=16,
num_key_value_heads=16,
head_dim=256,
hidden_act="gelu_pytorch_tanh",
max_position_embeddings=8192,
initializer_range=0.02,
rms_norm_eps=1e-6,
use_cache=True,
pad_token_id=None,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=True,
rope_theta=10000.0,
rope_scaling=None,
attention_bias=False,
attention_dropout=0.0,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.head_dim = head_dim
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
class Gemma2FastRMSNorm(FastRMSNorm):
@classmethod
def load(cls, prefix: str, weights, eps=1e-6):
dtype = weights.dtype
weights.dtype = torch.float32
weight = weights.get_tensor(f"{prefix}.weight") + 1
weights.dtype = dtype
new = cls(weight, eps)
new.dtype = dtype
return new
# perform the multiplication in full precision and downcast after
def forward(self, hidden_states, residual=None):
if residual is not None:
hidden_states += residual
residual = hidden_states
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
hidden_states = hidden_states * self.weight
return hidden_states.to(self.dtype), residual
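# --- Illustrative sketch (not part of the original file) ---------------------
# A standalone restatement of the math above: Gemma2 checkpoints store the
# RMSNorm scale as (w - 1), so `load` adds 1 back, and `forward` runs the whole
# normalization in float32 before casting back to the model dtype. Shapes and
# the helper name are hypothetical.
def _example_gemma2_rmsnorm(x: torch.Tensor, stored_weight: torch.Tensor, eps: float = 1e-6):
    weight = stored_weight.to(torch.float32) + 1  # undo the (w - 1) parametrization
    x_f32 = x.to(torch.float32)
    variance = x_f32.pow(2).mean(-1, keepdim=True)
    return (x_f32 * torch.rsqrt(variance + eps) * weight).to(x.dtype)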
def load_attention(config, prefix: str, weights):
if config.num_attention_heads != config.num_key_value_heads:
return _load_gqa(config, prefix, weights)
else:
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=False,
)
def _load_gqa(config, prefix: str, weights):
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
)
if isinstance(weight, UnquantizedWeight):
weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.head_dim
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
], f"{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
return TensorParallelColumnLinear(get_linear(weight, bias=None))
class FlashGemma2Attention(torch.nn.Module):
def __init__(
self, prefix: str, config, weights, layer_id, causal: bool, is_sliding: bool
):
super().__init__()
self.num_heads = config.num_attention_heads
self.head_size = config.head_dim
self.causal = causal
if is_sliding:
self.window_size = config.sliding_window
else:
self.window_size = -1
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
# self.softmax_scale = self.head_size**-0.5
self.softmax_scale = config.query_pre_attn_scalar**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.softcap = config.attn_logit_softcapping
query_key_value = load_attention(config, prefix, weights)
self.query_key_value = TensorParallelMultiAdapterLinear.load(
query_key_value,
layer_id,
["q_proj", "k_proj", "v_proj"],
sizes=[
self.head_size * config.num_attention_heads,
self.head_size * config.num_key_value_heads,
self.head_size * config.num_key_value_heads,
],
process_group=weights.process_group,
)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=False,
)
self.o_proj = TensorParallelAdapterRowLinear.load(
o_proj,
layer_id,
"o_proj",
process_group=weights.process_group,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
adapter_data,
):
qkv = self.query_key_value(hidden_states, adapter_data)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
kv_cache.store(
key=kv[:, 0],
value=kv[:, 1],
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=kv[:, 0],
value=kv[:, 1],
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
window_size_left=self.window_size,
softcap=self.softcap,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
softcap=self.softcap,
kv_scales=self.kv_scales,
)
return self.o_proj(
attn_output.view(-1, self.num_heads * self.head_size), adapter_data
)
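# --- Illustrative sketch (not part of the original file) ---------------------
# The attention forward above receives one fused QKV projection per token and
# splits it into [num_heads * head_size] query features plus
# [2 * num_kv_heads * head_size] key/value features. A standalone version with
# hypothetical sizes:
def _example_split_fused_qkv(num_tokens=3, num_heads=8, num_kv_heads=4, head_size=64):
    qkv = torch.randn(num_tokens, (num_heads + 2 * num_kv_heads) * head_size)
    query, kv = qkv.split(
        [num_heads * head_size, 2 * num_kv_heads * head_size], dim=1
    )
    query = query.view(-1, num_heads, head_size)  # (3, 8, 64)
    kv = kv.view(-1, 2, num_kv_heads, head_size)  # (3, 2, 4, 64)
    key, value = kv[:, 0], kv[:, 1]               # (3, 4, 64) each
    return query.shape, key.shape, value.shape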
class Gemma2MLP(nn.Module):
def __init__(self, prefix, config, weights, layer_id):
super().__init__()
act = config.hidden_activation
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# Fuse gate and up proj
gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=False,
)
self.gate_up_proj = TensorParallelMultiAdapterLinear.load(
gate_up_proj,
layer_id,
["gate_proj", "up_proj"],
sizes=[
config.intermediate_size,
config.intermediate_size,
],
process_group=weights.process_group,
)
down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.down_proj = TensorParallelAdapterRowLinear.load(
down_proj,
layer_id,
"down_proj",
process_group=weights.process_group,
)
self.intermediate_size = (
config.intermediate_size // weights.process_group.size()
)
def forward(self, hidden_states, adapter_data):
gate_up_states = self.gate_up_proj(hidden_states, adapter_data)
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
return self.down_proj(
self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], adapter_data
)
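# --- Illustrative sketch (not part of the original file) ---------------------
# Gemma2MLP.forward above views the fused gate/up projection as
# (tokens, 2, intermediate) and combines the halves as act(gate) * up. A
# standalone restatement with hypothetical sizes (tanh-approximate GELU, as
# selected for "gelu_pytorch_tanh"):
def _example_gated_mlp(num_tokens: int = 2, intermediate: int = 8):
    gate_up = torch.randn(num_tokens, 2 * intermediate).view(-1, 2, intermediate)
    gate, up = gate_up[:, 0], gate_up[:, 1]
    return torch.nn.functional.gelu(gate, approximate="tanh") * up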
class FlashGemma2Layer(nn.Module):
def __init__(
self, prefix: str, config, weights, layer_id, causal: bool, is_sliding: bool
):
super().__init__()
self.self_attn = FlashGemma2Attention(
prefix=f"{prefix}.self_attn",
config=config,
weights=weights,
layer_id=layer_id,
causal=causal,
is_sliding=is_sliding,
)
self.mlp = Gemma2MLP(
prefix=f"{prefix}.mlp", config=config, weights=weights, layer_id=layer_id
)
self.input_layernorm = Gemma2FastRMSNorm.load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = Gemma2FastRMSNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.pre_feedforward_layernorm = Gemma2FastRMSNorm.load(
prefix=f"{prefix}.pre_feedforward_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.post_feedforward_layernorm = Gemma2FastRMSNorm.load(
prefix=f"{prefix}.post_feedforward_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
adapter_data,
):
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
adapter_data,
)
# faster post attention rms norm
normed_attn_res_output, _ = self.post_attention_layernorm(attn_output)
normed_attn_res_output = normed_attn_res_output + res
res = normed_attn_res_output
pre_normed, _ = self.pre_feedforward_layernorm(normed_attn_res_output)
mlp_output = self.mlp(pre_normed, adapter_data)
post_hidden_states, _ = self.post_feedforward_layernorm(mlp_output)
return post_hidden_states, normed_attn_res_output
class FlashGemma2Model(torch.nn.Module):
def __init__(self, prefix: str, config, weights, causal: bool):
super().__init__()
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
self.layers = nn.ModuleList(
[
FlashGemma2Layer(
prefix=f"{prefix}.layers.{layer_id}",
config=config,
weights=weights,
layer_id=layer_id,
causal=causal,
is_sliding=layer_id % 2 == 0,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = Gemma2FastRMSNorm.load(
prefix=f"{prefix}.norm", weights=weights, eps=config.rms_norm_eps
)
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
inputs_embeds: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
adapter_data: Optional[torch.Tensor] = None,
) -> torch.Tensor:
hidden_states = inputs_embeds
# Get rotary cos and sin for this forward
# Avoid to index in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
adapter_data,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashGemma2ForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights, *, causal: bool = True):
super().__init__()
embed_norm = config.hidden_size**0.5
if not prefix:
prefix = "model"
else:
prefix = f"{prefix}.model"
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embed_tokens", weights=weights
)
self.embed_tokens.weight *= embed_norm
self.model = FlashGemma2Model(
prefix=prefix, config=config, weights=weights, causal=causal
)
self.lm_head = SpeculativeHead.load(
prefix=(
f"{prefix}.embed_tokens"
if config.tie_word_embeddings
else f"{prefix}.lm_head"
),
config=config,
weights=weights,
)
self.softcap = config.final_logit_softcapping
assert isinstance(self.softcap, float)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
input_embeds = self.embed_tokens(input_ids)
hidden_states = self.model(
input_embeds,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
adapter_data,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.lm_head(hidden_states)
logits /= self.softcap
logits = torch.tanh(logits)
logits *= self.softcap
return logits, speculative_logits
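# --- Illustrative sketch (not part of the original file) ---------------------
# The last lines of `forward` apply final-logit soft-capping,
#     logits = softcap * tanh(logits / softcap),
# which smoothly bounds every logit to (-softcap, softcap). The values below
# are hypothetical.
def _example_logit_softcap(softcap: float = 30.0):
    logits = torch.tensor([-100.0, -10.0, 0.0, 10.0, 100.0])
    capped = torch.tanh(logits / softcap) * softcap
    assert capped.abs().max() < softcap
    return capped  # roughly [-29.9, -9.6, 0.0, 9.6, 29.9]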
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 9377
} |
# coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch T5 model."""
import copy
import math
import warnings
from typing import Optional, Tuple, Union
from loguru import logger
import torch
import torch.distributed
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.utils import (
is_torch_fx_proxy,
)
from transformers import T5Config
from text_generation_server.layers import (
TensorParallelColumnLinear,
TensorParallelEmbedding,
TensorParallelRowLinear,
SpeculativeHead,
)
# copied from https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/models/t5/modeling_t5.py#L1316
# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
__HEAD_MASK_WARNING_MSG = """
The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
num_heads)`.
"""
class PartialTPEmbedding(nn.Module):
def __init__(self, prefix: str, weights):
super().__init__()
weight = weights.get_sharded(f"{prefix}.weight", dim=1)
self.weight = nn.Parameter(weight)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.embedding(input, self.weight)
@torch.jit.script
def layer_norm(hidden_states, weight, epsilon):
    # T5 uses a layer_norm which only scales and doesn't shift, also known as Root Mean Square
    # Layer Normalization (https://arxiv.org/abs/1910.07467); thus the variance is calculated
    # without the mean and there is no bias. Additionally, we make sure that the accumulation
    # for half-precision inputs is done in fp32.
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + epsilon)
# convert into half-precision if necessary
if weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(weight.dtype)
return weight * hidden_states
class T5LayerNorm(nn.Module):
def __init__(self, prefix, weights, eps=1e-6):
"""
Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
"""
super().__init__()
weight = weights.get_tensor(f"{prefix}.weight")
self.weight = nn.Parameter(weight)
self.variance_epsilon = torch.tensor(eps)
def forward(self, hidden_states):
return layer_norm(hidden_states, self.weight, self.variance_epsilon)
try:
from apex.normalization import FusedRMSNorm
T5LayerNorm = FusedRMSNorm # noqa
logger.info(
"Discovered apex.normalization.FusedRMSNorm - will use it instead of T5LayerNorm"
)
except ImportError:
# using the normal T5LayerNorm
pass
except Exception:
logger.warning("discovered apex but it failed to load, falling back to T5LayerNorm")
pass
ALL_LAYERNORM_LAYERS.append(T5LayerNorm)
class T5DenseActDense(nn.Module):
def __init__(self, config: T5Config, prefix, weights):
super().__init__()
self.wi = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.wi", weights=weights, bias=False
)
        ### XXX: T5 models do not handle f16 and quantization well together.
        ### Overriding specifically this layer for that reason.
### https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L316
### https://github.com/huggingface/transformers/issues/20287
_q = config.quantize
_dtype = weights.dtype
weights.dtype = torch.float32
config.quantize = None
self.wo_cast = (torch.float32, _dtype)
self.wo = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.wo", weights=weights, bias=False
)
weights.dtype = _dtype
config.quantize = _q
self.dropout = nn.Dropout(config.dropout_rate)
self.act = (
ACT2FN[config.dense_act_fn]
if "gelu" not in config.dense_act_fn
else lambda x: torch.nn.functional.gelu(x, approximate="tanh")
)
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states.to(dtype=self.wo_cast[0])
hidden_states = self.wo(hidden_states)
# XXX: Recasting is already done within the layer norm.
# Casting back to float16 here modifies results
# hidden_states = hidden_states.to(dtype=self.wo_cast[1])
return hidden_states
class T5DenseGatedActDense(nn.Module):
def __init__(self, config: T5Config, prefix, weights):
super().__init__()
self.wi_0 = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.wi_0", weights=weights, bias=False
)
self.wi_1 = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.wi_1", weights=weights, bias=False
)
        ### XXX: T5 models do not handle f16 and quantization well together.
        ### Overriding specifically this layer for that reason.
### https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L316
### https://github.com/huggingface/transformers/issues/20287
_q = config.quantize
_dtype = weights.dtype
weights.dtype = torch.float32
config.quantize = None
self.wo_cast = (torch.float32, _dtype)
self.wo = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.wo", weights=weights, bias=False
)
weights.dtype = _dtype
config.quantize = _q
self.dropout = nn.Dropout(config.dropout_rate)
self.act = (
ACT2FN[config.dense_act_fn]
if "gelu" not in config.dense_act_fn
else lambda x: torch.nn.functional.gelu(x, approximate="tanh")
)
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states.to(dtype=self.wo_cast[0])
hidden_states = self.wo(hidden_states)
# XXX: Recasting is already done within the layer norm.
# Casting back to float16 here modifies results
# hidden_states = hidden_states.to(dtype=self.wo_cast[1])
return hidden_states
class T5LayerFF(nn.Module):
def __init__(self, config: T5Config, prefix, weights):
super().__init__()
if config.is_gated_act:
self.DenseReluDense = T5DenseGatedActDense(
config, prefix=f"{prefix}.DenseReluDense", weights=weights
)
else:
self.DenseReluDense = T5DenseActDense(
config, prefix=f"{prefix}.DenseReluDense", weights=weights
)
self.layer_norm = T5LayerNorm(
prefix=f"{prefix}.layer_norm",
weights=weights,
eps=config.layer_norm_epsilon,
)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
class T5Attention(nn.Module):
def __init__(
self, config: T5Config, prefix, weights, has_relative_attention_bias=False
):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
process_group = weights.process_group
# Mesh TensorFlow initialization to avoid scaling before softmax
assert self.n_heads % process_group.size() == 0
self.q = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.q", weights=weights, bias=False
)
self.k = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.k", weights=weights, bias=False
)
self.v = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.v", weights=weights, bias=False
)
self.o = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.o", weights=weights, bias=False
)
if self.n_heads % weights.process_group.size() != 0:
raise ValueError(
f"`n_heads` must be divisible by `num_shards` (got `n_heads`: {self.n_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.n_heads = self.n_heads // process_group.size()
self.inner_dim = self.inner_dim // process_group.size()
if self.has_relative_attention_bias:
self.relative_attention_bias = PartialTPEmbedding(
prefix=f"{prefix}.relative_attention_bias", weights=weights
)
@staticmethod
def _relative_position_bucket(
relative_position, bidirectional=True, num_buckets=32, max_distance=128
):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(
relative_position, torch.zeros_like(relative_position)
)
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large,
torch.full_like(relative_position_if_large, num_buckets - 1),
)
relative_buckets += torch.where(
is_small, relative_position, relative_position_if_large
)
return relative_buckets
def compute_bias(self, query_length, key_length, device=None):
"""Compute binned relative position bias"""
if device is None:
device = self.relative_attention_bias.weight.device
context_position = torch.arange(query_length, dtype=torch.long, device=device)[
:, None
]
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[
None, :
]
relative_position = (
memory_position - context_position
) # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
values = self.relative_attention_bias(
relative_position_bucket
) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(
0
) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
), f"past_key_value should have 2 past states: keys and values. Got {len(past_key_value)} past states"
real_seq_length += (
past_key_value[0].shape[2] if query_length is None else query_length
)
key_length = (
real_seq_length if key_value_states is None else key_value_states.shape[1]
)
def shape(states):
"""projection"""
return states.view(
batch_size, -1, self.n_heads, self.key_value_proj_dim
).transpose(1, 2)
def unshape(states):
"""reshape"""
return (
states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
elif past_key_value.shape[2] != key_value_states.shape[1]:
# checking that the `sequence_length` of the `past_key_value` is the same as
# the provided `key_value_states` to support prefix tuning
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
query_states = shape(
self.q(hidden_states)
) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
key_states = project(
hidden_states,
self.k,
key_value_states,
past_key_value[0] if past_key_value is not None else None,
)
value_states = project(
hidden_states,
self.v,
key_value_states,
past_key_value[1] if past_key_value is not None else None,
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length),
device=scores.device,
dtype=scores.dtype,
)
else:
position_bias = self.compute_bias(
real_seq_length, key_length, device=scores.device
)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
if mask is not None:
position_bias = (
position_bias + mask
) # (batch_size, n_heads, seq_length, key_length)
position_bias_masked = position_bias
scores += position_bias_masked
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
scores
) # (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
) # (batch_size, n_heads, seq_length, key_length)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(
torch.matmul(attn_weights, value_states)
) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
present_key_value_state = (
(key_states, value_states) if (self.is_decoder and use_cache) else None
)
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
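# --- Illustrative sketch (not part of the original file) ---------------------
# A worked example of the bucketing above, calling the staticmethod directly
# with hypothetical inputs. With bidirectional=True and num_buckets=32, half of
# the buckets encode the sign of the offset, small distances get exact buckets,
# and larger distances fall into logarithmically spaced ones.
def _example_relative_position_buckets():
    relative_position = torch.arange(-4, 5).unsqueeze(0)  # offsets -4 .. 4
    buckets = T5Attention._relative_position_bucket(
        relative_position, bidirectional=True, num_buckets=32, max_distance=128
    )
    # offset 0 maps to bucket 0, negative offsets to buckets 1..4, and positive
    # offsets to the upper half (17..20) because of the sign term.
    return buckets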
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, prefix, weights, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(
config,
prefix=f"{prefix}.SelfAttention",
weights=weights,
has_relative_attention_bias=has_relative_attention_bias,
)
self.layer_norm = T5LayerNorm(
prefix=f"{prefix}.layer_norm",
weights=weights,
eps=config.layer_norm_epsilon,
)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[
1:
] # add attentions if we output them
return outputs
class T5LayerCrossAttention(nn.Module):
def __init__(self, config, prefix, weights):
super().__init__()
self.EncDecAttention = T5Attention(
config,
prefix=f"{prefix}.EncDecAttention",
weights=weights,
has_relative_attention_bias=False,
)
self.layer_norm = T5LayerNorm(
prefix=f"{prefix}.layer_norm",
weights=weights,
eps=config.layer_norm_epsilon,
)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
query_length=None,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
normed_hidden_states,
mask=attention_mask,
key_value_states=key_value_states,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[
1:
] # add attentions if we output them
return outputs
class T5Block(nn.Module):
def __init__(self, config, prefix, weights, has_relative_attention_bias: bool):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(
T5LayerSelfAttention(
config,
prefix=f"{prefix}.layer.0",
weights=weights,
has_relative_attention_bias=has_relative_attention_bias,
)
)
if self.is_decoder:
i = 2
self.layer.append(
T5LayerCrossAttention(
config, prefix=f"{prefix}.layer.1", weights=weights
)
)
else:
i = 1
self.layer.append(
T5LayerFF(config, prefix=f"{prefix}.layer.{i}", weights=weights)
)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
layer_head_mask=None,
cross_attn_layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
return_dict=True,
):
if past_key_value is not None:
if not self.is_decoder:
logger.warning(
"`past_key_values` is passed to the encoder. Please make sure this is intended."
)
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
if len(past_key_value) != expected_num_past_key_values:
raise ValueError(
f"There should be {expected_num_past_key_values} past states. "
f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
f"Got {len(past_key_value)} past key / value states"
)
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
attention_outputs = self_attention_outputs[
2:
] # Keep self-attention outputs and relative position weights
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(
torch.isinf(hidden_states).any(),
torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max,
)
hidden_states = torch.clamp(
hidden_states, min=-clamp_value, max=clamp_value
)
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
cross_attention_outputs = self.layer[1](
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(
torch.isinf(hidden_states).any(),
torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max,
)
hidden_states = torch.clamp(
hidden_states, min=-clamp_value, max=clamp_value
)
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = (
present_key_value_state + cross_attention_outputs[1]
)
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(
torch.isinf(hidden_states).any(),
torch.finfo(hidden_states.dtype).max - 1000,
torch.finfo(hidden_states.dtype).max,
)
hidden_states = torch.clamp(
hidden_states, min=-clamp_value, max=clamp_value
)
outputs = (hidden_states,)
if use_cache:
outputs = outputs + (present_key_value_state,) + attention_outputs
else:
outputs = outputs + attention_outputs
return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
class T5PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = T5Config
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert decoder_start_token_id is not None, (
"self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id."
" See T5 docs for more information"
)
# shift inputs to the right
if is_torch_fx_proxy(input_ids):
# Item assignment is not supported natively for proxies.
shifted_input_ids = torch.full(
input_ids.shape[:-1] + (1,), decoder_start_token_id
)
shifted_input_ids = torch.cat(
[shifted_input_ids, input_ids[..., :-1]], dim=-1
)
else:
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert (
pad_token_id is not None
), "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
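# --- Illustrative sketch (not part of the original file) ---------------------
# A standalone restatement of `_shift_right` with hypothetical token ids: labels
# are shifted one position to the right, the decoder start token is prepended,
# and any -100 (ignored) label is replaced by the pad token.
# e.g. labels [[5, 6, 7, -100]] with start/pad id 0 -> [[0, 5, 6, 7]]
def _example_shift_right(labels: torch.Tensor, decoder_start_token_id: int = 0, pad_token_id: int = 0):
    shifted = labels.new_zeros(labels.shape)
    shifted[..., 1:] = labels[..., :-1].clone()
    shifted[..., 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted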
class T5Stack(T5PreTrainedModel):
def __init__(self, config, prefix, weights, embed_tokens):
super().__init__(config)
self.is_decoder = config.is_decoder
self.embed_tokens = embed_tokens
self.block = nn.ModuleList(
[
T5Block(
config,
prefix=f"{prefix}.block.{layer_id}",
weights=weights,
has_relative_attention_bias=(layer_id == 0),
)
for layer_id in range(config.num_layers)
]
)
self.final_layer_norm = T5LayerNorm(
prefix=f"{prefix}.final_layer_norm",
weights=weights,
eps=config.layer_norm_epsilon,
)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
# Model parallel
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds"
)
if inputs_embeds is None:
assert (
self.embed_tokens is not None
), "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = (
past_key_values[0][0].shape[2] + seq_length
if past_key_values is not None
else seq_length
)
if use_cache is True:
assert (
self.is_decoder
), f"`use_cache` can only be set to `True` if {self} is used as a decoder"
if attention_mask is None:
attention_mask = torch.ones(
batch_size, mask_seq_length, device=inputs_embeds.device
)
if (
self.is_decoder
and encoder_attention_mask is None
and encoder_hidden_states is not None
):
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size,
encoder_seq_length,
device=inputs_embeds.device,
dtype=torch.long,
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(
attention_mask, input_shape
)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.is_decoder and encoder_hidden_states is not None:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(
encoder_hidden_shape, device=inputs_embeds.device
)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
cross_attn_head_mask = self.get_head_mask(
cross_attn_head_mask, self.config.num_layers
)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions and self.is_decoder) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(
zip(self.block, past_key_values)
):
layer_head_mask = head_mask[i]
cross_attn_layer_head_mask = cross_attn_head_mask[i]
# Model parallel
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
layer_head_mask=layer_head_mask,
cross_attn_layer_head_mask=cross_attn_layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
if use_cache is False:
layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
hidden_states, present_key_value_state = layer_outputs[:2]
            # We share the position biases between the layers - the first layer stores them
# layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
# (cross-attention position bias), (cross-attention weights)
position_bias = layer_outputs[2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[
4 if output_attentions else 3
]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (
present_key_value_state,
)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[3],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
present_key_value_states,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
class T5ForConditionalGeneration(T5PreTrainedModel):
def __init__(self, config: T5Config, weights):
super().__init__(config)
self.model_dim = config.d_model
self.shared = TensorParallelEmbedding(prefix="shared", weights=weights)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(
config=encoder_config,
prefix="encoder",
weights=weights,
embed_tokens=self.shared,
)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(
config=decoder_config,
prefix="decoder",
weights=weights,
embed_tokens=self.shared,
)
try:
self.lm_head = SpeculativeHead.load(
config, prefix="lm_head", weights=weights
)
except RuntimeError:
# Some models like t5-small were saved with shared weights unlike flan
            # Since they are declared as the same arch we have no choice but to hope
# that this is OK instead of using a proper flag.
self.lm_head = SpeculativeHead.load(
config, prefix="shared", weights=weights
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
decoder_head_mask: Optional[torch.FloatTensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if (
labels is not None
and decoder_input_ids is None
and decoder_inputs_embeds is None
):
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
if self.config.tie_word_embeddings:
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim**-0.5)
logits, speculative_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
# move labels to correct device to enable PP
labels = labels.to(logits.device)
loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
# TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
if not return_dict:
output = (logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return (
Seq2SeqLMOutput(
loss=loss,
logits=logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
),
speculative_logits,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
decoder_attention_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs,
):
# cut decoder_input_ids if past is used
if past_key_values is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past_key_values,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"decoder_attention_mask": decoder_attention_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache,
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def _reorder_cache(self, past_key_values, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past_key_values is None:
logger.warning(
"You might want to consider setting `use_cache=True` to speed up decoding"
)
return past_key_values
reordered_decoder_past = ()
for layer_past_states in past_key_values:
# get the correct batch idx from layer past batch dim
# batch dim of `past` is at 2nd position
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(
0, beam_idx.to(layer_past_state.device)
),
)
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
assert len(reordered_layer_past_states) == len(layer_past_states)
reordered_decoder_past = reordered_decoder_past + (
reordered_layer_past_states,
)
return reordered_decoder_past
| text-generation-inference/server/text_generation_server/models/custom_modeling/t5_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/t5_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 22698
} |
import asyncio
import os
import torch
import time
import signal
from grpc import aio
from loguru import logger
from grpc_reflection.v1alpha import reflection
from pathlib import Path
from typing import List, Optional
from text_generation_server.cache import Cache
from text_generation_server.interceptor import ExceptionInterceptor
from text_generation_server.models import Model, get_model_with_lora_adapters
from text_generation_server.utils.adapter import AdapterInfo
from text_generation_server.utils.prefill_chunking import set_max_prefill_tokens
try:
from text_generation_server.models.pali_gemma import PaliGemmaBatch
from text_generation_server.models.vlm_causal_lm import (
VlmCausalLMBatch,
)
from text_generation_server.models.idefics_causal_lm import IdeficsCausalLMBatch
from text_generation_server.models.mllama_causal_lm import MllamaCausalLMBatch
VLM_BATCH_TYPES = {
PaliGemmaBatch,
VlmCausalLMBatch,
IdeficsCausalLMBatch,
MllamaCausalLMBatch,
}
except (ImportError, NotImplementedError):
# These imports can fail on CPU/Non flash.
VLM_BATCH_TYPES = set()
from text_generation_server.pb import generate_pb2_grpc, generate_pb2
from text_generation_server.tracing import UDSOpenTelemetryAioServerInterceptor
from text_generation_server.models.globals import set_adapter_to_index
class SignalHandler:
KEEP_PROCESSING = True
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def set_keep_processing(self, value: bool):
self.KEEP_PROCESSING = value
def exit_gracefully(self, signum, frame):
print(f"Exiting gracefully: Signal {signum}")
self.set_keep_processing(False)
class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
def __init__(
self,
model: Model,
cache: Cache,
server_urls: List[str],
):
self.cache = cache
self.model = model
# Quantize is resolved during model loading
self.quantize = model.quantize
self.server_urls = server_urls
# For some reason, inference_mode does not work well with GLOO which we use on CPU
# if model.device.type == "cuda":
# # Force inference mode for the lifetime of TextGenerationService
# self._inference_mode_raii_guard = torch._C._InferenceMode(True)
async def Info(self, request, context):
return self.model.info
async def Health(self, request, context):
if self.model.device.type == "cuda":
torch.zeros((2, 2)).cuda()
return generate_pb2.HealthResponse()
async def ServiceDiscovery(self, request, context):
return generate_pb2.ServiceDiscoveryResponse(urls=self.server_urls)
async def ClearCache(self, request, context):
if request.HasField("id"):
self.cache.delete(request.id)
else:
self.cache.clear()
return generate_pb2.ClearCacheResponse()
async def FilterBatch(self, request, context):
batch = self.cache.pop(request.batch_id)
if batch is None:
raise ValueError(f"Batch ID {request.batch_id} not found in cache.")
filtered_batch = batch.filter(request.request_ids)
self.cache.set(filtered_batch)
return generate_pb2.FilterBatchResponse(batch=filtered_batch.to_pb())
async def Warmup(self, request, context):
set_max_prefill_tokens(request.max_prefill_tokens)
if self.quantize in {"exl2", "gptq"}:
try:
                # When using GPTQ, Exllama kernels need some global buffers,
                # for which we have the final shapes only after the model has loaded.
# This will allocate those buffers.
from text_generation_server.layers.gptq import (
create_exllama_buffers,
set_device,
)
set_device(self.model.device)
create_exllama_buffers(request.max_prefill_tokens)
except ImportError:
pass
if (
self.model.batch_type in VLM_BATCH_TYPES
        ): # Hack, I would rather use kwargs in the `from_pb` call
batch = self.model.batch_type.from_pb_processor(
request.batch,
self.model.tokenizer,
self.model.processor,
self.model.model.config,
self.model.dtype,
self.model.device,
)
else:
batch = self.model.batch_type.from_pb(
request.batch, self.model.tokenizer, self.model.dtype, self.model.device
)
# Override default values with None for clearer semantics.
max_input_tokens = (
request.max_input_tokens if request.HasField("max_input_tokens") else None
)
max_total_tokens = (
request.max_total_tokens if request.HasField("max_total_tokens") else None
)
max_supported_total_tokens, max_input_tokens, max_total_tokens = (
self.model.warmup(batch, max_input_tokens, max_total_tokens)
)
return generate_pb2.WarmupResponse(
max_supported_total_tokens=max_supported_total_tokens,
max_input_tokens=max_input_tokens,
max_total_tokens=max_total_tokens,
)
async def Prefill(self, request, context):
start = time.time_ns()
if (
self.model.batch_type in VLM_BATCH_TYPES
        ): # Hack, I would rather use kwargs in the `from_pb` call
batch = self.model.batch_type.from_pb_processor(
request.batch,
self.model.tokenizer,
self.model.processor,
self.model.model.config,
self.model.dtype,
self.model.device,
)
else:
batch = self.model.batch_type.from_pb(
request.batch, self.model.tokenizer, self.model.dtype, self.model.device
)
concat_ns = None
if self.model.support_chunking:
if request.HasField("cached_batch"):
cached_batch = self.cache.pop(request.cached_batch.id)
if cached_batch is None:
raise ValueError(
f"Batch ID {request.cached_batch.id} not found in cache."
)
start_concat = time.time_ns()
batch = self.model.batch_type.concatenate([cached_batch, batch])
concat_ns = time.time_ns() - start_concat
generations, next_batch, timings = self.model.generate_token(batch)
self.cache.set(next_batch)
return generate_pb2.PrefillResponse(
generations=[generation.to_pb() for generation in generations],
batch=next_batch.to_pb() if next_batch else None,
forward_ns=timings[0],
decode_ns=timings[1],
total_ns=time.time_ns() - start,
concat_ns=concat_ns,
)
async def Decode(self, request, context):
start = time.time_ns()
if len(request.batches) == 0:
raise ValueError("Must provide at least one batch")
batches = []
for batch_pb in request.batches:
batch = self.cache.pop(batch_pb.id)
if batch is None:
raise ValueError(f"Batch ID {batch_pb.id} not found in cache.")
batches.append(batch)
if len(batches) == 0:
raise ValueError("All batches are empty")
if len(batches) > 1:
start_concat = time.time_ns()
batch = self.model.batch_type.concatenate(batches)
concat_ns = time.time_ns() - start_concat
else:
batch = batches[0]
concat_ns = None
generations, next_batch, timings = self.model.generate_token(batch)
self.cache.set(next_batch)
return generate_pb2.DecodeResponse(
generations=[generation.to_pb() for generation in generations],
batch=next_batch.to_pb() if next_batch else None,
concat_ns=concat_ns,
forward_ns=timings[0],
decode_ns=timings[1],
total_ns=time.time_ns() - start,
)
def serve(
model_id: str,
lora_adapters: Optional[List[AdapterInfo]],
revision: Optional[str],
sharded: bool,
quantize: Optional[str],
speculate: Optional[int],
dtype: Optional[str],
kv_cache_dtype: Optional[str],
trust_remote_code: bool,
uds_path: Path,
max_input_tokens: int,
):
async def serve_inner(
model_id: str,
lora_adapters: Optional[List[AdapterInfo]],
revision: Optional[str],
sharded: bool = False,
quantize: Optional[str] = None,
speculate: Optional[int] = None,
dtype: Optional[str] = None,
kv_cache_dtype: Optional[str] = None,
trust_remote_code: bool = False,
):
unix_socket_template = "unix://{}-{}"
adapter_to_index = {}
if sharded:
server_urls = [
unix_socket_template.format(uds_path, rank)
for rank in range(int(os.environ["WORLD_SIZE"]))
]
local_url = server_urls[int(os.environ["RANK"])]
else:
local_url = unix_socket_template.format(uds_path, 0)
server_urls = [local_url]
try:
model = get_model_with_lora_adapters(
model_id,
lora_adapters,
revision,
sharded,
quantize,
speculate,
dtype,
kv_cache_dtype,
trust_remote_code,
max_input_tokens,
adapter_to_index,
)
except Exception:
logger.exception("Error when initializing model")
raise
signal_handler = SignalHandler()
set_adapter_to_index(adapter_to_index)
server = aio.server(
interceptors=[
ExceptionInterceptor(lambda: signal_handler.set_keep_processing(False)),
UDSOpenTelemetryAioServerInterceptor(),
],
options=[
# Set the maximum possible message length: i32::MAX
("grpc.max_receive_message_length", (1 << 31) - 1)
],
)
generate_pb2_grpc.add_TextGenerationServiceServicer_to_server(
TextGenerationService(model, Cache(), server_urls), server
)
SERVICE_NAMES = (
generate_pb2.DESCRIPTOR.services_by_name["TextGenerationService"].full_name,
reflection.SERVICE_NAME,
)
reflection.enable_server_reflection(SERVICE_NAMES, server)
server.add_insecure_port(local_url)
await server.start()
logger.info("Server started at {}".format(local_url))
while signal_handler.KEEP_PROCESSING:
await asyncio.sleep(0.5)
asyncio.run(
serve_inner(
model_id,
lora_adapters,
revision,
sharded,
quantize,
speculate,
dtype,
kv_cache_dtype,
trust_remote_code,
)
)
| text-generation-inference/server/text_generation_server/server.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/server.py",
"repo_id": "text-generation-inference",
"token_count": 5425
} |
# Origin: https://github.com/predibase/lorax
# Path: lorax/server/lorax_server/utils/segments.py
# License: Apache License Version 2.0, January 2004
from typing import List, Tuple, Union
import torch
import numpy as np
def find_segments(
adapter_indices: Union[torch.Tensor, List[int]]
) -> Tuple[List[int], List[int]]:
if isinstance(adapter_indices, torch.Tensor):
adapter_indices = adapter_indices.cpu().numpy()
elif isinstance(adapter_indices, list):
adapter_indices = np.array(adapter_indices)
change_mask = np.diff(adapter_indices, prepend=adapter_indices[0] - 1)
change_indices = np.nonzero(change_mask)[0]
segments = [0]
segments.extend(change_indices[1:].tolist())
segments.append(len(adapter_indices))
segment_indices = adapter_indices[change_indices].tolist()
return segments, segment_indices
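# Illustrative example (comment added for clarity, not part of the upstream lorax file):
#   find_segments([0, 0, 1, 1, 1, 2]) returns ([0, 2, 5, 6], [0, 1, 2]),
#   i.e. rows 0-1 use adapter 0, rows 2-4 use adapter 1, and row 5 uses adapter 2.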
class SegmentConcatBuilder:
def __init__(self):
self.adapter_segment_indices = []
self.adapter_segment_tensors = []
def concat(self, adapter_segments: torch.Tensor, segment_indices: List[int]):
# Update adapter segments
if self.adapter_segment_tensors:
# Because we have already processed at least one batch, remove the 0 start index
# from this batch denoting the beginning of the segment, then offset all segment
# positions by the value of the last segment in the previous batch to account for
# the concatenation.
adapter_segments = (
adapter_segments[1:] + self.adapter_segment_tensors[-1][-1]
)
if (
self.adapter_segment_indices
and self.adapter_segment_indices[-1] == segment_indices[0]
):
# If the last segment in the previous batch is the same as the first segment in this batch,
# then we merge them together into a single segment. In effect, this means removing it from
# the segment indices of this batch, and extending the segment span by removing the segment
# end index from the previous batch.
segment_indices = segment_indices[1:]
self.adapter_segment_tensors[-1] = self.adapter_segment_tensors[-1][:-1]
self.adapter_segment_indices.extend(segment_indices)
self.adapter_segment_tensors.append(adapter_segments)
def build(self) -> Tuple[torch.Tensor, List[int]]:
return torch.concat(self.adapter_segment_tensors), self.adapter_segment_indices
| text-generation-inference/server/text_generation_server/utils/segments.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/segments.py",
"repo_id": "text-generation-inference",
"token_count": 1009
} |
{
"name": "tokenizers-win32-arm64-msvc",
"version": "0.13.4-rc1",
"os": [
"win32"
],
"cpu": [
"arm64"
],
"main": "tokenizers.win32-arm64-msvc.node",
"files": [
"tokenizers.win32-arm64-msvc.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/win32-arm64-msvc/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/win32-arm64-msvc/package.json",
"repo_id": "tokenizers",
"token_count": 277
} |
extern crate tokenizers as tk;
use crate::models::Model;
use napi::bindgen_prelude::*;
use std::sync::{Arc, RwLock};
use tokenizers::models::bpe::{BpeBuilder, BPE};
use tokenizers::models::wordlevel::{WordLevel, WordLevelBuilder};
use tokenizers::models::wordpiece::{WordPiece, WordPieceBuilder};
pub struct BPEFromFilesTask {
pub(crate) builder: Option<BpeBuilder>,
}
impl Task for BPEFromFilesTask {
type Output = BPE;
type JsValue = Model;
fn compute(&mut self) -> Result<Self::Output> {
self
.builder
.take()
.ok_or(Error::from_reason("Empty builder".to_string()))?
.build()
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(Model {
model: Some(Arc::new(RwLock::new(output.into()))),
})
}
}
pub struct WordPieceFromFilesTask {
pub(crate) builder: Option<WordPieceBuilder>,
}
impl Task for WordPieceFromFilesTask {
type Output = WordPiece;
type JsValue = Model;
fn compute(&mut self) -> Result<Self::Output> {
self
.builder
.take()
.ok_or(Error::from_reason("Empty builder".to_string()))?
.build()
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(Model {
model: Some(Arc::new(RwLock::new(output.into()))),
})
}
}
pub struct WordLevelFromFilesTask {
pub(crate) builder: Option<WordLevelBuilder>,
}
impl Task for WordLevelFromFilesTask {
type Output = WordLevel;
type JsValue = Model;
fn compute(&mut self) -> Result<Self::Output> {
self
.builder
.take()
.ok_or(Error::from_reason("Empty builder".to_string()))?
.build()
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(Model {
model: Some(Arc::new(RwLock::new(output.into()))),
})
}
}
| tokenizers/bindings/node/src/tasks/models.rs/0 | {
"file_path": "tokenizers/bindings/node/src/tasks/models.rs",
"repo_id": "tokenizers",
"token_count": 800
} |
import pytest
def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true", default=False, help="run slow tests")
def pytest_configure(config):
config.addinivalue_line("markers", "slow: mark test as slow to run")
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
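# Illustrative usage (comment added for clarity, not part of the original file):
#   @pytest.mark.slow
#   def test_something_expensive(): ...
# Such tests are skipped by default and only run when `pytest --runslow` is passed.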
| tokenizers/bindings/python/conftest.py/0 | {
"file_path": "tokenizers/bindings/python/conftest.py",
"repo_id": "tokenizers",
"token_count": 217
} |
from typing import Dict, Iterator, List, Optional, Tuple, Union
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import NFKC
from .base_tokenizer import BaseTokenizer
class SentencePieceBPETokenizer(BaseTokenizer):
"""SentencePiece BPE Tokenizer
Represents the BPE algorithm, with the pretokenization used by SentencePiece
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
unk_token: Union[str, AddedToken] = "<unk>",
replacement: str = "▁",
add_prefix_space: bool = True,
dropout: Optional[float] = None,
fuse_unk: Optional[bool] = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
else:
tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
tokenizer.normalizer = NFKC()
prepend_scheme = "always" if add_prefix_space else "never"
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
parameters = {
"model": "SentencePieceBPE",
"unk_token": unk_token,
"replacement": replacement,
"add_prefix_space": add_prefix_space,
"dropout": dropout,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return SentencePieceBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
show_progress: bool = True,
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
show_progress=show_progress,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
show_progress: bool = True,
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
show_progress=show_progress,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
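# Illustrative usage (comment added for clarity, not part of the upstream file):
#   tokenizer = SentencePieceBPETokenizer()
#   tokenizer.train_from_iterator(["a first sentence", "another sentence"], vocab_size=500)
#   tokenizer.encode("a first sentence").tokens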
| tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py",
"repo_id": "tokenizers",
"token_count": 1682
} |
use pyo3::prelude::*;
use std::collections::VecDeque;
/// A simple iterator that can be instantiated with a specified length.
/// We use this with iterators that don't have a size_hint but whose size
/// we might know. This is useful with progress bars for example.
pub struct MaybeSizedIterator<I> {
length: Option<usize>,
iter: I,
}
impl<I> MaybeSizedIterator<I>
where
I: Iterator,
{
pub fn new(iter: I, length: Option<usize>) -> Self {
Self { length, iter }
}
}
impl<I> Iterator for MaybeSizedIterator<I>
where
I: Iterator,
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.length.unwrap_or(0), None)
}
}
/// A buffered iterator that takes care of locking the GIL only when needed.
/// The `PyIterator` provided by PyO3 keeps a Python GIL token all along
/// and thus doesn't allow us to release the GIL to let other threads run.
///
/// This iterator serves two purposes:
/// - First, as opposed to the `pyo3::PyIterator`, it is Send and can easily be parallelized
/// - Second, this lets us release the GIL between two refills of the buffer, allowing other
/// Python threads to work
pub struct PyBufferedIterator<T, F> {
iter: Option<Py<PyAny>>,
converter: F,
buffer: VecDeque<PyResult<T>>,
size: usize,
}
impl<T, F, I> PyBufferedIterator<T, F>
where
F: Fn(Bound<'_, PyAny>) -> I,
I: IntoIterator<Item = PyResult<T>>,
{
/// Create a new PyBufferedIterator using the provided Python object.
/// This object must implement the Python Iterator Protocol, and an error will
    /// be returned if the contract is not respected.
///
/// The `converter` provides a way to convert each item in the iterator into
/// something that doesn't embed a 'py token and thus allows the GIL to be released
///
/// The `buffer_size` represents the number of items that we buffer before we
/// need to acquire the GIL again.
pub fn new(iter: &Bound<'_, PyAny>, converter: F, buffer_size: usize) -> PyResult<Self> {
let py = iter.py();
let iter: Py<PyAny> = unsafe {
Bound::from_borrowed_ptr_or_err(py, pyo3::ffi::PyObject_GetIter(iter.as_ptr()))?.into()
};
Ok(Self {
iter: Some(iter),
converter,
buffer: VecDeque::with_capacity(buffer_size),
size: buffer_size,
})
}
/// Refill the buffer, and set `self.iter` as `None` if nothing more to get
fn refill(&mut self) -> PyResult<()> {
if self.iter.is_none() {
return Ok(());
}
Python::with_gil(|py| loop {
if self.buffer.len() >= self.size {
return Ok(());
}
match unsafe {
Bound::from_owned_ptr_or_opt(
py,
pyo3::ffi::PyIter_Next(self.iter.as_ref().unwrap().bind(py).as_ptr()),
)
} {
Some(obj) => self.buffer.extend((self.converter)(obj)),
None => {
if PyErr::occurred(py) {
return Err(PyErr::fetch(py));
} else {
self.iter = None;
}
}
};
if self.iter.is_none() {
return Ok(());
}
})
}
}
impl<T, F, I> Iterator for PyBufferedIterator<T, F>
where
F: Fn(Bound<'_, PyAny>) -> I,
I: IntoIterator<Item = PyResult<T>>,
{
type Item = PyResult<T>;
fn next(&mut self) -> Option<Self::Item> {
if !self.buffer.is_empty() {
self.buffer.pop_front()
} else if self.iter.is_some() {
if let Err(e) = self.refill() {
return Some(Err(e));
}
self.next()
} else {
None
}
}
}
| tokenizers/bindings/python/src/utils/iterators.rs/0 | {
"file_path": "tokenizers/bindings/python/src/utils/iterators.rs",
"repo_id": "tokenizers",
"token_count": 1807
} |
import pickle
import numpy as np
import pytest
from tokenizers import AddedToken, Encoding, Tokenizer
from tokenizers.implementations import BertWordPieceTokenizer
from tokenizers.models import BPE, Model, Unigram
from tokenizers.pre_tokenizers import ByteLevel, Metaspace
from tokenizers.processors import RobertaProcessing, TemplateProcessing
from tokenizers.normalizers import Strip, Lowercase, Sequence
from tokenizers.decoders import ByteFallback, DecodeStream, Metaspace as DecoderMetaspace
from ..utils import bert_files, data_dir, multiprocessing_with_parallelism, roberta_files
class TestAddedToken:
def test_instantiate_with_content_only(self):
added_token = AddedToken("<mask>")
added_token.content = "<MASK>"
assert added_token.content == "<MASK>"
assert type(added_token) == AddedToken
added_token.content = added_token.content.lower()
assert added_token.special == False
added_token.special = True
assert added_token.special == True
added_token.special = False
assert str(added_token) == "<mask>"
assert (
repr(added_token)
== 'AddedToken("<mask>", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False)'
)
assert added_token.rstrip == False
assert added_token.lstrip == False
assert added_token.single_word == False
assert added_token.normalized == True
assert isinstance(pickle.loads(pickle.dumps(added_token)), AddedToken)
def test_can_set_rstrip(self):
added_token = AddedToken("<mask>", rstrip=True)
assert added_token.rstrip == True
assert added_token.lstrip == False
assert added_token.single_word == False
assert added_token.normalized == True
def test_can_set_lstrip(self):
added_token = AddedToken("<mask>", lstrip=True)
assert added_token.rstrip == False
assert added_token.lstrip == True
assert added_token.single_word == False
assert added_token.normalized == True
    def test_can_set_single_word(self):
added_token = AddedToken("<mask>", single_word=True)
assert added_token.rstrip == False
assert added_token.lstrip == False
assert added_token.single_word == True
assert added_token.normalized == True
def test_can_set_normalized(self):
added_token = AddedToken("<mask>", normalized=False)
assert added_token.rstrip == False
assert added_token.lstrip == False
assert added_token.single_word == False
assert added_token.normalized == False
class TestTokenizer:
def test_has_expected_type_and_methods(self):
tokenizer = Tokenizer(BPE())
assert type(tokenizer) == Tokenizer
assert callable(tokenizer.num_special_tokens_to_add)
assert callable(tokenizer.get_vocab)
assert callable(tokenizer.get_vocab_size)
assert callable(tokenizer.enable_truncation)
assert callable(tokenizer.no_truncation)
assert callable(tokenizer.enable_padding)
assert callable(tokenizer.no_padding)
assert callable(tokenizer.encode)
assert callable(tokenizer.encode_batch)
assert callable(tokenizer.decode)
assert callable(tokenizer.decode_batch)
assert callable(tokenizer.token_to_id)
assert callable(tokenizer.id_to_token)
assert callable(tokenizer.add_tokens)
assert callable(tokenizer.add_special_tokens)
assert callable(tokenizer.train)
assert callable(tokenizer.post_process)
assert isinstance(tokenizer.model, Model)
assert tokenizer.normalizer is None
assert tokenizer.pre_tokenizer is None
assert tokenizer.post_processor is None
assert tokenizer.decoder is None
assert isinstance(pickle.loads(pickle.dumps(Tokenizer(BPE()))), Tokenizer)
def test_add_tokens(self):
tokenizer = Tokenizer(BPE())
added = tokenizer.add_tokens(["my", "name", "is", "john"])
assert added == 4
tokens = [AddedToken("the"), AddedToken("quick", normalized=False), AddedToken()]
assert tokens[0].normalized == True
added = tokenizer.add_tokens(tokens)
assert added == 2
assert tokens[0].normalized == True
assert tokens[1].normalized == False
def test_add_special_tokens(self):
tokenizer = Tokenizer(BPE())
# Can add special tokens as `str`
added = tokenizer.add_special_tokens(["my", "name", "is", "john"])
assert added == 4
# Can add special tokens as `AddedToken`
tokens = [AddedToken("the"), AddedToken("quick", normalized=True), AddedToken()]
assert tokens[0].normalized == True
added = tokenizer.add_special_tokens(tokens)
assert added == 2
assert tokens[0].normalized == False
assert tokens[1].normalized == True
def test_encode(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can encode single sequence
output = tokenizer.encode("my name is john")
assert output.tokens == ["my", "name", "is", "john"]
assert type(output.ids) == list
assert type(output.type_ids) == list
assert type(output.offsets) == list
with pytest.warns(DeprecationWarning):
assert type(output.words) == list
assert type(output.word_ids) == list
assert type(output.special_tokens_mask) == list
assert type(output.attention_mask) == list
assert type(output.overflowing) == list
# Can encode a pair of sequences
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["my", "name", "is", "john", "pair"]
assert isinstance(pickle.loads(pickle.dumps(output)), Encoding)
# Can encode a single pre-tokenized sequence
output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True)
assert output.tokens == ["my", "name", "is", "john"]
# Can encode a batch with both a single sequence and a pair of sequences
output = tokenizer.encode_batch(["my name is john", ("my name is john", "pair")])
assert len(output) == 2
def test_encode_formats(self, bert_files):
with pytest.deprecated_call():
tokenizer = BertWordPieceTokenizer(bert_files["vocab"])
# Encode
output = tokenizer.encode("my name is john")
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"]
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"]
output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True)
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"]
output = tokenizer.encode(["my", "name", "is", "john"], ["pair"], is_pretokenized=True)
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"]
# Encode batch
result_single = [
["[CLS]", "my", "name", "is", "john", "[SEP]"],
["[CLS]", "my", "name", "is", "georges", "[SEP]"],
]
result_pair = [
["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"],
["[CLS]", "my", "name", "is", "georges", "[SEP]", "pair", "[SEP]"],
]
def format(encodings):
return [e.tokens for e in encodings]
def test_single(input, is_pretokenized=False):
output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized)
assert format(output) == result_single
def test_pair(input, is_pretokenized=False):
output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized)
assert format(output) == result_pair
# Classic inputs
# Lists
test_single(["My name is John", "My name is Georges"])
test_pair([("my name is john", "pair"), ("my name is georges", "pair")])
test_pair([["my name is john", "pair"], ["my name is georges", "pair"]])
# Tuples
test_single(("My name is John", "My name is Georges"))
test_pair((("My name is John", "pair"), ("My name is Georges", "pair")))
# Numpy
test_single(np.array(["My name is John", "My name is Georges"]))
test_pair(np.array([("My name is John", "pair"), ("My name is Georges", "pair")]))
test_pair(np.array([["My name is John", "pair"], ["My name is Georges", "pair"]]))
# PreTokenized inputs
# Lists
test_single([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]], True)
test_pair(
[
(["My", "name", "is", "John"], ["pair"]),
(["My", "name", "is", "Georges"], ["pair"]),
],
True,
)
test_pair(
[
[["My", "name", "is", "John"], ["pair"]],
[["My", "name", "is", "Georges"], ["pair"]],
],
True,
)
# Tuples
test_single((("My", "name", "is", "John"), ("My", "name", "is", "Georges")), True)
test_pair(
(
(("My", "name", "is", "John"), ("pair",)),
(("My", "name", "is", "Georges"), ("pair",)),
),
True,
)
test_pair(
(
(["My", "name", "is", "John"], ["pair"]),
(["My", "name", "is", "Georges"], ["pair"]),
),
True,
)
# Numpy
test_single(
np.array([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]]),
True,
)
test_single(
np.array((("My", "name", "is", "John"), ("My", "name", "is", "Georges"))),
True,
)
test_pair(
np.array(
[
[["My", "name", "is", "John"], ["pair"]],
[["My", "name", "is", "Georges"], ["pair"]],
],
dtype=object,
),
True,
)
test_pair(
np.array(
(
(("My", "name", "is", "John"), ("pair",)),
(("My", "name", "is", "Georges"), ("pair",)),
),
dtype=object,
),
True,
)
# Mal formed
with pytest.raises(TypeError, match="TextInputSequence must be str"):
tokenizer.encode([["my", "name"]])
with pytest.raises(TypeError, match="TextInputSequence must be str"):
tokenizer.encode("My name is john", [["pair"]])
with pytest.raises(TypeError, match="TextInputSequence must be str"):
tokenizer.encode("my name is john", ["pair"])
with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"):
tokenizer.encode("My name is john", is_pretokenized=True)
with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"):
tokenizer.encode("My name is john", ["pair"], is_pretokenized=True)
with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"):
tokenizer.encode(["My", "name", "is", "John"], "pair", is_pretokenized=True)
def test_encode_add_special_tokens(self, roberta_files):
with pytest.deprecated_call():
tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"]))
tokenizer.add_special_tokens(["<s>", "</s>"])
tokenizer.pre_tokenizer = ByteLevel(add_prefix_space=True)
tokenizer.post_processor = RobertaProcessing(
("</s>", tokenizer.token_to_id("</s>")),
("<s>", tokenizer.token_to_id("<s>")),
)
# Can encode with special tokens
output_with_specials = tokenizer.encode("My name is John", add_special_tokens=True)
assert output_with_specials.tokens == ["<s>", "ĠMy", "Ġname", "Ġis", "ĠJohn", "</s>"]
# Can encode without special tokens
output_without_specials = tokenizer.encode("My name is John", add_special_tokens=False)
assert output_without_specials.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"]
def test_truncation(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.enable_truncation(2)
# Can truncate single sequences
output = tokenizer.encode("my name is john")
assert output.tokens == ["my", "name"]
# Can truncate pair sequences as well
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["my", "pair"]
# Can get the params and give them to enable_truncation
trunc = tokenizer.truncation
tokenizer.enable_truncation(**trunc)
# Left truncation direction
tokenizer.enable_truncation(2, direction="left")
output = tokenizer.encode("my name is john")
assert output.tokens == ["is", "john"]
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["john", "pair"]
def test_padding(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# By default it does nothing when encoding single sequence
tokenizer.enable_padding()
output = tokenizer.encode("my name")
assert output.tokens == ["my", "name"]
# Can pad to the longest in a batch
output = tokenizer.encode_batch(["my name", "my name is john"])
assert all([len(encoding) == 4 for encoding in output])
# Can pad to the specified length otherwise
tokenizer.enable_padding(length=4)
output = tokenizer.encode("my name")
assert output.tokens == ["my", "name", "[PAD]", "[PAD]"]
output = tokenizer.encode("my name", "pair")
assert output.tokens == ["my", "name", "pair", "[PAD]"]
# Can get the params and give them to enable_padding
padding = tokenizer.padding
tokenizer.enable_padding(**padding)
def test_decode(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can decode single sequences
output = tokenizer.decode([0, 1, 2, 3])
assert output == "my name is john"
# Can decode batch
output = tokenizer.decode_batch([[0, 1, 2, 3], [4]])
assert output == ["my name is john", "pair"]
# Can decode stream
stream = DecodeStream(skip_special_tokens=False)
assert stream.step(tokenizer, 0) == "my"
assert stream.step(tokenizer, 1) == " name"
assert stream.step(tokenizer, 2) == " is"
assert stream.step(tokenizer, 3) == " john"
def test_decode_stream(self):
vocab = [
("<unk>", 0.0),
("<0x20>", -0.1),
("<0xC3>", -0.2),
("<0xA9>", -0.3),
]
tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=True))
tokenizer.decoder = ByteFallback()
stream = DecodeStream(skip_special_tokens=False)
assert stream.step(tokenizer, 1) == " "
assert stream.step(tokenizer, 2) == None
assert stream.step(tokenizer, 3) == "é"
vocab = [
("<unk>", 0.0),
("▁This", -0.1),
]
tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=False))
tokenizer.decoder = DecoderMetaspace()
stream = DecodeStream(skip_special_tokens=False)
assert stream.step(tokenizer, 1) == "This"
assert stream.step(tokenizer, 1) == " This"
def test_get_vocab(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can retrieve vocab with added tokens
vocab = tokenizer.get_vocab(with_added_tokens=True)
assert vocab == {"is": 2, "john": 3, "my": 0, "name": 1, "pair": 4}
# Can retrieve vocab without added tokens
vocab = tokenizer.get_vocab(with_added_tokens=False)
assert vocab == {}
# Can retrieve added token decoder
vocab = tokenizer.get_added_tokens_decoder()
assert vocab == {
0: AddedToken("my", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),
1: AddedToken("name", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),
2: AddedToken("is", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),
3: AddedToken("john", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),
4: AddedToken("pair", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),
}
def test_get_vocab_size(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can retrieve vocab's size with added tokens
size = tokenizer.get_vocab_size(with_added_tokens=True)
assert size == 5
# Can retrieve vocab's size without added tokens
size = tokenizer.get_vocab_size(with_added_tokens=False)
assert size == 0
def test_post_process(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.enable_truncation(2)
tokenizer.enable_padding(length=4)
encoding = tokenizer.encode("my name is john")
pair_encoding = tokenizer.encode("pair")
# Can post process a single encoding
output = tokenizer.post_process(encoding)
assert output.tokens == ["my", "name", "[PAD]", "[PAD]"]
# Can post process a pair of encodings
output = tokenizer.post_process(encoding, pair_encoding)
assert output.tokens == ["my", "pair", "[PAD]", "[PAD]"]
def test_multiprocessing_with_parallelism(self):
tokenizer = Tokenizer(BPE())
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
def test_from_pretrained(self):
tokenizer = Tokenizer.from_pretrained("bert-base-cased")
output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False)
assert output.tokens == ["Hey", "there", "dear", "friend", "!"]
def test_from_pretrained_revision(self):
tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test")
output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False)
assert output.tokens == ["hey", "there", "dear", "friend", "!"]
tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test", revision="gpt-2")
output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False)
assert output.tokens == ["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"]
def test_unigram_byte_fallback(self):
vocab = [
("<unk>", 0.0),
("A", -0.01),
("sen", -0.02),
("te", -0.03),
("n", -0.04),
("ce", -0.05),
("<0xF0>", -0.06),
("<0x9F>", -0.06),
("<0xA4>", -0.06),
("<0x97>", -0.06),
(" ", -0.4),
]
        tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=False))
output = tokenizer.encode("A sentence 🤗")
assert output.ids == [1, 10, 2, 3, 4, 5, 10, 0]
assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "🤗"]
tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=True))
output = tokenizer.encode("A sentence 🤗")
assert output.ids == [1, 10, 2, 3, 4, 5, 10, 6, 7, 8, 9]
assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "<0xF0>", "<0x9F>", "<0xA4>", "<0x97>"]
def test_encode_special_tokens(self):
tokenizer = Tokenizer.from_pretrained("t5-base")
tokenizer.add_tokens(["<eot>"])
tokenizer.add_special_tokens(["<end_of_text>"])
output = tokenizer.encode("Hey there<end_of_text> dear<eot>friend!", add_special_tokens=False)
assert output.tokens == ["▁Hey", "▁there", "<end_of_text>", "▁dear", "<eot>", "▁friend", "!"]
tokenizer.encode_special_tokens = True
assert tokenizer.encode_special_tokens == True
output = tokenizer.encode("Hey there<end_of_text> dear<eot>friend!", add_special_tokens=False)
assert output.tokens == [
"▁Hey",
"▁there",
"<",
"end",
"_",
"of",
"_",
"text",
">",
"▁dear",
"<eot>",
"▁friend",
"!",
]
tokenizer.add_tokens(["of_text>"])
output = tokenizer.encode("Hey there<end_of_text> dear<eot>friend!", add_special_tokens=False)
assert output.tokens == ["▁Hey", "▁there", "<", "end", "_", "of_text>", "▁dear", "<eot>", "▁friend", "!"]
def test_splitting(self):
tokenizer = Tokenizer.from_pretrained("hf-internal-testing/llama-new-metaspace")
tokenizer.pre_tokenizer.split = False
tokenizer.add_tokens([AddedToken("<REPR_END>", rstrip=True, lstrip=True)])
assert tokenizer.encode("<REPR_END>inform<s>. Hey. .", add_special_tokens=False).tokens == [
"<REPR_END>",
"in",
"form",
"<s>",
".",
"▁Hey",
".",
"▁▁▁▁▁▁",
"▁.",
]
assert tokenizer.encode("<REPR_END>inform<s>. Hey. .", add_special_tokens=False).ids == [
32000,
262,
689,
1,
29889,
18637,
29889,
539,
869,
]
assert tokenizer.encode("inform<s>. Hey. .").tokens == [
"<s>",
"▁inform",
"<s>",
".",
"▁Hey",
".",
"▁▁▁▁▁▁",
"▁.",
]
assert tokenizer.encode("inform<s>. Hey. .", add_special_tokens=False).tokens == [
"▁inform",
"<s>",
".",
"▁Hey",
".",
"▁▁▁▁▁▁",
"▁.",
]
def test_decode_special(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens([AddedToken("my", special=True), AddedToken("name", special=False), "is", "john", "pair"])
# Can decode single sequences
output = tokenizer.decode([0, 1, 2, 3], skip_special_tokens=False)
assert output == "my name is john"
output = tokenizer.decode([0, 1, 2, 3], skip_special_tokens=True)
assert output == "name is john"
assert tokenizer.get_added_tokens_decoder()[0] == AddedToken("my", special=True)
def test_setting_to_none(self):
tokenizer = Tokenizer(BPE())
tokenizer.normalizer = Strip()
tokenizer.normalizer = None
assert tokenizer.normalizer == None
tokenizer.pre_tokenizer = Metaspace()
tokenizer.pre_tokenizer = None
assert tokenizer.pre_tokenizer == None
class TestTokenizerRepr:
def test_repr(self):
tokenizer = Tokenizer(BPE())
out = repr(tokenizer)
assert (
out
== 'Tokenizer(version="1.0", truncation=None, padding=None, added_tokens=[], normalizer=None, pre_tokenizer=None, post_processor=None, decoder=None, model=BPE(dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=False, byte_fallback=False, ignore_merges=False, vocab={}, merges=[]))'
)
def test_repr_complete(self):
tokenizer = Tokenizer(BPE())
tokenizer.pre_tokenizer = ByteLevel(add_prefix_space=True)
tokenizer.post_processor = TemplateProcessing(
single=["[CLS]", "$0", "[SEP]"],
pair=["[CLS]:0", "$A", "[SEP]:0", "$B:1", "[SEP]:1"],
special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
)
tokenizer.normalizer = Sequence([Lowercase(), Strip()])
out = repr(tokenizer)
assert (
out
== 'Tokenizer(version="1.0", truncation=None, padding=None, added_tokens=[], normalizer=Sequence(normalizers=[Lowercase(), Strip(strip_left=True, strip_right=True)]), pre_tokenizer=ByteLevel(add_prefix_space=True, trim_offsets=True, use_regex=True), post_processor=TemplateProcessing(single=[SpecialToken(id="[CLS]", type_id=0), Sequence(id=A, type_id=0), SpecialToken(id="[SEP]", type_id=0)], pair=[SpecialToken(id="[CLS]", type_id=0), Sequence(id=A, type_id=0), SpecialToken(id="[SEP]", type_id=0), Sequence(id=B, type_id=1), SpecialToken(id="[SEP]", type_id=1)], special_tokens={"[CLS]":SpecialToken(id="[CLS]", ids=[1], tokens=["[CLS]"]), "[SEP]":SpecialToken(id="[SEP]", ids=[0], tokens=["[SEP]"])}), decoder=None, model=BPE(dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=False, byte_fallback=False, ignore_merges=False, vocab={}, merges=[]))'
)
| tokenizers/bindings/python/tests/bindings/test_tokenizer.py/0 | {
"file_path": "tokenizers/bindings/python/tests/bindings/test_tokenizer.py",
"repo_id": "tokenizers",
"token_count": 11643
} |
- sections:
- local: index
title: 🤗 Tokenizers
- local: quicktour
title: Quicktour
- local: installation
title: Installation
- local: pipeline
title: The tokenization pipeline
- local: components
title: Components
- local: training_from_memory
title: Training from memory
title: Getting started
- sections:
- local: api/input-sequences
title: Input Sequences
- local: api/encode-inputs
title: Encode Inputs
- local: api/tokenizer
title: Tokenizer
- local: api/encoding
title: Encoding
- local: api/added-tokens
title: Added Tokens
- local: api/models
title: Models
- local: api/normalizers
title: Normalizers
- local: api/pre-tokenizers
title: Pre-tokenizers
- local: api/post-processors
title: Post-processors
- local: api/trainers
title: Trainers
- local: api/decoders
title: Decoders
- local: api/visualizer
title: Visualizer
title: API
| tokenizers/docs/source-doc-builder/_toctree.yml/0 | {
"file_path": "tokenizers/docs/source-doc-builder/_toctree.yml",
"repo_id": "tokenizers",
"token_count": 338
} |
# The tokenization pipeline
When calling `Tokenizer.encode` or
`Tokenizer.encode_batch`, the input
text(s) go through the following pipeline:
- `normalization`
- `pre-tokenization`
- `model`
- `post-processing`
We'll see in detail what happens during each of those steps,
as well as when you want to `decode <decoding>` some token ids, and how the 🤗 Tokenizers library allows you
to customize each of those steps to your needs. If you're already
familiar with those steps and want to learn by seeing some code, jump to
`our BERT from scratch example <example>`.
For the examples that require a `Tokenizer` we will use the tokenizer we trained in the
`quicktour`, which you can load with:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 12}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_reload_tokenizer",
"end-before": "END pipeline_reload_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
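In practice, reloading a saved tokenizer is a one-liner. The following sketch is shown in Python only and assumes the quicktour tokenizer was saved as `data/tokenizer-wiki.json`:
```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")
```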
## Normalization
Normalization is, in a nutshell, a set of operations you apply to a raw
string to make it less random or "cleaner". Common operations include
stripping whitespace, removing accented characters or lowercasing all
text. If you're familiar with [Unicode
normalization](https://unicode.org/reports/tr15), it is also a very
common normalization operation applied in most tokenizers.
Each normalization operation is represented in the 🤗 Tokenizers library
by a `Normalizer`, and you can combine
several of those by using a `normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization
and removing accents as an example:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_normalizer",
"end-before": "END setup_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_normalizer",
"end-before": "END pipeline_setup_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_normalizer",
"end-before": "END setup_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
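For readers who do not have the linked example files open, here is a minimal, self-contained Python sketch of that same normalizer:
```python
from tokenizers import normalizers
from tokenizers.normalizers import NFD, StripAccents

# NFD decomposes accented characters; StripAccents then drops the combining marks
normalizer = normalizers.Sequence([NFD(), StripAccents()])
```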
You can manually test that normalizer by applying it to any string:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START test_normalizer",
"end-before": "END test_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_test_normalizer",
"end-before": "END pipeline_test_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START test_normalizer",
"end-before": "END test_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
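Continuing the sketch above, `normalize_str` lets you check the effect on a sample string (the input is arbitrary):
```python
print(normalizer.normalize_str("Héllò hôw are ü?"))
# "Hello how are u?"
```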
When building a `Tokenizer`, you can
customize its normalizer by just changing the corresponding attribute:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START replace_normalizer",
"end-before": "END replace_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_replace_normalizer",
"end-before": "END pipeline_replace_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START replace_normalizer",
"end-before": "END replace_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Of course, if you change the way a tokenizer applies normalization, you
should probably retrain it from scratch afterward.
## Pre-Tokenization
Pre-tokenization is the act of splitting a text into smaller objects
that give an upper bound to what your tokens will be at the end of
training. A good way to think of this is that the pre-tokenizer will
split your text into "words" and then, your final tokens will be parts
of those words.
An easy way to pre-tokenize inputs is to split on spaces and
punctuations, which is done by the
`pre_tokenizers.Whitespace`
pre-tokenizer:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_pre_tokenizer",
"end-before": "END setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_pre_tokenizer",
"end-before": "END pipeline_setup_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_pre_tokenizer",
"end-before": "END setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
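As a quick, standalone Python illustration (the sentence is arbitrary), the same pre-tokenizer can be tried with `pre_tokenize_str`:
```python
from tokenizers.pre_tokenizers import Whitespace

pre_tokenizer = Whitespace()
print(pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you."))
# [('Hello', (0, 5)), ('!', (5, 6)), ('How', (7, 10)), ('are', (11, 14)), ('you', (15, 18)),
#  ('?', (18, 19)), ('I', (20, 21)), ("'", (21, 22)), ('m', (22, 23)), ...]
```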
The output is a list of tuples, with each tuple containing one word and
its span in the original sentence (which is used to determine the final
`offsets` of our `Encoding`). Note that splitting on
punctuation will split contractions like `"I'm"` in this example.
You can combine any `PreTokenizer`s together. For instance, here is a pre-tokenizer that will
split on space, punctuation and digits, separating numbers in their
individual digits:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START combine_pre_tokenizer",
"end-before": "END combine_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_combine_pre_tokenizer",
"end-before": "END pipeline_combine_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START combine_pre_tokenizer",
"end-before": "END combine_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
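A rough Python equivalent of that combination, given only as an illustration, would be:
```python
from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import Digits, Whitespace

pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
print(pre_tokenizer.pre_tokenize_str("Call 911!"))
# [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('!', (8, 9))]
```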
As we saw in the `quicktour`, you can
customize the pre-tokenizer of a `Tokenizer` by just changing the corresponding attribute:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START replace_pre_tokenizer",
"end-before": "END replace_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_replace_pre_tokenizer",
"end-before": "END pipeline_replace_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START replace_pre_tokenizer",
"end-before": "END replace_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Of course, if you change the pre-tokenizer, you should probably
retrain your tokenizer from scratch afterward.
## Model
Once the input texts are normalized and pre-tokenized, the
`Tokenizer` applies the model on the
pre-tokens. This is the part of the pipeline that needs training on your
corpus (or that has been trained if you are using a pretrained
tokenizer).
The role of the model is to split your "words" into tokens, using the
rules it has learned. It's also responsible for mapping those tokens to
their corresponding IDs in the vocabulary of the model.
This model is passed along when initializing the
`Tokenizer` so you already know how to
customize this part. Currently, the 🤗 Tokenizers library supports:
- `models.BPE`
- `models.Unigram`
- `models.WordLevel`
- `models.WordPiece`
For more details about each model and its behavior, you can check
[here](components#models)
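For example, the model is chosen when the `Tokenizer` is constructed and its vocabulary is filled in later by a trainer; a minimal Python sketch (assuming the `tokenizers` Python binding, with an illustrative unknown token):
```python
from tokenizers import Tokenizer
from tokenizers.models import BPE

# The model is passed at construction time; training populates its vocabulary.
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
```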
## Post-Processing
Post-processing is the last step of the tokenization pipeline, to
perform any additional transformation to the
`Encoding` before it's returned, like
adding potential special tokens.
As we saw in the quick tour, we can customize the post processor of a
`Tokenizer` by setting the
corresponding attribute. For instance, here is how we can post-process
to make the inputs suitable for the BERT model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_processor",
"end-before": "END setup_processor",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_processor",
"end-before": "END pipeline_setup_processor",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_processor",
"end-before": "END setup_processor",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
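A minimal Python sketch of the referenced snippet (assuming the `tokenizers` Python binding; the special-token ids 1 and 2 are illustrative and would normally be looked up in the vocabulary):
```python
from tokenizers.processors import TemplateProcessing

tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)
```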
Note that, contrary to the pre-tokenizer or the normalizer, you don't
need to retrain a tokenizer after changing its post-processor.
## All together: a BERT tokenizer from scratch
Let's put all those pieces together to build a BERT tokenizer. First,
BERT relies on WordPiece, so we instantiate a new
`Tokenizer` with this model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Then, since BERT preprocesses texts by removing accents and
lowercasing, we set up those normalizers along with a Unicode normalizer:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
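A minimal Python sketch of the referenced snippet (assuming the `tokenizers` Python binding and the `bert_tokenizer` built above):
```python
from tokenizers import normalizers
from tokenizers.normalizers import NFD, Lowercase, StripAccents

# NFD decomposition, lowercasing, then accent stripping.
bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
```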
The pre-tokenizer is just splitting on whitespace and punctuation:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
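A minimal Python sketch of the referenced snippet (same assumptions as above):
```python
from tokenizers.pre_tokenizers import Whitespace

# Split on whitespace and punctuation.
bert_tokenizer.pre_tokenizer = Whitespace()
```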
And the post-processing uses the template we saw in the previous
section:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
We can use this tokenizer and train it on wikitext like in the
`quicktour`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
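A minimal Python sketch of the referenced training step (assuming the `tokenizers` Python binding and local wikitext-103 raw files; the paths, vocabulary size, and special tokens are illustrative):
```python
from tokenizers.trainers import WordPieceTrainer

trainer = WordPieceTrainer(
    vocab_size=30522,
    special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
)
files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
bert_tokenizer.train(files, trainer)
bert_tokenizer.save("data/bert-wiki.json")
```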
## Decoding
On top of encoding the input texts, a `Tokenizer` also has an API for decoding, that is, converting IDs
generated by your model back to text. This is done by the methods
`Tokenizer.decode` (for one predicted text) and `Tokenizer.decode_batch` (for a batch of predictions).
The `decoder` will first convert the IDs back to tokens
(using the tokenizer's vocabulary) and remove all special tokens, then
join those tokens with spaces:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START test_decoding",
"end-before": "END test_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_test_decoding",
"end-before": "END pipeline_test_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START test_decoding",
"end-before": "END test_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
If you used a model that added special characters to represent subtokens
of a given "word" (like the `"##"` in
WordPiece), you will need to customize the `decoder` to treat
them properly. If we take our previous `bert_tokenizer`, for instance, the
default decoding will give:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
But by changing it to a proper decoder, we get:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
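A minimal Python sketch of both decodings (assuming the `tokenizers` Python binding and the trained `bert_tokenizer`; the decoded strings are illustrative, since they depend on the learned vocabulary):
```python
from tokenizers import decoders

output = bert_tokenizer.encode("Welcome to the Tokenizers library.")

# Default decoding keeps the WordPiece "##" continuation markers.
print(bert_tokenizer.decode(output.ids))
# e.g. "welcome to the tok ##eni ##zer ##s library ."

# Use a WordPiece-aware decoder to merge subtokens back into words.
bert_tokenizer.decoder = decoders.WordPiece()
print(bert_tokenizer.decode(output.ids))
# e.g. "welcome to the tokenizers library."
```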
| tokenizers/docs/source-doc-builder/pipeline.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/pipeline.mdx",
"repo_id": "tokenizers",
"token_count": 5902
} |
#!/usr/bin/env node
const { spawn } = require("child_process");
const fs = require("fs");
let folderName = '.';
if (process.argv.length >= 3) {
folderName = process.argv[2];
if (!fs.existsSync(folderName)) {
fs.mkdirSync(folderName);
}
}
const clone = spawn("git", ["clone", "https://github.com/rustwasm/create-wasm-app.git", folderName]);
clone.on("close", code => {
if (code !== 0) {
console.error("cloning the template failed!")
process.exit(code);
} else {
console.log("🦀 Rust + 🕸 Wasm = ❤");
}
});
| tokenizers/tokenizers/examples/unstable_wasm/www/.bin/create-wasm-app.js/0 | {
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/.bin/create-wasm-app.js",
"repo_id": "tokenizers",
"token_count": 210
} |
use crate::tokenizer::{Decoder, Result};
use monostate::MustBe;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
/// Fuse simply fuses all tokens into one big string.
/// It's usually the last decoding step anyway, but this
/// decoder exists in case some decoders need to happen after that
/// step.
#[non_exhaustive]
pub struct Fuse {
#[serde(rename = "type")]
type_: MustBe!("Fuse"),
}
impl Fuse {
pub fn new() -> Self {
Self {
type_: MustBe!("Fuse"),
}
}
}
impl Decoder for Fuse {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
let new_string = tokens.join("");
Ok(vec![new_string])
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn decode() {
let decoder = Fuse::new();
let res = decoder
.decode_chain(vec!["Hey".into(), " friend!".into()])
.unwrap();
assert_eq!(res, vec!["Hey friend!"]);
}
}
| tokenizers/tokenizers/src/decoders/fuse.rs/0 | {
"file_path": "tokenizers/tokenizers/src/decoders/fuse.rs",
"repo_id": "tokenizers",
"token_count": 433
} |
use crate::models::unigram::{lattice::Lattice, model::Unigram};
use crate::tokenizer::{AddedToken, Result, Trainer};
use crate::utils::parallelism::*;
use crate::utils::progress::{ProgressBar, ProgressStyle};
use log::debug;
use serde::{Deserialize, Serialize};
use std::cmp::Reverse;
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
// A token and a score
type SentencePiece = (String, f64);
// A full sentence or word + its count within the dataset
type Sentence = (String, u32);
fn digamma(mut x: f64) -> f64 {
let mut result = 0.0;
while x < 7.0 {
result -= 1.0 / x;
x += 1.0;
}
x -= 1.0 / 2.0;
let xx = 1.0 / x;
let xx2 = xx * xx;
let xx4 = xx2 * xx2;
result += x.ln() + (1.0 / 24.0) * xx2 - 7.0 / 960.0 * xx4 + (31.0 / 8064.0) * xx4 * xx2
- (127.0 / 30720.0) * xx4 * xx4;
result
}
#[derive(thiserror::Error, Debug)]
pub enum UnigramTrainerError {
#[error("The vocabulary is not large enough to contain all chars")]
VocabularyTooSmall,
}
fn to_log_prob(pieces: &mut [SentencePiece]) {
let sum: f64 = pieces.iter().map(|(_, score)| score).sum();
let logsum = sum.ln();
for (_, score) in pieces.iter_mut() {
*score = score.ln() - logsum;
}
}
/// A `UnigramTrainer` can train a `Unigram` model from `word_counts`.
#[non_exhaustive]
#[derive(Builder, Debug, Clone, Serialize, Deserialize)]
pub struct UnigramTrainer {
#[builder(default = "true")]
pub show_progress: bool,
#[builder(default = "8000")]
pub vocab_size: u32,
#[builder(default = "2")]
pub n_sub_iterations: u32,
#[builder(default = "0.75")]
pub shrinking_factor: f64,
#[builder(default = "vec![]")]
pub special_tokens: Vec<AddedToken>,
#[builder(default = "HashSet::new()")]
pub initial_alphabet: HashSet<char>,
#[builder(default = "None")]
pub unk_token: Option<String>,
#[builder(default = "16")]
pub max_piece_length: usize,
#[builder(default = "1_000_000")]
seed_size: usize,
#[builder(default = "HashMap::new()")]
words: HashMap<String, u32>,
}
impl Default for UnigramTrainer {
fn default() -> Self {
Self::builder().build().unwrap()
}
}
impl UnigramTrainer {
pub fn builder() -> UnigramTrainerBuilder {
UnigramTrainerBuilder::default()
}
/// Setup a progress bar if asked to show progress
fn setup_progress(&self) -> Option<ProgressBar> {
if self.show_progress {
let p = ProgressBar::new(0);
p.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {msg:<30!} {wide_bar} {pos:<9!}/{len:>9!}")
.expect("Invalid progress template"),
);
Some(p)
} else {
None
}
}
fn is_valid_sentencepiece(&self, char_string: &[char]) -> bool {
// Checks string length
// Space not in the substring, numbers, hiragana and more should be taken
// care of within pre_tokenizers.
// https://github.com/google/sentencepiece/blob/26be9516cd81d5315ee31c48d2438018e0eab879/src/trainer_interface.cc#L203
let n = char_string.len();
if char_string.is_empty() || n > self.max_piece_length {
return false;
}
true
}
fn finalize(&self, model: Unigram, required_chars: HashSet<String>) -> Result<Unigram> {
let mut min_score_penalty = 0.0;
let min_score_penalty_delta = 0.0001;
let mut pieces: Vec<(String, f64)> = vec![];
let mut inserted: HashSet<String> = HashSet::new();
// We don't want to include the <UNK> that was used to train
inserted.insert("<UNK>".into());
let existing_pieces: HashMap<String, f64> = model.iter().cloned().collect();
for c in required_chars {
if let Some(t) = existing_pieces.get(&c) {
inserted.insert(c.clone());
pieces.push((c, *t));
} else {
let score = model.min_score + min_score_penalty;
inserted.insert(c.clone());
pieces.push((c, score));
min_score_penalty += min_score_penalty_delta;
}
}
let (unk_id, need_add_unk) = if let Some(ref unk) = self.unk_token {
let unk_id = self.special_tokens.iter().enumerate().find_map(|(i, t)| {
if t.content == *unk {
Some(i)
} else {
None
}
});
match unk_id {
Some(id) => (Some(id), false),
None => (Some(0), true),
}
} else {
(None, false)
};
let vocab_size_without_special_tokens = if need_add_unk {
self.vocab_size as usize - self.special_tokens.len() - 1
} else {
self.vocab_size as usize - self.special_tokens.len()
};
for (token, score) in model.iter() {
if inserted.contains::<str>(token) {
continue;
}
inserted.insert(token.to_string());
pieces.push((token.to_string(), if score.is_nan() { 0.0 } else { *score }));
if pieces.len() == vocab_size_without_special_tokens {
break;
}
}
pieces.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
// Insert the necessary tokens
let mut special_tokens = self
.special_tokens
.iter()
.map(|t| (t.content.clone(), 0.0))
.collect::<Vec<_>>();
if need_add_unk {
special_tokens.insert(0, (self.unk_token.clone().unwrap(), 0.0));
}
Unigram::from(
special_tokens.into_iter().chain(pieces).collect(),
unk_id,
model.byte_fallback(),
)
}
fn required_chars(&self, word_counts: &[Sentence]) -> HashSet<String> {
word_counts
.iter()
.flat_map(|(s, _count)| s.chars())
.chain(self.initial_alphabet.iter().copied())
.map(|c| c.to_string())
.collect()
}
fn make_seed_sentence_pieces(
&self,
sentences: &[Sentence],
_progress: &Option<ProgressBar>,
) -> Vec<SentencePiece> {
// Put all sentences in a string, separated by \0
let total: usize = sentences
.iter()
.map(|(s, _)| s.chars().count())
.sum::<usize>()
+ sentences.len();
let mut flat_string = String::with_capacity(total);
let mut all_chars: HashMap<char, u32> = HashMap::new();
let c_sentence_boundary = '\0';
let k_sentence_boundary = '\0'.to_string();
for (string, n) in sentences {
if string.is_empty() {
continue;
}
flat_string.push_str(string);
// XXX
// Comment suggests we add sentence boundary, but it seems to be missing from actual
// code in spm.
flat_string.push_str(&k_sentence_boundary);
for c in string.chars() {
if c != c_sentence_boundary {
*all_chars.entry(c).or_insert(0) += n;
}
}
}
flat_string.shrink_to_fit();
#[cfg(feature = "esaxx_fast")]
let suffix = esaxx_rs::suffix(&flat_string).unwrap();
#[cfg(not(feature = "esaxx_fast"))]
let suffix = esaxx_rs::suffix_rs(&flat_string).unwrap();
// Basic chars need to be in sentence pieces.
let mut seed_sentencepieces: Vec<SentencePiece> = vec![];
let mut sall_chars: Vec<_> = all_chars.into_iter().map(|(a, b)| (b, a)).collect();
// Reversed order
sall_chars.sort_by_key(|&a| Reverse(a));
let mut substr_index: Vec<_> = suffix
.iter()
.filter_map(|(string, freq)| {
if string.len() <= 1 {
return None;
}
if string.contains(&c_sentence_boundary) {
return None;
}
if !self.is_valid_sentencepiece(string) {
return None;
}
let score = freq * string.len() as u32;
// if let Some(p) = &progress {
// p.inc(1);
// }
Some((score, string))
})
.collect();
// Fill seed_sentencepieces
for (count, character) in sall_chars {
seed_sentencepieces.push((character.to_string(), count.into()));
}
// sort by decreasing score
substr_index.sort_by_key(|&a| Reverse(a));
for (score, char_string) in substr_index {
// Just in case
assert!(self.is_valid_sentencepiece(char_string));
let string: String = char_string.iter().collect();
seed_sentencepieces.push((string, score.into()));
if seed_sentencepieces.len() >= self.seed_size {
break;
}
}
to_log_prob(&mut seed_sentencepieces);
seed_sentencepieces
}
fn prune_sentence_pieces(
&self,
model: &Unigram,
pieces: &[SentencePiece],
sentences: &[Sentence],
) -> Vec<SentencePiece> {
let mut always_keep = vec![true; pieces.len()];
let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()];
let bos_id = pieces.len() + 1;
let eos_id = pieces.len() + 2;
// First, segments the current sentencepieces to know
// how each sentencepiece is resegmented if this sentencepiece is removed
// from the vocabulary.
// To do so, we take the second best segmentation of sentencepiece[i].
// alternatives[i] stores the sequence of second best sentencepieces.
for (id, (token, _score)) in pieces.iter().enumerate() {
// Always keep unk.
if id == 0 {
always_keep[id] = false;
continue;
}
let mut lattice = Lattice::from(token, bos_id, eos_id);
model.populate_nodes(&mut lattice);
let nbests = lattice.nbest(2);
if nbests.len() == 1 {
always_keep[id] = true;
} else if nbests[0].len() >= 2 {
always_keep[id] = false;
} else if nbests[0].len() == 1 {
always_keep[id] = true;
for node in &nbests[1] {
let alt_id = node.borrow().id;
alternatives[id].push(alt_id);
}
}
}
// Second, segments all sentences to compute likelihood
// with a unigram language model. inverted[i] stores
// the set of sentence index where the sentencepieces[i] appears.
let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1);
let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect();
let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences
.maybe_par_chunks(chunk_size)
.map(|enumerated_sentence_count_chunk| {
let mut vsum = 0.0;
let mut freq: Vec<f64> = vec![0.0; pieces.len()];
let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()];
for (i, (sentence, count)) in enumerated_sentence_count_chunk {
let mut lattice = Lattice::from(sentence, bos_id, eos_id);
model.populate_nodes(&mut lattice);
vsum += *count as f64;
for node_ref in lattice.viterbi() {
let id = node_ref.borrow().id;
freq[id] += *count as f64;
inverted[id].push(*i);
}
}
(vsum, freq, inverted)
})
.reduce(
|| (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]),
|(vsum, freq, inverted), (lvsum, lfreq, linverted)| {
(
vsum + lvsum,
freq.iter()
.zip(lfreq)
.map(|(global_el, local_el)| global_el + local_el)
.collect(),
inverted
.iter()
.zip(linverted)
.map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat())
.collect(),
)
},
);
let (vsum, freq, inverted) = collected;
let sum: f64 = freq.iter().sum();
let logsum = sum.ln();
let mut candidates: Vec<(usize, f64)> = vec![];
let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize);
new_pieces.push(pieces[0].clone());
// Finally, computes how likely the LM likelihood is reduced if
// the sentencepiece[i] is removed from the vocabulary.
// Since the exact computation of loss is difficult, we compute the
// loss approximately by assuming that all sentencepiece[i] in the sentences
// are replaced with alternatives[i] when sentencepiece[i] is removed.
for (id, (token, score)) in pieces.iter().enumerate() {
if id == 0 {
continue;
}
if freq[id] == 0.0 && !always_keep[id] {
// not found in Viterbi path. Can remove this entry safely.
continue;
} else if alternatives[id].is_empty() {
// no alternatives. Keeps this entry.
new_pieces.push((token.to_string(), *score));
} else {
let mut f = 0.0; // the frequency of pieces[i];
for n in &inverted[id] {
let score = sentences[*n].1 as f64;
f += score;
}
// TODO: Temporary hack to avoid Nans.
if f == 0.0 || f.is_nan() {
// new_pieces.push((token.to_string(), *score));
continue;
}
f /= vsum; // normalizes by all sentence frequency.
let logprob_sp = freq[id].ln() - logsum;
// After removing the sentencepiece[i], its frequency freq[i] is
// re-assigned to alternatives.
// new_sum = current_sum - freq[i] + freq[i] * alternatives.size()
// = current_sum + freq[i] (alternatives - 1)
let logsum_alt = (sum + freq[id] * (alternatives.len() - 1) as f64).ln();
// The frequencies of alternatives are increased by freq[i].
let mut logprob_alt = 0.0;
for n in &alternatives[id] {
logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt;
}
// loss: the diff of likelihood after removing the sentencepieces[i].
let loss = f * (logprob_sp - logprob_alt);
if loss.is_nan() {
panic!("");
}
candidates.push((id, loss));
}
}
let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1
let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize;
let pruned_size = desired_vocab_size.max(pruned_size);
candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
for (id, _score) in candidates {
if new_pieces.len() == pruned_size {
break;
}
new_pieces.push(pieces[id].clone());
}
new_pieces.to_vec()
}
/// Update the progress bar with the new provided length and message
fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &'static str) {
if let Some(p) = p {
p.set_message(message);
p.set_length(len as u64);
p.reset();
}
}
/// Set the progress bar in the finish state
fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) {
if let Some(p) = p {
p.set_length(final_len as u64);
p.finish();
println!();
}
}
fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) {
let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum();
let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1);
let collected: (f64, u32, Vec<f64>) = sentences
.maybe_par_chunks(chunk_size)
.map(|sentences_chunk| {
let mut expected: Vec<f64> = vec![0.0; model.len()];
let mut objs: f64 = 0.0;
let mut ntokens: u32 = 0;
for (string, freq) in sentences_chunk {
let mut lattice = Lattice::from(string, model.bos_id, model.eos_id);
model.populate_nodes(&mut lattice);
let z: f64 = lattice.populate_marginal(*freq as f64, &mut expected);
if z.is_nan() {
panic!("likelihood is NAN. Input sentence may be too long.");
}
ntokens += lattice.viterbi().len() as u32;
objs -= z / (all_sentence_freq as f64);
}
(objs, ntokens, expected)
})
.reduce(
|| (0.0, 0, vec![0.0; model.len()]),
|(objs, ntokens, expected), (lobjs, lntokens, lexpected)| {
(
objs + lobjs,
ntokens + lntokens,
expected
.iter()
.zip(lexpected)
.map(|(global_el, local_el)| global_el + local_el)
.collect(),
)
},
);
collected
}
fn run_m_step(&self, pieces: &[SentencePiece], expected: &[f64]) -> Vec<SentencePiece> {
if pieces.len() != expected.len() {
panic!(
"Those two iterators are supposed to be the same length ({} vs {})",
pieces.len(),
expected.len()
);
}
let mut new_pieces: Vec<SentencePiece> =
Vec::with_capacity(self.vocab_size.try_into().unwrap());
let mut sum = 0.0;
let expected_frequency_threshold = 0.5;
for (i, (freq, (piece, _score))) in expected.iter().zip(pieces).enumerate() {
// Always keep unk.
if i == 0 {
new_pieces.push((piece.clone(), f64::NAN));
continue;
}
if *freq < expected_frequency_threshold {
continue;
}
new_pieces.push((piece.clone(), *freq));
sum += freq;
}
// // Here we do not use the original EM, but use the
// // Bayesianified/DPified EM algorithm.
// // https://cs.stanford.edu/~pliang/papers/tutorial-acl2007-talk.pdf
// // This modification will act as a sparse prior.
let logsum = digamma(sum);
let new_pieces: Vec<_> = new_pieces
.into_iter()
.map(|(s, c)| (s, digamma(c) - logsum))
.collect();
new_pieces
}
pub fn do_train(
&self,
sentences: Vec<Sentence>,
model: &mut Unigram,
) -> Result<Vec<AddedToken>> {
let progress = self.setup_progress();
//
// 1. Compute frequent substrings
// TODO Should be able to upgrade to u64 when needed
self.update_progress(&progress, sentences.len(), "Suffix array seeds");
let mut pieces: Vec<SentencePiece> =
Vec::with_capacity(self.vocab_size.try_into().unwrap());
// We use a UNK token when training, whatever the `self.unk_token`
pieces.push(("<UNK>".into(), f64::NAN));
pieces.extend(self.make_seed_sentence_pieces(&sentences, &progress));
self.finalize_progress(&progress, sentences.len());
// Useful to check compatibility with spm.
debug!(
"Using {} pieces on {} sentences for EM training",
pieces.len(),
sentences.len()
);
let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1
// 2. Run E-M Loops to fine grain the pieces.
// We will shrink the vocab by shrinking_factor every loop on average
// Some other pieces are dropped if logprob is too small
// V = N * (f)**k
// k = log(V / N) / log(f)
let expected_loops = (((desired_vocab_size as f64).ln() - (pieces.len() as f64).ln())
/ self.shrinking_factor.ln()) as usize
+ 1;
let expected_updates = expected_loops * self.n_sub_iterations as usize;
self.update_progress(&progress, expected_updates, "EM training");
let required_chars = self.required_chars(&sentences);
if required_chars.len() as u32 > self.vocab_size {
return Err(Box::new(UnigramTrainerError::VocabularyTooSmall));
}
let mut new_model = Unigram::from(pieces.clone(), Some(0), false)?;
loop {
// Sub-EM iteration.
for _iter in 0..self.n_sub_iterations {
// Executes E step
let (_objective, _num_tokens, expected) = self.run_e_step(&new_model, &sentences);
// Executes M step.
pieces = self.run_m_step(&pieces, &expected);
new_model = Unigram::from(pieces.clone(), Some(0), false)?;
// Useful comment for checking compatibility with spm
debug!(
"Em iter={} size={} obj={} num_tokens={} num_tokens/piece={}",
_iter,
new_model.len(),
_objective,
_num_tokens,
_num_tokens as f64 / model.len() as f64
);
if let Some(p) = &progress {
p.inc(1);
}
} // end of Sub EM iteration
// Stops the iteration when the number of sentence pieces reaches the
// desired symbol size.
if pieces.len() <= desired_vocab_size {
break;
}
// Prunes pieces.
pieces = self.prune_sentence_pieces(&new_model, &pieces, &sentences);
new_model = Unigram::from(pieces.clone(), Some(0), false)?;
}
self.finalize_progress(&progress, expected_updates);
// Finally, adjusts the size of sentencepices to be |vocab_size|.
*model = self.finalize(new_model, required_chars)?;
Ok(self.special_tokens.clone())
}
}
impl Trainer for UnigramTrainer {
type Model = Unigram;
/// Train a Unigram model
fn train(&self, model: &mut Unigram) -> Result<Vec<AddedToken>> {
let sentences: Vec<_> = self.words.iter().map(|(s, i)| (s.to_owned(), *i)).collect();
self.do_train(sentences, model)
}
/// Whether we should show progress
fn should_show_progress(&self) -> bool {
self.show_progress
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> Result<Vec<String>> + Sync,
{
let words: Result<HashMap<String, u32>> = iterator
.maybe_par_bridge()
.map(|sequence| {
let words = process(sequence.as_ref())?;
let mut map = HashMap::new();
for word in words {
map.entry(word).and_modify(|c| *c += 1).or_insert(1);
}
Ok(map)
})
.reduce(
|| Ok(HashMap::new()),
|acc, ws| {
let mut acc = acc?;
for (k, v) in ws? {
acc.entry(k).and_modify(|c| *c += v).or_insert(v);
}
Ok(acc)
},
);
self.words = words?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use assert_approx_eq::assert_approx_eq;
use std::iter::FromIterator;
#[test]
fn test_unigram_chars() {
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.build()
.unwrap();
let sentences = vec![
("This is a".to_string(), 1),
("こんにちは友達".to_string(), 1),
];
let required_chars = trainer.required_chars(&sentences);
assert_eq!(required_chars.len(), 13);
let progress = None;
let table = trainer.make_seed_sentence_pieces(&sentences, &progress);
let target_strings = vec![
"s", "i", " ", "達", "友", "ん", "は", "に", "ち", "こ", "h", "a", "T", "is ", "s ",
];
let strings: Vec<_> = table.iter().map(|(string, _)| string).collect();
assert_eq!(strings, target_strings);
let scores = table.iter().map(|(_, score)| score);
let target_scores = vec![
-2.5649493574615367, // 2.0
-2.5649493574615367, // 2.0
-2.5649493574615367, // 2.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-3.258096538021482, // 1.0
-1.4663370687934272, // 6.0
-1.8718021769015916, // 4.0
];
for (score, target_score) in scores.zip(target_scores) {
assert_approx_eq!(*score, target_score, 0.01);
}
}
#[test]
fn test_initial_alphabet() {
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.initial_alphabet(HashSet::from_iter(vec!['a', 'b', 'c', 'd', 'e', 'f']))
.build()
.unwrap();
let sentences = vec![("こんにちは友達".to_string(), 1)];
let required_chars = trainer.required_chars(&sentences);
assert_eq!(
required_chars,
vec!["こ", "ん", "に", "ち", "は", "友", "達", "a", "b", "c", "d", "e", "f"]
.into_iter()
.map(|s| s.to_owned())
.collect::<HashSet<_>>()
);
}
#[test]
fn test_unk_token() {
// 1. Should add `unk_token` as first special token
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.special_tokens(vec![
AddedToken::from("[SEP]", true),
AddedToken::from("[CLS]", true),
])
.unk_token(Some("[UNK]".into()))
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
// 2. Let it where it is
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.special_tokens(vec![
AddedToken::from("[SEP]", true),
AddedToken::from("[CLS]", true),
AddedToken::from("[UNK]", true),
])
.unk_token(Some("[UNK]".into()))
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));
// 3. Don't put it there if not needed
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next().unwrap().0, "e".to_string());
}
#[test]
fn test_special_tokens() {
let trainer = UnigramTrainerBuilder::default()
.show_progress(false)
.special_tokens(vec![
AddedToken::from("[SEP]", true),
AddedToken::from("[CLS]", true),
])
.build()
.unwrap();
let mut unigram = Unigram::default();
trainer
.do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
.unwrap();
let mut pieces = unigram.iter();
assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
}
#[test]
fn test_to_log_prob() {
let mut a = vec![("".to_string(), 1.0), ("".to_string(), 2.0)];
to_log_prob(&mut a);
let scores = a.iter().map(|(_, score)| *score).collect::<Vec<_>>();
// ln(1) - ln(3)
assert_approx_eq!(scores[0], -1.098, 0.01);
// ln(2) - ln(3)
assert_approx_eq!(scores[1], -0.405, 0.01);
}
}
| tokenizers/tokenizers/src/models/unigram/trainer.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/unigram/trainer.rs",
"repo_id": "tokenizers",
"token_count": 15681
} |
use serde::{Deserialize, Serialize};
use crate::normalizers::NormalizerWrapper;
use crate::tokenizer::{NormalizedString, Normalizer, Result};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Deserialize, Debug, Serialize)]
#[serde(tag = "type")]
/// Allows concatenating multiple other Normalizer as a Sequence.
/// All the normalizers run in sequence in the given order against the same NormalizedString.
pub struct Sequence {
normalizers: Vec<NormalizerWrapper>,
}
impl Sequence {
pub fn new(normalizers: Vec<NormalizerWrapper>) -> Self {
Self { normalizers }
}
}
impl AsRef<[NormalizerWrapper]> for Sequence {
fn as_ref(&self) -> &[NormalizerWrapper] {
&self.normalizers
}
}
impl AsMut<[NormalizerWrapper]> for Sequence {
fn as_mut(&mut self) -> &mut [NormalizerWrapper] {
&mut self.normalizers
}
}
impl IntoIterator for Sequence {
type Item = NormalizerWrapper;
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.normalizers.into_iter()
}
}
impl Normalizer for Sequence {
fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
for normalizer in &self.normalizers {
normalizer.normalize(normalized)?;
}
Ok(())
}
}
/// Lowercases the input
#[derive(Copy, Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Lowercase;
impl Normalizer for Lowercase {
fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
normalized.lowercase();
Ok(())
}
}
| tokenizers/tokenizers/src/normalizers/utils.rs/0 | {
"file_path": "tokenizers/tokenizers/src/normalizers/utils.rs",
"repo_id": "tokenizers",
"token_count": 591
} |
use crate::processors::byte_level::process_offsets;
use crate::tokenizer::{Encoding, PostProcessor, Result};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::iter::FromIterator;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(tag = "type")]
pub struct RobertaProcessing {
pub sep: (String, u32),
pub cls: (String, u32),
pub trim_offsets: bool,
pub add_prefix_space: bool,
}
impl Default for RobertaProcessing {
fn default() -> Self {
Self {
sep: ("</s>".into(), 2),
cls: ("<s>".into(), 0),
trim_offsets: true,
add_prefix_space: true,
}
}
}
impl RobertaProcessing {
pub fn new(sep: (String, u32), cls: (String, u32)) -> Self {
Self {
sep,
cls,
..Default::default()
}
}
#[must_use]
pub fn trim_offsets(mut self, v: bool) -> Self {
self.trim_offsets = v;
self
}
#[must_use]
pub fn add_prefix_space(mut self, v: bool) -> Self {
self.add_prefix_space = v;
self
}
pub fn get_sep_copy(&self) -> (String, u32) {
(self.sep.0.clone(), self.sep.1)
}
pub fn get_cls_copy(&self) -> (String, u32) {
(self.cls.0.clone(), self.cls.1)
}
}
impl PostProcessor for RobertaProcessing {
fn added_tokens(&self, is_pair: bool) -> usize {
if is_pair {
4
} else {
2
}
}
fn process_encodings(
&self,
mut encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> Result<Vec<Encoding>> {
if self.trim_offsets {
for encoding in encodings.iter_mut() {
process_offsets(encoding, self.add_prefix_space);
encoding
.get_overflowing_mut()
.iter_mut()
.for_each(|encoding| process_offsets(encoding, self.add_prefix_space));
}
}
// Roberta is weird, and every encoding is type_id=0.
encodings
.iter_mut()
.for_each(|encoding| encoding.set_type_ids(vec![0; encoding.len()]));
if !add_special_tokens {
return Ok(encodings);
}
let encodings: Vec<Encoding> = encodings
.iter_mut()
.enumerate()
.map(|(i, encoding)| {
if i == 0 {
let ids = [&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat();
let type_ids = [&[0], encoding.get_type_ids(), &[0]].concat();
let tokens = [
&[self.cls.0.clone()],
encoding.get_tokens(),
&[self.sep.0.clone()],
]
.concat();
let words = [&[None], encoding.get_word_ids(), &[None]].concat();
let offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat();
let special_tokens =
[&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]].concat();
let attention_mask = vec![1; ids.len()];
// For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain
// the special tokens.
let sequence_ranges = HashMap::from_iter(vec![(0, 1..ids.len() - 1)]);
Encoding::new(
ids,
type_ids,
tokens,
words,
offsets,
special_tokens,
attention_mask,
encoding
.take_overflowing()
.into_iter()
.map(|encoding| {
let ids =
[&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat();
let type_ids = vec![0; encoding.get_ids().len() + 2];
let tokens = [
&[self.cls.0.clone()],
encoding.get_tokens(),
&[self.sep.0.clone()],
]
.concat();
let words = [&[None], encoding.get_word_ids(), &[None]].concat();
let offsets =
[&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat();
let special_tokens =
[&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]]
.concat();
let attention_mask = vec![1; ids.len()];
// For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't
// contain the special tokens.
let sequence_ranges =
HashMap::from_iter(vec![(0, 1..ids.len() - 1)]);
Encoding::new(
ids,
type_ids,
tokens,
words,
offsets,
special_tokens,
attention_mask,
vec![],
sequence_ranges,
)
})
.collect(),
sequence_ranges,
)
} else {
let pair_ids = [&[self.sep.1], encoding.get_ids(), &[self.sep.1]].concat();
let pair_type_ids = vec![0; encoding.get_ids().len() + 2];
let pair_tokens = [
&[self.sep.0.clone()],
encoding.get_tokens(),
&[self.sep.0.clone()],
]
.concat();
let pair_words = [&[None], encoding.get_word_ids(), &[None]].concat();
let pair_offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat();
let pair_special_tokens =
[&[1], &vec![0u32; encoding.get_type_ids().len()][..], &[1]].concat();
let pair_attention_mask = vec![1; pair_ids.len()];
// For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain
// the special tokens.
let pair_sequence_ranges = HashMap::from_iter(vec![(1, 1..pair_ids.len() - 1)]);
Encoding::new(
pair_ids,
pair_type_ids,
pair_tokens,
pair_words,
pair_offsets,
pair_special_tokens,
pair_attention_mask,
encoding
.take_overflowing()
.into_iter()
.map(|encoding| {
let pair_ids =
[&[self.sep.1], encoding.get_ids(), &[self.sep.1]].concat();
let pair_type_ids = vec![0; encoding.get_ids().len() + 2];
let pair_tokens = [
&[self.sep.0.clone()],
encoding.get_tokens(),
&[self.sep.0.clone()],
]
.concat();
let pair_words =
[&[None], encoding.get_word_ids(), &[None]].concat();
let pair_offsets =
[&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat();
let pair_special_tokens =
[&[1], &vec![0u32; encoding.get_type_ids().len()][..], &[1]]
.concat();
let pair_attention_mask = vec![1; pair_ids.len()];
// For compatibility with `TemplateProcessing`, the sequence_ranges
// shouldn't contain the special tokens.
let pair_sequence_ranges =
HashMap::from_iter(vec![(1, 1..pair_ids.len() - 1)]);
Encoding::new(
pair_ids,
pair_type_ids,
pair_tokens,
pair_words,
pair_offsets,
pair_special_tokens,
pair_attention_mask,
vec![],
pair_sequence_ranges,
)
})
.collect(),
pair_sequence_ranges,
)
}
})
.collect();
Ok(encodings)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn serde() {
let roberta = RobertaProcessing::default();
let roberta_r = r#"{
"type":"RobertaProcessing",
"sep":["</s>",2],
"cls":["<s>",0],
"trim_offsets":true,
"add_prefix_space":true
}"#
.replace(char::is_whitespace, "");
assert_eq!(serde_json::to_string(&roberta).unwrap(), roberta_r);
assert_eq!(
serde_json::from_str::<RobertaProcessing>(&roberta_r).unwrap(),
roberta
);
}
#[test]
fn roberta_processing() {
let processor = RobertaProcessing::default();
assert_eq!(processor.added_tokens(false), 2);
assert_eq!(processor.added_tokens(true), 4);
use crate::Token;
let encoding = Encoding::from_tokens(
vec![
Token::new(12, "Hello".into(), (0, 5)),
Token::new(14, "there".into(), (6, 11)),
],
0,
);
let pair = Encoding::from_tokens(vec![Token::new(15, "pair".into(), (0, 4))], 0);
let single_encoding = processor.process(encoding.clone(), None, true).unwrap();
assert_eq!(
single_encoding,
Encoding::new(
vec![0, 12, 14, 2],
vec![0, 0, 0, 0],
vec!["<s>".into(), "Hello".into(), "there".into(), "</s>".into()],
vec![None, None, None, None],
vec![(0, 0), (0, 5), (6, 11), (0, 0)],
vec![1, 0, 0, 1],
vec![1, 1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 1..3)]),
)
);
assert_eq!(single_encoding.token_to_sequence(2), Some(0));
assert_eq!(single_encoding.token_to_sequence(3), None);
let pair_encoding = processor
.process(encoding.clone(), Some(pair.clone()), true)
.unwrap();
assert_eq!(
pair_encoding,
Encoding::new(
vec![0, 12, 14, 2, 2, 15, 2],
vec![0, 0, 0, 0, 0, 0, 0],
vec![
"<s>".into(),
"Hello".into(),
"there".into(),
"</s>".into(),
"</s>".into(),
"pair".into(),
"</s>".into()
],
vec![None, None, None, None, None, None, None],
vec![(0, 0), (0, 5), (6, 11), (0, 0), (0, 0), (0, 4), (0, 0)],
vec![1, 0, 0, 1, 1, 0, 1],
vec![1, 1, 1, 1, 1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 1..3), (1, 5..6)]),
)
);
assert_eq!(pair_encoding.token_to_sequence(2), Some(0));
assert_eq!(pair_encoding.token_to_sequence(3), None);
assert_eq!(pair_encoding.token_to_sequence(4), None);
assert_eq!(pair_encoding.token_to_sequence(5), Some(1));
assert_eq!(pair_encoding.token_to_sequence(6), None);
// No special tokens
let pair_encoding = processor.process(encoding, Some(pair), false).unwrap();
assert_eq!(
pair_encoding,
Encoding::new(
vec![12, 14, 15],
vec![0, 0, 0],
vec!["Hello".into(), "there".into(), "pair".into(),],
vec![None, None, None],
vec![(0, 5), (6, 11), (0, 4)],
vec![0, 0, 0],
vec![1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 0..2), (1, 2..3)]),
)
);
assert_eq!(pair_encoding.token_to_sequence(0), Some(0));
assert_eq!(pair_encoding.token_to_sequence(1), Some(0));
assert_eq!(pair_encoding.token_to_sequence(2), Some(1));
}
}
| tokenizers/tokenizers/src/processors/roberta.rs/0 | {
"file_path": "tokenizers/tokenizers/src/processors/roberta.rs",
"repo_id": "tokenizers",
"token_count": 8529
} |
use crate::parallelism::*;
use crate::tokenizer::{Encoding, Result};
use serde::{Deserialize, Serialize};
/// The various possible padding directions.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum PaddingDirection {
Left,
Right,
}
impl std::convert::AsRef<str> for PaddingDirection {
fn as_ref(&self) -> &str {
match self {
PaddingDirection::Left => "left",
PaddingDirection::Right => "right",
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PaddingParams {
pub strategy: PaddingStrategy,
pub direction: PaddingDirection,
pub pad_to_multiple_of: Option<usize>,
pub pad_id: u32,
pub pad_type_id: u32,
pub pad_token: String,
}
impl Default for PaddingParams {
fn default() -> Self {
Self {
strategy: PaddingStrategy::BatchLongest,
direction: PaddingDirection::Right,
pad_to_multiple_of: None,
pad_id: 0,
pad_type_id: 0,
pad_token: String::from("[PAD]"),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PaddingStrategy {
BatchLongest,
Fixed(usize),
}
pub fn pad_encodings(encodings: &mut [Encoding], params: &PaddingParams) -> Result<()> {
if encodings.is_empty() {
return Ok(());
}
let mut pad_length = match params.strategy {
PaddingStrategy::Fixed(size) => size,
PaddingStrategy::BatchLongest => encodings
.maybe_par_iter()
.map(|e| e.get_ids().len())
.max()
.unwrap(),
};
if let Some(multiple) = params.pad_to_multiple_of {
if multiple > 0 && pad_length % multiple > 0 {
pad_length += multiple - pad_length % multiple;
}
}
encodings.maybe_par_iter_mut().for_each(|encoding| {
encoding.pad(
pad_length,
params.pad_id,
params.pad_type_id,
¶ms.pad_token,
params.direction,
)
});
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tokenizer::Encoding;
use std::collections::HashMap;
#[test]
fn pad_to_multiple() {
fn get_encodings() -> [Encoding; 2] {
[
Encoding::new(
vec![0, 1, 2, 3, 4],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
HashMap::new(),
),
Encoding::new(
vec![0, 1, 2],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
HashMap::new(),
),
]
}
// Test fixed
let mut encodings = get_encodings();
let mut params = PaddingParams {
strategy: PaddingStrategy::Fixed(7),
direction: PaddingDirection::Right,
pad_to_multiple_of: Some(8),
pad_id: 0,
pad_type_id: 0,
pad_token: String::from("[PAD]"),
};
pad_encodings(&mut encodings, ¶ms).unwrap();
assert!(encodings.iter().all(|e| e.get_ids().len() == 8));
// Test batch
let mut encodings = get_encodings();
params.strategy = PaddingStrategy::BatchLongest;
params.pad_to_multiple_of = Some(6);
pad_encodings(&mut encodings, ¶ms).unwrap();
assert!(encodings.iter().all(|e| e.get_ids().len() == 6));
// Do not crash with 0
params.pad_to_multiple_of = Some(0);
pad_encodings(&mut encodings, ¶ms).unwrap();
}
}
| tokenizers/tokenizers/src/utils/padding.rs/0 | {
"file_path": "tokenizers/tokenizers/src/utils/padding.rs",
"repo_id": "tokenizers",
"token_count": 2049
} |
It's super simple to translate from existing code! Just like the python library, we support the `pipeline` API. Pipelines group together a pretrained model with preprocessing of inputs and postprocessing of outputs, making it the easiest way to run models with the library.
<table>
<tr>
<th width="440px" align="center"><b>Python (original)</b></th>
<th width="440px" align="center"><b>Javascript (ours)</b></th>
</tr>
<tr>
<td>
```python
from transformers import pipeline
# Allocate a pipeline for sentiment-analysis
pipe = pipeline('sentiment-analysis')
out = pipe('I love transformers!')
# [{'label': 'POSITIVE', 'score': 0.999806941}]
```
</td>
<td>
```javascript
import { pipeline } from '@huggingface/transformers';
// Allocate a pipeline for sentiment-analysis
const pipe = await pipeline('sentiment-analysis');
const out = await pipe('I love transformers!');
// [{'label': 'POSITIVE', 'score': 0.999817686}]
```
</td>
</tr>
</table>
You can also use a different model by specifying the model id or path as the second argument to the `pipeline` function. For example:
```javascript
// Use a different model for sentiment-analysis
const pipe = await pipeline('sentiment-analysis', 'Xenova/bert-base-multilingual-uncased-sentiment');
```
By default, when running in the browser, the model will be run on your CPU (via WASM). If you would like
to run the model on your GPU (via WebGPU), you can do this by setting `device: 'webgpu'`, for example:
```javascript
// Run the model on WebGPU
const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncased-finetuned-sst-2-english', {
device: 'webgpu',
});
```
For more information, check out the [WebGPU guide](./guides/webgpu).
> [!WARNING]
> The WebGPU API is still experimental in many browsers, so if you run into any issues,
> please file a [bug report](https://github.com/huggingface/transformers.js/issues/new?title=%5BWebGPU%5D%20Error%20running%20MODEL_ID_GOES_HERE&assignees=&labels=bug,webgpu&projects=&template=1_bug-report.yml).
In resource-constrained environments, such as web browsers, it is advisable to use a quantized version of
the model to lower bandwidth and optimize performance. This can be achieved by adjusting the `dtype` option,
which allows you to select the appropriate data type for your model. While the available options may vary
depending on the specific model, typical choices include `"fp32"` (default for WebGPU), `"fp16"`, `"q8"`
(default for WASM), and `"q4"`. For more information, check out the [quantization guide](./guides/dtypes).
```javascript
// Run the model at 4-bit quantization
const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncased-finetuned-sst-2-english', {
dtype: 'q4',
});
```
| transformers.js/docs/snippets/1_quick-tour.snippet/0 | {
"file_path": "transformers.js/docs/snippets/1_quick-tour.snippet",
"repo_id": "transformers.js",
"token_count": 837
} |
import { env, pipeline } from '@xenova/transformers';
// Skip local model check since we are downloading the model from the Hugging Face Hub.
env.allowLocalModels = false;
class MyFeatureExtractionPipeline {
static task = 'feature-extraction';
static model = 'nomic-ai/nomic-embed-text-v1.5';
static instance = null;
static async getInstance(progress_callback = null) {
if (this.instance === null) {
this.instance = pipeline(this.task, this.model, {
quantized: true,
progress_callback,
});
}
return this.instance;
}
}
// https://huggingface.co/nomic-ai/nomic-embed-text-v1.5#usage
const SEARCH_PREFIX = 'search_query: ';
const DOCUMENT_PREFIX = 'search_document: ';
// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
// Retrieve the pipeline. When called for the first time,
// this will load the pipeline and save it for future use.
const extractor = await MyFeatureExtractionPipeline.getInstance(x => {
// We also add a progress callback to the pipeline so that we can
// track model loading.
self.postMessage(x);
});
const { source, text } = event.data;
const split = [
SEARCH_PREFIX + source,
...text.trim().split('\n').map(x => DOCUMENT_PREFIX + x),
];
const embeddings = await extractor(split, { pooling: 'mean', normalize: true });
// Send the output back to the main thread
self.postMessage({ status: 'complete', embeddings: embeddings.tolist() });
});
| transformers.js/examples/adaptive-retrieval/src/worker.js/0 | {
"file_path": "transformers.js/examples/adaptive-retrieval/src/worker.js",
"repo_id": "transformers.js",
"token_count": 595
} |
import { SamModel, AutoProcessor, RawImage, Tensor } from '@huggingface/transformers';
// We adopt the singleton pattern to enable lazy-loading of the model and processor.
export class SegmentAnythingSingleton {
static model_id = 'Xenova/slimsam-77-uniform';
static model;
static processor;
static getInstance() {
this.model ??= SamModel.from_pretrained(this.model_id, {
dtype: 'fp16',
device: 'webgpu',
});
this.processor ??= AutoProcessor.from_pretrained(this.model_id);
return Promise.all([this.model, this.processor]);
}
}
// State variables
let imageEmbeddings = null;
let imageInputs = null;
let ready = false;
self.onmessage = async (e) => {
const [model, processor] = await SegmentAnythingSingleton.getInstance();
if (!ready) {
// Indicate that we are ready to accept requests
ready = true;
self.postMessage({
type: 'ready',
});
}
const { type, data } = e.data;
if (type === 'reset') {
imageInputs = null;
imageEmbeddings = null;
} else if (type === 'segment') {
// Indicate that we are starting to segment the image
self.postMessage({
type: 'segment_result',
data: 'start',
});
// Read the image and recompute image embeddings
const image = await RawImage.read(e.data.data);
imageInputs = await processor(image);
imageEmbeddings = await model.get_image_embeddings(imageInputs)
// Indicate that we have computed the image embeddings, and we are ready to accept decoding requests
self.postMessage({
type: 'segment_result',
data: 'done',
});
} else if (type === 'decode') {
// Prepare inputs for decoding
const reshaped = imageInputs.reshaped_input_sizes[0];
const points = data.map(x => [x.point[0] * reshaped[1], x.point[1] * reshaped[0]])
const labels = data.map(x => BigInt(x.label));
const input_points = new Tensor(
'float32',
points.flat(Infinity),
[1, 1, points.length, 2],
)
const input_labels = new Tensor(
'int64',
labels.flat(Infinity),
[1, 1, labels.length],
)
// Generate the mask
const { pred_masks, iou_scores } = await model({
...imageEmbeddings,
input_points,
input_labels,
})
// Post-process the mask
const masks = await processor.post_process_masks(
pred_masks,
imageInputs.original_sizes,
imageInputs.reshaped_input_sizes,
);
// Send the result back to the main thread
self.postMessage({
type: 'decode_result',
data: {
mask: RawImage.fromTensor(masks[0][0]),
scores: iou_scores.data,
},
});
} else {
throw new Error(`Unknown message type: ${type}`);
}
}
| transformers.js/examples/segment-anything-client/worker.js/0 | {
"file_path": "transformers.js/examples/segment-anything-client/worker.js",
"repo_id": "transformers.js",
"token_count": 1374
} |
# syntax=docker/dockerfile:1.4
# Adapted from https://github.com/vercel/next.js/blob/e60a1e747c3f521fc24dfd9ee2989e13afeb0a9b/examples/with-docker/Dockerfile
# For more information, see https://nextjs.org/docs/pages/building-your-application/deploying#docker-image
FROM node:18 AS base
# Install dependencies only when needed
FROM base AS deps
WORKDIR /app
# Install dependencies based on the preferred package manager
COPY --link package.json yarn.lock* package-lock.json* pnpm-lock.yaml* ./
RUN \
if [ -f yarn.lock ]; then yarn --frozen-lockfile; \
elif [ -f package-lock.json ]; then npm ci; \
elif [ -f pnpm-lock.yaml ]; then yarn global add pnpm && pnpm i --frozen-lockfile; \
else echo "Lockfile not found." && exit 1; \
fi
# Rebuild the source code only when needed
FROM base AS builder
WORKDIR /app
COPY --from=deps --link /app/node_modules ./node_modules
COPY --link . .
# Next.js collects completely anonymous telemetry data about general usage.
# Learn more here: https://nextjs.org/telemetry
# Uncomment the following line in case you want to disable telemetry during the build.
# ENV NEXT_TELEMETRY_DISABLED 1
RUN npm run build
# If using yarn comment out above and use below instead
# RUN yarn build
# Production image, copy all the files and run next
FROM base AS runner
WORKDIR /app
ENV NODE_ENV production
# Uncomment the following line in case you want to disable telemetry during runtime.
# ENV NEXT_TELEMETRY_DISABLED 1
RUN \
addgroup --system --gid 1001 nodejs; \
adduser --system --uid 1001 nextjs
COPY --from=builder --link /app/public ./public
# Automatically leverage output traces to reduce image size
# https://nextjs.org/docs/advanced-features/output-file-tracing
COPY --from=builder --link --chown=1001:1001 /app/.next/standalone ./
COPY --from=builder --link --chown=1001:1001 /app/.next/static ./.next/static
USER nextjs
EXPOSE 3000
ENV PORT 3000
ENV HOSTNAME 0.0.0.0
# Allow the running process to write model files to the cache folder.
# NOTE: In practice, you would probably want to pre-download the model files to avoid having to download them on-the-fly.
RUN mkdir -p /app/node_modules/@xenova/.cache/
RUN chmod 777 -R /app/node_modules/@xenova/
CMD ["node", "server.js"]
| transformers.js/examples/semantic-image-search/Dockerfile/0 | {
"file_path": "transformers.js/examples/semantic-image-search/Dockerfile",
"repo_id": "transformers.js",
"token_count": 743
} |
import './globals.css'
import { Inter } from 'next/font/google'
const inter = Inter({ subsets: ['latin'] })
export const metadata = {
title: 'Semantic Image Search',
description: 'Search for images using text (built w/ Transformers.js and Supabase)',
}
export default function RootLayout({ children }) {
return (
<html lang="en">
<body className={inter.className}>{children}</body>
</html>
)
}
| transformers.js/examples/semantic-image-search/src/app/layout.js/0 | {
"file_path": "transformers.js/examples/semantic-image-search/src/app/layout.js",
"repo_id": "transformers.js",
"token_count": 139
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Transformers.js | Real-time background removal</title>
</head>
<body>
<h1>
Real-time background removal w/
<a href="https://github.com/huggingface/transformers.js" target="_blank">🤗 Transformers.js</a>
</h1>
<h4>
Runs locally in your browser, powered by
<a href="https://huggingface.co/Xenova/modnet" target="_blank">MODNet</a>
</h4>
<div id="container">
<video id="video" autoplay muted playsinline></video>
<canvas id="canvas" width="360" height="240"></canvas>
<canvas id="output-canvas" width="360" height="240"></canvas>
</div>
<div id="controls">
<div title="Read frames from your webcam and process them at a lower size (lower = faster)">
<label>Stream scale</label>
(<label id="scale-value">0.5</label>)
<br>
<input id="scale" type="range" min="0.1" max="1" step="0.1" value="0.5" disabled>
</div>
<div title="The length of the shortest edge of the image (lower = faster)">
<label>Image size</label>
(<label id="size-value">256</label>)
<br>
<input id="size" type="range" min="64" max="512" step="32" value="256" disabled>
</div>
</div>
<label id="status"></label>
<script type="module" src="/main.js"></script>
</body>
</html> | transformers.js/examples/webgpu-video-background-removal/index.html/0 | {
"file_path": "transformers.js/examples/webgpu-video-background-removal/index.html",
"repo_id": "transformers.js",
"token_count": 553
} |
import {
AutoTokenizer,
AutoProcessor,
WhisperForConditionalGeneration,
TextStreamer,
full,
} from '@huggingface/transformers';
const MAX_NEW_TOKENS = 64;
/**
* This class uses the Singleton pattern to ensure that only one instance of the model is loaded.
*/
class AutomaticSpeechRecognitionPipeline {
static model_id = null;
static tokenizer = null;
static processor = null;
static model = null;
static async getInstance(progress_callback = null) {
this.model_id = 'onnx-community/whisper-base';
this.tokenizer ??= AutoTokenizer.from_pretrained(this.model_id, {
progress_callback,
});
this.processor ??= AutoProcessor.from_pretrained(this.model_id, {
progress_callback,
});
this.model ??= WhisperForConditionalGeneration.from_pretrained(this.model_id, {
dtype: {
encoder_model: 'fp32', // 'fp16' works too
decoder_model_merged: 'q4', // or 'fp32' ('fp16' is broken)
},
device: 'webgpu',
progress_callback,
});
return Promise.all([this.tokenizer, this.processor, this.model]);
}
}
let processing = false;
async function generate({ audio, language }) {
if (processing) return;
processing = true;
// Tell the main thread we are starting
self.postMessage({ status: 'start' });
// Retrieve the text-generation pipeline.
const [tokenizer, processor, model] = await AutomaticSpeechRecognitionPipeline.getInstance();
let startTime;
let numTokens = 0;
const callback_function = (output) => {
startTime ??= performance.now();
let tps;
if (numTokens++ > 0) {
tps = numTokens / (performance.now() - startTime) * 1000;
}
self.postMessage({
status: 'update',
output, tps, numTokens,
});
}
const streamer = new TextStreamer(tokenizer, {
skip_prompt: true,
skip_special_tokens: true,
callback_function,
});
const inputs = await processor(audio);
const outputs = await model.generate({
...inputs,
max_new_tokens: MAX_NEW_TOKENS,
language,
streamer,
});
const outputText = tokenizer.batch_decode(outputs, { skip_special_tokens: true });
// Send the output back to the main thread
self.postMessage({
status: 'complete',
output: outputText,
});
processing = false;
}
async function load() {
self.postMessage({
status: 'loading',
data: 'Loading model...'
});
// Load the pipeline and save it for future use.
const [tokenizer, processor, model] = await AutomaticSpeechRecognitionPipeline.getInstance(x => {
// We also add a progress callback to the pipeline so that we can
// track model loading.
self.postMessage(x);
});
self.postMessage({
status: 'loading',
data: 'Compiling shaders and warming up model...'
});
// Run model with dummy input to compile shaders
await model.generate({
input_features: full([1, 80, 3000], 0.0),
max_new_tokens: 1,
});
self.postMessage({ status: 'ready' });
}
// Listen for messages from the main thread
self.addEventListener('message', async (e) => {
const { type, data } = e.data;
switch (type) {
case 'load':
load();
break;
case 'generate':
generate(data);
break;
}
});
| transformers.js/examples/webgpu-whisper/src/worker.js/0 | {
"file_path": "transformers.js/examples/webgpu-whisper/src/worker.js",
"repo_id": "transformers.js",
"token_count": 1466
} |
import { pipeline } from '@xenova/transformers';
const PER_DEVICE_CONFIG = {
webgpu: {
dtype: {
encoder_model: 'fp32',
decoder_model_merged: 'q4',
},
device: 'webgpu',
},
wasm: {
dtype: 'q8',
device: 'wasm',
},
};
/**
* This class uses the Singleton pattern to ensure that only one instance of the model is loaded.
*/
class PipelineSingleton {
static model_id = 'onnx-community/whisper-base_timestamped';
static instance = null;
static async getInstance(progress_callback = null, device = 'webgpu') {
if (!this.instance) {
this.instance = pipeline('automatic-speech-recognition', this.model_id, {
...PER_DEVICE_CONFIG[device],
progress_callback,
});
}
return this.instance;
}
}
async function load({ device }) {
self.postMessage({
status: 'loading',
data: `Loading model (${device})...`
});
// Load the pipeline and save it for future use.
const transcriber = await PipelineSingleton.getInstance(x => {
// We also add a progress callback to the pipeline so that we can
// track model loading.
self.postMessage(x);
}, device);
if (device === 'webgpu') {
self.postMessage({
status: 'loading',
data: 'Compiling shaders and warming up model...'
});
await transcriber(new Float32Array(16_000), {
language: 'en',
});
}
self.postMessage({ status: 'ready' });
}
async function run({ audio, language }) {
const transcriber = await PipelineSingleton.getInstance();
// Transcribe the audio and time the run
const start = performance.now();
const result = await transcriber(audio, {
language,
return_timestamps: 'word',
chunk_length_s: 30,
});
const end = performance.now();
self.postMessage({ status: 'complete', result, time: end - start });
}
// Listen for messages from the main thread
self.addEventListener('message', async (e) => {
const { type, data } = e.data;
switch (type) {
case 'load':
load(data);
break;
case 'run':
run(data);
break;
}
});
| transformers.js/examples/whisper-word-timestamps/src/worker.js/0 | {
"file_path": "transformers.js/examples/whisper-word-timestamps/src/worker.js",
"repo_id": "transformers.js",
"token_count": 970
} |
/*
* For a detailed explanation regarding each configuration property, visit:
* https://jestjs.io/docs/configuration
*/
export default {
// All imported modules in your tests should be mocked automatically
// automock: false,
// Stop running tests after `n` failures
// bail: 0,
// Automatically clear mock calls, instances, contexts and results before every test
clearMocks: true,
// Indicates whether the coverage information should be collected while executing the test
collectCoverage: true,
// An array of glob patterns indicating a set of files for which coverage information should be collected
// collectCoverageFrom: undefined,
// The directory where Jest should output its coverage files
coverageDirectory: "coverage",
// An array of regexp pattern strings used to skip coverage collection
coveragePathIgnorePatterns: [
"node_modules",
"tests",
],
// Indicates which provider should be used to instrument code for coverage
coverageProvider: "v8",
// A list of reporter names that Jest uses when writing coverage reports
// coverageReporters: [
// "json",
// "text",
// "lcov",
// "clover"
// ],
// An object that configures minimum threshold enforcement for coverage results
// coverageThreshold: undefined,
// A path to a custom dependency extractor
// dependencyExtractor: undefined,
// Make calling deprecated APIs throw helpful error messages
// errorOnDeprecated: false,
// The default configuration for fake timers
// fakeTimers: {
// "enableGlobally": false
// },
// Force coverage collection from ignored files using an array of glob patterns
// forceCoverageMatch: [],
// A path to a module which exports an async function that is triggered once before all test suites
// globalSetup: undefined,
// A path to a module which exports an async function that is triggered once after all test suites
// globalTeardown: undefined,
// A set of global variables that need to be available in all test environments
// globals: {},
// The maximum amount of workers used to run your tests. Can be specified as % or a number. E.g. maxWorkers: 10% will use 10% of your CPU amount + 1 as the maximum worker number. maxWorkers: 2 will use a maximum of 2 workers.
// maxWorkers: "50%",
// An array of directory names to be searched recursively up from the requiring module's location
// moduleDirectories: [
// "node_modules"
// ],
// An array of file extensions your modules use
// moduleFileExtensions: [
// "js",
// "mjs",
// "cjs",
// "jsx",
// "ts",
// "tsx",
// "json",
// "node"
// ],
// A map from regular expressions to module names or to arrays of module names that allow to stub out resources with a single module
// moduleNameMapper: {},
// An array of regexp pattern strings, matched against all module paths before considered 'visible' to the module loader
// modulePathIgnorePatterns: [],
// Activates notifications for test results
// notify: false,
// An enum that specifies notification mode. Requires { notify: true }
// notifyMode: "failure-change",
// A preset that is used as a base for Jest's configuration
// preset: undefined,
// Run tests from one or more projects
// projects: undefined,
// Use this configuration option to add custom reporters to Jest
// reporters: undefined,
// Automatically reset mock state before every test
// resetMocks: false,
// Reset the module registry before running each individual test
// resetModules: false,
// A path to a custom resolver
// resolver: undefined,
// Automatically restore mock state and implementation before every test
// restoreMocks: false,
// The root directory that Jest should scan for tests and modules within
// rootDir: undefined,
// A list of paths to directories that Jest should use to search for files in
roots: ["./tests/"],
// Allows you to use a custom runner instead of Jest's default test runner
// runner: "jest-runner",
// The paths to modules that run some code to configure or set up the testing environment before each test
// setupFiles: [],
// A list of paths to modules that run some code to configure or set up the testing framework before each test
// setupFilesAfterEnv: [],
// The number of seconds after which a test is considered as slow and reported as such in the results.
// slowTestThreshold: 5,
// A list of paths to snapshot serializer modules Jest should use for snapshot testing
// snapshotSerializers: [],
// The test environment that will be used for testing
// testEnvironment: "jest-environment-node",
// Options that will be passed to the testEnvironment
// testEnvironmentOptions: {},
// Adds a location field to test results
// testLocationInResults: false,
// The glob patterns Jest uses to detect test files
// testMatch: [
// "**/__tests__/**/*.[jt]s?(x)",
// "**/?(*.)+(spec|test).[tj]s?(x)"
// ],
// An array of regexp pattern strings that are matched against all test paths, matched tests are skipped
// testPathIgnorePatterns: [
// "\\\\node_modules\\\\"
// ],
// The regexp pattern or array of patterns that Jest uses to detect test files
// testRegex: [],
// This option allows the use of a custom results processor
// testResultsProcessor: undefined,
// This option allows use of a custom test runner
// testRunner: "jest-circus/runner",
// A map from regular expressions to paths to transformers
transform: {},
// An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation
// transformIgnorePatterns: [
// "\\\\node_modules\\\\",
// "\\.pnp\\.[^\\\\]+$"
// ],
// An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them
// unmockedModulePathPatterns: undefined,
// Indicates whether each individual test should be reported during the run
// verbose: undefined,
// An array of regexp patterns that are matched against all source file paths before re-running tests in watch mode
// watchPathIgnorePatterns: [],
// Whether to use watchman for file crawling
// watchman: true,
};
| transformers.js/jest.config.mjs/0 | {
"file_path": "transformers.js/jest.config.mjs",
"repo_id": "transformers.js",
"token_count": 1658
} |
/**
* @file Handler file for choosing the correct version of ONNX Runtime, based on the environment.
* Ideally, we could import the `onnxruntime-web` and `onnxruntime-node` packages only when needed,
* but dynamic imports don't seem to work with the current webpack version and/or configuration.
* This is possibly due to the experimental nature of top-level await statements.
* So, we just import both packages, and use the appropriate one based on the environment:
* - When running in node, we use `onnxruntime-node`.
* - When running in the browser, we use `onnxruntime-web` (`onnxruntime-node` is not bundled).
*
* This module is not directly exported, but can be accessed through the environment variables:
* ```javascript
* import { env } from '@huggingface/transformers';
* console.log(env.backends.onnx);
* ```
*
* @module backends/onnx
*/
import { env, apis } from '../env.js';
// NOTE: Import order matters here. We need to import `onnxruntime-node` before `onnxruntime-web`.
// In either case, we select the default export if it exists, otherwise we use the named export.
import * as ONNX_NODE from 'onnxruntime-node';
import * as ONNX_WEB from 'onnxruntime-web';
export { Tensor } from 'onnxruntime-common';
/**
* @typedef {import('onnxruntime-common').InferenceSession.ExecutionProviderConfig} ONNXExecutionProviders
*/
/** @type {Record<import("../utils/devices.js").DeviceType, ONNXExecutionProviders>} */
const DEVICE_TO_EXECUTION_PROVIDER_MAPPING = Object.freeze({
auto: null, // Auto-detect based on device and environment
gpu: null, // Auto-detect GPU
cpu: 'cpu', // CPU
wasm: 'wasm', // WebAssembly
webgpu: 'webgpu', // WebGPU
cuda: 'cuda', // CUDA
dml: 'dml', // DirectML
webnn: { name: 'webnn', deviceType: 'cpu' }, // WebNN (default)
'webnn-npu': { name: 'webnn', deviceType: 'npu' }, // WebNN NPU
'webnn-gpu': { name: 'webnn', deviceType: 'gpu' }, // WebNN GPU
'webnn-cpu': { name: 'webnn', deviceType: 'cpu' }, // WebNN CPU
});
/**
* The list of supported devices, sorted by priority/performance.
* @type {import("../utils/devices.js").DeviceType[]}
*/
const supportedDevices = [];
/** @type {ONNXExecutionProviders[]} */
let defaultDevices;
let ONNX;
const ORT_SYMBOL = Symbol.for('onnxruntime');
if (ORT_SYMBOL in globalThis) {
// If the JS runtime exposes their own ONNX runtime, use it
ONNX = globalThis[ORT_SYMBOL];
} else if (apis.IS_NODE_ENV) {
ONNX = ONNX_NODE.default ?? ONNX_NODE;
// Updated as of ONNX Runtime 1.20.1
// The following table lists the supported versions of ONNX Runtime Node.js binding provided with pre-built binaries.
// | EPs/Platforms | Windows x64 | Windows arm64 | Linux x64 | Linux arm64 | MacOS x64 | MacOS arm64 |
// | ------------- | ----------- | ------------- | ----------------- | ----------- | --------- | ----------- |
// | CPU | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ |
// | DirectML | ✔️ | ✔️ | ❌ | ❌ | ❌ | ❌ |
// | CUDA | ❌ | ❌ | ✔️ (CUDA v11.8) | ❌ | ❌ | ❌ |
switch (process.platform) {
case 'win32': // Windows x64 and Windows arm64
supportedDevices.push('dml');
break;
case 'linux': // Linux x64 and Linux arm64
if (process.arch === 'x64') {
supportedDevices.push('cuda');
}
break;
case 'darwin': // MacOS x64 and MacOS arm64
break;
}
supportedDevices.push('cpu');
defaultDevices = ['cpu'];
} else {
ONNX = ONNX_WEB;
if (apis.IS_WEBNN_AVAILABLE) {
// TODO: Only push supported providers (depending on available hardware)
supportedDevices.push('webnn-npu', 'webnn-gpu', 'webnn-cpu', 'webnn');
}
if (apis.IS_WEBGPU_AVAILABLE) {
supportedDevices.push('webgpu');
}
supportedDevices.push('wasm');
defaultDevices = ['wasm'];
}
// @ts-ignore
const InferenceSession = ONNX.InferenceSession;
/**
* Map a device to the execution providers to use for the given device.
* @param {import("../utils/devices.js").DeviceType|"auto"|null} [device=null] (Optional) The device to run the inference on.
* @returns {ONNXExecutionProviders[]} The execution providers to use for the given device.
*/
export function deviceToExecutionProviders(device = null) {
// Use the default execution providers if the user hasn't specified anything
if (!device) return defaultDevices;
// Handle overloaded cases
switch (device) {
case "auto":
return supportedDevices;
case "gpu":
return supportedDevices.filter(x =>
["webgpu", "cuda", "dml", "webnn-gpu"].includes(x),
);
}
if (supportedDevices.includes(device)) {
return [DEVICE_TO_EXECUTION_PROVIDER_MAPPING[device] ?? device];
}
throw new Error(`Unsupported device: "${device}". Should be one of: ${supportedDevices.join(', ')}.`)
}
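// Illustrative sketch, not part of the original module: how the mapping above resolves a
// requested device. The concrete return values depend on the environment detected at import
// time, so the results noted in the comments are assumptions rather than guarantees.
//
// deviceToExecutionProviders();        // default providers, e.g. ['wasm'] in a browser or ['cpu'] in Node.js
// deviceToExecutionProviders('auto');  // the full `supportedDevices` list, best-first
// deviceToExecutionProviders('webnn'); // [{ name: 'webnn', deviceType: 'cpu' }] when WebNN is available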
/**
* To prevent multiple calls to `initWasm()`, we store the first call in a Promise
* that is resolved when the first InferenceSession is created. Subsequent calls
* will wait for this Promise to resolve before creating their own InferenceSession.
* @type {Promise<any>|null}
*/
let wasmInitPromise = null;
/**
* Create an ONNX inference session.
* @param {Uint8Array} buffer The ONNX model buffer.
* @param {import('onnxruntime-common').InferenceSession.SessionOptions} session_options ONNX inference session options.
* @param {Object} session_config ONNX inference session configuration.
* @returns {Promise<import('onnxruntime-common').InferenceSession & { config: Object}>} The ONNX inference session.
*/
export async function createInferenceSession(buffer, session_options, session_config) {
if (wasmInitPromise) {
// A previous session has already initialized the WASM runtime
// so we wait for it to resolve before creating this new session.
await wasmInitPromise;
}
const sessionPromise = InferenceSession.create(buffer, session_options);
wasmInitPromise ??= sessionPromise;
const session = await sessionPromise;
session.config = session_config;
return session;
}
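// Hedged usage sketch (added for illustration; the model buffers and options below are placeholders):
// two sessions created concurrently share a single WASM initialization, because the first call's
// promise is cached in `wasmInitPromise` above and awaited by the second call before it creates
// its own session.
//
// const [sessionA, sessionB] = await Promise.all([
//     createInferenceSession(modelBufferA, {}, {}),
//     createInferenceSession(modelBufferB, {}, {}),
// ]);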
/**
* Check if an object is an ONNX tensor.
* @param {any} x The object to check
* @returns {boolean} Whether the object is an ONNX tensor.
*/
export function isONNXTensor(x) {
return x instanceof ONNX.Tensor;
}
/** @type {import('onnxruntime-common').Env} */
// @ts-ignore
const ONNX_ENV = ONNX?.env;
if (ONNX_ENV?.wasm) {
// Initialize wasm backend with suitable default settings.
// (Optional) Set path to wasm files. This is needed when running in a web worker.
// https://onnxruntime.ai/docs/api/js/interfaces/Env.WebAssemblyFlags.html#wasmPaths
// We use remote wasm files by default to make it easier for newer users.
// In practice, users should probably self-host the necessary .wasm files.
ONNX_ENV.wasm.wasmPaths = `https://cdn.jsdelivr.net/npm/@huggingface/transformers@${env.version}/dist/`;
    // TODO: Add support for loading WASM files from a cached buffer when we upgrade to a newer onnxruntime-web release
// https://github.com/microsoft/onnxruntime/pull/21534
    // Users may wish to proxy the WASM backend to prevent the UI from freezing;
    // however, this is not necessary when using WebGPU, so we default to false.
ONNX_ENV.wasm.proxy = false;
// https://developer.mozilla.org/en-US/docs/Web/API/crossOriginIsolated
if (typeof crossOriginIsolated === 'undefined' || !crossOriginIsolated) {
ONNX_ENV.wasm.numThreads = 1;
}
}
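// Hedged sketch of the self-hosting note above (not part of the original file): an application
// could point the runtime at locally served copies of the .wasm files instead of the CDN.
// The '/ort-wasm/' path is a placeholder chosen purely for illustration.
//
// import { env } from '@huggingface/transformers';
// env.backends.onnx.wasm.wasmPaths = '/ort-wasm/';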
if (ONNX_ENV?.webgpu) {
ONNX_ENV.webgpu.powerPreference = 'high-performance';
}
/**
* Check if ONNX's WASM backend is being proxied.
* @returns {boolean} Whether ONNX's WASM backend is being proxied.
*/
export function isONNXProxy() {
// TODO: Update this when allowing non-WASM backends.
return ONNX_ENV?.wasm?.proxy;
}
// Expose ONNX environment variables to `env.backends.onnx`
env.backends.onnx = ONNX_ENV;
| transformers.js/src/backends/onnx.js/0 | {
"file_path": "transformers.js/src/backends/onnx.js",
"repo_id": "transformers.js",
"token_count": 3082
} |
import { IMAGE_PROCESSOR_NAME } from '../../utils/constants.js';
import { getModelJSON } from '../../utils/hub.js';
import { Processor } from '../../base/processing_utils.js';
import * as AllProcessors from '../processors.js';
import * as AllImageProcessors from '../image_processors.js';
import * as AllFeatureExtractors from '../feature_extractors.js';
/**
* Helper class which is used to instantiate pretrained processors with the `from_pretrained` function.
* The chosen processor class is determined by the type specified in the processor config.
*
* **Example:** Load a processor using `from_pretrained`.
* ```javascript
* let processor = await AutoProcessor.from_pretrained('openai/whisper-tiny.en');
* ```
*
* **Example:** Run an image through a processor.
* ```javascript
* let processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16');
* let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
* let image_inputs = await processor(image);
* // {
* // "pixel_values": {
* // "dims": [ 1, 3, 224, 224 ],
* // "type": "float32",
* // "data": Float32Array [ -1.558687686920166, -1.558687686920166, -1.5440893173217773, ... ],
* // "size": 150528
* // },
* // "original_sizes": [
* // [ 533, 800 ]
* // ],
* // "reshaped_input_sizes": [
* // [ 224, 224 ]
* // ]
* // }
* ```
*/
export class AutoProcessor {
/** @type {typeof Processor.from_pretrained} */
static async from_pretrained(pretrained_model_name_or_path, options={}) {
// TODO: first check for processor.json
const preprocessorConfig = await getModelJSON(pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, true, options);
const { image_processor_type, feature_extractor_type, processor_class } = preprocessorConfig;
if (processor_class && AllProcessors[processor_class]) {
return AllProcessors[processor_class].from_pretrained(pretrained_model_name_or_path, options);
}
if (!image_processor_type && !feature_extractor_type) {
throw new Error('No `image_processor_type` or `feature_extractor_type` found in the config.');
}
const components = {};
if (image_processor_type) {
const image_processor_class = AllImageProcessors[image_processor_type];
if (!image_processor_class) {
throw new Error(`Unknown image_processor_type: '${image_processor_type}'.`);
}
components.image_processor = new image_processor_class(preprocessorConfig);
}
if (feature_extractor_type) {
const image_processor_class = AllImageProcessors[feature_extractor_type];
if (image_processor_class) {
// Handle legacy case where image processors were specified as feature extractors
components.image_processor = new image_processor_class(preprocessorConfig);
} else {
const feature_extractor_class = AllFeatureExtractors[feature_extractor_type];
if (!feature_extractor_class) {
throw new Error(`Unknown feature_extractor_type: '${feature_extractor_type}'.`);
}
components.feature_extractor = new feature_extractor_class(preprocessorConfig);
}
}
const config = {};
return new Processor(config, components);
}
}
| transformers.js/src/models/auto/processing_auto.js/0 | {
"file_path": "transformers.js/src/models/auto/processing_auto.js",
"repo_id": "transformers.js",
"token_count": 1334
} |
import { Processor } from "../../base/processing_utils.js";
import { AutoImageProcessor } from "../auto/image_processing_auto.js";
import { AutoTokenizer } from "../../tokenizers.js";
import { center_to_corners_format } from "../../base/image_processors_utils.js";
/**
* Get token ids of phrases from posmaps and input_ids.
* @param {import('../../utils/tensor.js').Tensor} posmaps A boolean tensor of unbatched text-thresholded logits related to the detected bounding boxes of shape `(hidden_size, )`.
* @param {import('../../utils/tensor.js').Tensor} input_ids A tensor of token ids of shape `(sequence_length, )`.
*/
function get_phrases_from_posmap(posmaps, input_ids) {
const left_idx = 0;
const right_idx = posmaps.dims.at(-1) - 1;
const posmaps_list = posmaps.tolist();
posmaps_list.fill(false, 0, left_idx + 1);
posmaps_list.fill(false, right_idx);
const input_ids_list = input_ids.tolist();
return posmaps_list
.map((val, idx) => val ? idx : null)
.filter(idx => idx !== null)
.map(i => input_ids_list[i]);
}
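// Worked example with illustrative values (not from the original source): for
// posmaps = [false, true, true, false] and input_ids = [101, 2019, 4937, 102], the fills above
// force positions 0 and 3 (the special tokens) to false, so the returned phrase token ids are
// [2019, 4937].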
export class GroundingDinoProcessor extends Processor {
static tokenizer_class = AutoTokenizer
static image_processor_class = AutoImageProcessor
/**
* @typedef {import('../../utils/image.js').RawImage} RawImage
*/
/**
*
* @param {RawImage|RawImage[]|RawImage[][]} images
* @param {string|string[]} text
* @returns {Promise<any>}
*/
async _call(images, text, options = {}) {
const image_inputs = images ? await this.image_processor(images, options) : {};
const text_inputs = text ? this.tokenizer(text, options) : {};
return {
...text_inputs,
...image_inputs,
}
}
post_process_grounded_object_detection(outputs, input_ids, {
box_threshold = 0.25,
text_threshold = 0.25,
target_sizes = null
} = {}) {
const { logits, pred_boxes } = outputs;
const batch_size = logits.dims[0];
if (target_sizes !== null && target_sizes.length !== batch_size) {
throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits")
}
const num_queries = logits.dims.at(1);
const probs = logits.sigmoid(); // (batch_size, num_queries, 256)
const scores = probs.max(-1).tolist(); // (batch_size, num_queries)
// Convert to [x0, y0, x1, y1] format
const boxes = pred_boxes.tolist() // (batch_size, num_queries, 4)
.map(batch => batch.map(box => center_to_corners_format(box)));
const results = [];
for (let i = 0; i < batch_size; ++i) {
const target_size = target_sizes !== null ? target_sizes[i] : null;
// Convert from relative [0, 1] to absolute [0, height] coordinates
if (target_size !== null) {
boxes[i] = boxes[i].map(box => box.map((x, j) => x * target_size[(j + 1) % 2]));
}
const batch_scores = scores[i];
const final_scores = [];
const final_phrases = [];
const final_boxes = [];
for (let j = 0; j < num_queries; ++j) {
const score = batch_scores[j];
if (score <= box_threshold) {
continue;
}
const box = boxes[i][j];
const prob = probs[i][j];
final_scores.push(score);
final_boxes.push(box);
const phrases = get_phrases_from_posmap(prob.gt(text_threshold), input_ids[i]);
final_phrases.push(phrases);
}
results.push({ scores: final_scores, boxes: final_boxes, labels: this.batch_decode(final_phrases) });
}
return results;
}
}
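// Hedged usage sketch (not part of the original file): a typical grounded-detection flow built
// on the processor above. The model id, prompt, thresholds, and the `image`/`model` variables
// are illustrative assumptions.
//
// const processor = await GroundingDinoProcessor.from_pretrained('IDEA-Research/grounding-dino-tiny');
// const inputs = await processor(image, 'a cat.');
// const outputs = await model(inputs);
// const [detections] = processor.post_process_grounded_object_detection(outputs, inputs.input_ids, {
//     box_threshold: 0.3,
//     text_threshold: 0.3,
//     target_sizes: [[image.height, image.width]],
// });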
| transformers.js/src/models/grounding_dino/processing_grounding_dino.js/0 | {
"file_path": "transformers.js/src/models/grounding_dino/processing_grounding_dino.js",
"repo_id": "transformers.js",
"token_count": 1714
} |
import {
ImageProcessor,
post_process_object_detection,
} from "../../base/image_processors_utils.js";
export class RTDetrImageProcessor extends ImageProcessor {
/** @type {typeof post_process_object_detection} */
post_process_object_detection(...args) {
return post_process_object_detection(...args);
}
}
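// Hedged sketch (not in the original file): post-processing raw RT-DETR outputs with the helper
// exposed above. The threshold value, the target size, and the `image_processor`/`outputs`/`image`
// variables are placeholder assumptions, and the argument order is taken from the shared
// `post_process_object_detection` utility.
//
// const [detections] = image_processor.post_process_object_detection(outputs, 0.5, [[image.height, image.width]]);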
| transformers.js/src/models/rt_detr/image_processing_rt_detr.js/0 | {
"file_path": "transformers.js/src/models/rt_detr/image_processing_rt_detr.js",
"repo_id": "transformers.js",
"token_count": 122
} |
import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js';
import { Tensor } from '../../utils/tensor.js';
import { mel_filter_bank, spectrogram, window_function } from '../../utils/audio.js';
export class WeSpeakerFeatureExtractor extends FeatureExtractor {
constructor(config) {
super(config);
const sampling_rate = this.config.sampling_rate;
const mel_filters = mel_filter_bank(
256, // num_frequency_bins
this.config.num_mel_bins, // num_mel_filters
20, // min_frequency
Math.floor(sampling_rate / 2), // max_frequency
sampling_rate, // sampling_rate
null, // norm
"kaldi", // mel_scale
true, // triangularize_in_mel_space
);
// Do padding:
for (let i = 0; i < mel_filters.length; ++i) {
mel_filters[i].push(0);
}
this.mel_filters = mel_filters;
this.window = window_function(400, 'hamming', {
periodic: false,
})
this.min_num_frames = this.config.min_num_frames;
}
/**
* Computes the log-Mel spectrogram of the provided audio waveform.
* @param {Float32Array|Float64Array} waveform The audio waveform to process.
* @returns {Promise<Tensor>} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers.
*/
async _extract_fbank_features(waveform) {
// Kaldi compliance: 16-bit signed integers
// 32768 == 2 ** 15
waveform = waveform.map((/** @type {number} */ x) => x * 32768)
return spectrogram(
waveform,
this.window, // window
400, // frame_length
160, // hop_length
{
fft_length: 512,
power: 2.0,
center: false,
preemphasis: 0.97,
mel_filters: this.mel_filters,
log_mel: 'log',
mel_floor: 1.192092955078125e-07,
remove_dc_offset: true,
// Custom
transpose: true,
min_num_frames: this.min_num_frames,
}
)
}
/**
* Asynchronously extracts features from a given audio using the provided configuration.
* @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array.
* @returns {Promise<{ input_features: Tensor }>} A Promise resolving to an object containing the extracted input features as a Tensor.
*/
async _call(audio) {
validate_audio_inputs(audio, 'WeSpeakerFeatureExtractor');
const features = (await this._extract_fbank_features(audio)).unsqueeze_(0);
if (this.config.fbank_centering_span === null) {
// center features with global average
const meanData = /** @type {Float32Array} */ (features.mean(1).data);
const featuresData = /** @type {Float32Array} */(features.data);
const [batch_size, num_frames, feature_size] = features.dims;
for (let i = 0; i < batch_size; ++i) {
const offset1 = i * num_frames * feature_size;
const offset2 = i * feature_size;
for (let j = 0; j < num_frames; ++j) {
const offset3 = offset1 + j * feature_size;
for (let k = 0; k < feature_size; ++k) {
featuresData[offset3 + k] -= meanData[offset2 + k];
}
}
}
}
return {
input_features: features
};
}
}
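// Hedged usage sketch (not part of the original file): assuming the base FeatureExtractor is
// callable (dispatching to `_call`), a configured instance can be applied directly to a mono
// waveform. The `config` source, the one-second silent buffer, and the 16 kHz rate are
// illustrative assumptions.
//
// const extractor = new WeSpeakerFeatureExtractor(config); // `config` assumed to come from a preprocessor_config.json
// const { input_features } = await extractor(new Float32Array(16000));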
| transformers.js/src/models/wespeaker/feature_extraction_wespeaker.js/0 | {
"file_path": "transformers.js/src/models/wespeaker/feature_extraction_wespeaker.js",
"repo_id": "transformers.js",
"token_count": 1713
} |