//! Recurrent Neural Networks
use candle::{DType, Device, IndexOp, Result, Tensor};
/// Trait for Recurrent Neural Networks.
#[allow(clippy::upper_case_acronyms)]
pub trait RNN {
type State: Clone;
/// A zero state from which the recurrent network is usually initialized.
fn zero_state(&self, batch_dim: usize) -> Result<Self::State>;
/// Applies a single step of the recurrent network.
///
/// The input should have dimensions [batch_size, features].
fn step(&self, input: &Tensor, state: &Self::State) -> Result<Self::State>;
/// Applies multiple steps of the recurrent network.
///
/// The input should have dimensions [batch_size, seq_len, features].
/// The initial state is the result of applying zero_state.
fn seq(&self, input: &Tensor) -> Result<Vec<Self::State>> {
let batch_dim = input.dim(0)?;
let state = self.zero_state(batch_dim)?;
self.seq_init(input, &state)
}
/// Applies multiple steps of the recurrent network, starting from the provided initial state.
///
/// The input should have dimensions [batch_size, seq_len, features].
fn seq_init(&self, input: &Tensor, init_state: &Self::State) -> Result<Vec<Self::State>> {
let (_b_size, seq_len, _features) = input.dims3()?;
let mut output = Vec::with_capacity(seq_len);
for seq_index in 0..seq_len {
let input = input.i((.., seq_index, ..))?.contiguous()?;
let state = if seq_index == 0 {
self.step(&input, init_state)?
} else {
self.step(&input, &output[seq_index - 1])?
};
output.push(state);
}
Ok(output)
}
/// Converts a sequence of states to a tensor.
fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor>;
}
/// The state for an LSTM network; this contains two tensors.
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone)]
pub struct LSTMState {
pub h: Tensor,
pub c: Tensor,
}
impl LSTMState {
pub fn new(h: Tensor, c: Tensor) -> Self {
LSTMState { h, c }
}
/// The hidden state vector, which is also the output of the LSTM.
pub fn h(&self) -> &Tensor {
&self.h
}
/// The cell state vector.
pub fn c(&self) -> &Tensor {
&self.c
}
}
#[derive(Debug, Clone, Copy)]
pub enum Direction {
Forward,
Backward,
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone, Copy)]
pub struct LSTMConfig {
pub w_ih_init: super::Init,
pub w_hh_init: super::Init,
pub b_ih_init: Option<super::Init>,
pub b_hh_init: Option<super::Init>,
pub layer_idx: usize,
pub direction: Direction,
}
impl Default for LSTMConfig {
fn default() -> Self {
Self {
w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM,
w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM,
b_ih_init: Some(super::Init::Const(0.)),
b_hh_init: Some(super::Init::Const(0.)),
layer_idx: 0,
direction: Direction::Forward,
}
}
}
impl LSTMConfig {
pub fn default_no_bias() -> Self {
Self {
w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM,
w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM,
b_ih_init: None,
b_hh_init: None,
layer_idx: 0,
direction: Direction::Forward,
}
}
}
/// A Long Short-Term Memory (LSTM) layer.
///
/// <https://en.wikipedia.org/wiki/Long_short-term_memory>
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug)]
pub struct LSTM {
w_ih: Tensor,
w_hh: Tensor,
b_ih: Option<Tensor>,
b_hh: Option<Tensor>,
hidden_dim: usize,
config: LSTMConfig,
device: Device,
dtype: DType,
}
impl LSTM {
/// Creates an LSTM layer.
pub fn new(
in_dim: usize,
hidden_dim: usize,
config: LSTMConfig,
vb: crate::VarBuilder,
) -> Result<Self> {
let layer_idx = config.layer_idx;
let direction_str = match config.direction {
Direction::Forward => "",
Direction::Backward => "_reverse",
};
let w_ih = vb.get_with_hints(
(4 * hidden_dim, in_dim),
&format!("weight_ih_l{layer_idx}{direction_str}"), // Only a single layer is supported.
config.w_ih_init,
)?;
let w_hh = vb.get_with_hints(
(4 * hidden_dim, hidden_dim),
&format!("weight_hh_l{layer_idx}{direction_str}"), // Only a single layer is supported.
config.w_hh_init,
)?;
let b_ih = match config.b_ih_init {
Some(init) => Some(vb.get_with_hints(
4 * hidden_dim,
&format!("bias_ih_l{layer_idx}{direction_str}"),
init,
)?),
None => None,
};
let b_hh = match config.b_hh_init {
Some(init) => Some(vb.get_with_hints(
4 * hidden_dim,
&format!("bias_hh_l{layer_idx}{direction_str}"),
init,
)?),
None => None,
};
Ok(Self {
w_ih,
w_hh,
b_ih,
b_hh,
hidden_dim,
config,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
pub fn config(&self) -> &LSTMConfig {
&self.config
}
}
/// Creates an LSTM layer.
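///
/// # Example
///
/// A minimal usage sketch driving the layer over a whole sequence. It assumes a
/// freshly initialized `VarMap`, so the weights are just their initial values
/// rather than trained parameters.
///
/// ```no_run
/// use candle::{DType, Device, Tensor};
/// use candle_nn::rnn::{lstm, LSTMConfig, RNN};
/// use candle_nn::{VarBuilder, VarMap};
///
/// fn main() -> candle::Result<()> {
///     let dev = Device::Cpu;
///     let varmap = VarMap::new();
///     let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
///     // 16 input features, 32 hidden units.
///     let lstm = lstm(16, 32, LSTMConfig::default(), vb)?;
///     // The input has shape (batch_size, seq_len, features).
///     let xs = Tensor::zeros((4, 10, 16), DType::F32, &dev)?;
///     let states = lstm.seq(&xs)?;
///     // Hidden states stacked over time: (batch_size, seq_len, hidden_dim).
///     let _hs = lstm.states_to_tensor(&states)?;
///     Ok(())
/// }
/// ```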
pub fn lstm(
in_dim: usize,
hidden_dim: usize,
config: LSTMConfig,
vb: crate::VarBuilder,
) -> Result<LSTM> {
LSTM::new(in_dim, hidden_dim, config, vb)
}
impl RNN for LSTM {
type State = LSTMState;
fn zero_state(&self, batch_dim: usize) -> Result<Self::State> {
let zeros =
Tensor::zeros((batch_dim, self.hidden_dim), self.dtype, &self.device)?.contiguous()?;
Ok(Self::State {
h: zeros.clone(),
c: zeros.clone(),
})
}
fn step(&self, input: &Tensor, in_state: &Self::State) -> Result<Self::State> {
let w_ih = input.matmul(&self.w_ih.t()?)?;
let w_hh = in_state.h.matmul(&self.w_hh.t()?)?;
let w_ih = match &self.b_ih {
None => w_ih,
Some(b_ih) => w_ih.broadcast_add(b_ih)?,
};
let w_hh = match &self.b_hh {
None => w_hh,
Some(b_hh) => w_hh.broadcast_add(b_hh)?,
};
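// The pre-activations are packed along dim 1 in the [input, forget, cell, output]
// gate order used by the PyTorch LSTM weight layout:
//   i = sigmoid(w_ii x + b_ii + w_hi h + b_hi)
//   f = sigmoid(w_if x + b_if + w_hf h + b_hf)
//   g = tanh(w_ig x + b_ig + w_hg h + b_hg)
//   o = sigmoid(w_io x + b_io + w_ho h + b_ho)
//   c' = f * c + i * g,   h' = o * tanh(c')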
let chunks = (&w_ih + &w_hh)?.chunk(4, 1)?;
let in_gate = crate::ops::sigmoid(&chunks[0])?;
let forget_gate = crate::ops::sigmoid(&chunks[1])?;
let cell_gate = chunks[2].tanh()?;
let out_gate = crate::ops::sigmoid(&chunks[3])?;
let next_c = ((forget_gate * &in_state.c)? + (in_gate * cell_gate)?)?;
let next_h = (out_gate * next_c.tanh()?)?;
Ok(LSTMState {
c: next_c,
h: next_h,
})
}
fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor> {
let states = states.iter().map(|s| s.h.clone()).collect::<Vec<_>>();
Tensor::stack(&states, 1)
}
}
/// The state for a GRU network; this contains a single tensor.
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone)]
pub struct GRUState {
pub h: Tensor,
}
impl GRUState {
/// The hidden state vector, which is also the output of the GRU.
pub fn h(&self) -> &Tensor {
&self.h
}
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone, Copy)]
pub struct GRUConfig {
pub w_ih_init: super::Init,
pub w_hh_init: super::Init,
pub b_ih_init: Option<super::Init>,
pub b_hh_init: Option<super::Init>,
}
impl Default for GRUConfig {
fn default() -> Self {
Self {
w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM,
w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM,
b_ih_init: Some(super::Init::Const(0.)),
b_hh_init: Some(super::Init::Const(0.)),
}
}
}
impl GRUConfig {
pub fn default_no_bias() -> Self {
Self {
w_ih_init: super::init::DEFAULT_KAIMING_UNIFORM,
w_hh_init: super::init::DEFAULT_KAIMING_UNIFORM,
b_ih_init: None,
b_hh_init: None,
}
}
}
/// A Gated Recurrent Unit (GRU) layer.
///
/// <https://en.wikipedia.org/wiki/Gated_recurrent_unit>
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug)]
pub struct GRU {
w_ih: Tensor,
w_hh: Tensor,
b_ih: Option<Tensor>,
b_hh: Option<Tensor>,
hidden_dim: usize,
config: GRUConfig,
device: Device,
dtype: DType,
}
impl GRU {
/// Creates a GRU layer.
pub fn new(
in_dim: usize,
hidden_dim: usize,
config: GRUConfig,
vb: crate::VarBuilder,
) -> Result<Self> {
let w_ih = vb.get_with_hints(
(3 * hidden_dim, in_dim),
"weight_ih_l0", // Only a single layer is supported.
config.w_ih_init,
)?;
let w_hh = vb.get_with_hints(
(3 * hidden_dim, hidden_dim),
"weight_hh_l0", // Only a single layer is supported.
config.w_hh_init,
)?;
let b_ih = match config.b_ih_init {
Some(init) => Some(vb.get_with_hints(3 * hidden_dim, "bias_ih_l0", init)?),
None => None,
};
let b_hh = match config.b_hh_init {
Some(init) => Some(vb.get_with_hints(3 * hidden_dim, "bias_hh_l0", init)?),
None => None,
};
Ok(Self {
w_ih,
w_hh,
b_ih,
b_hh,
hidden_dim,
config,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
pub fn config(&self) -> &GRUConfig {
&self.config
}
}
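/// Creates a GRU layer.
///
/// A minimal single-step sketch, mirroring the LSTM example above and again assuming
/// a freshly initialized `VarMap` rather than trained weights.
///
/// ```no_run
/// use candle::{DType, Device, Tensor};
/// use candle_nn::rnn::{gru, GRUConfig, RNN};
/// use candle_nn::{VarBuilder, VarMap};
///
/// fn main() -> candle::Result<()> {
///     let dev = Device::Cpu;
///     let varmap = VarMap::new();
///     let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
///     let gru = gru(16, 32, GRUConfig::default(), vb)?;
///     let state = gru.zero_state(4)?;
///     // A single step consumes an input of shape (batch_size, features).
///     let x = Tensor::zeros((4, 16), DType::F32, &dev)?;
///     let state = gru.step(&x, &state)?;
///     // state.h() now has shape (batch_size, hidden_dim) = (4, 32).
///     let _h = state.h();
///     Ok(())
/// }
/// ```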
pub fn gru(
in_dim: usize,
hidden_dim: usize,
config: GRUConfig,
vb: crate::VarBuilder,
) -> Result<GRU> {
GRU::new(in_dim, hidden_dim, config, vb)
}
impl RNN for GRU {
type State = GRUState;
fn zero_state(&self, batch_dim: usize) -> Result<Self::State> {
let h =
Tensor::zeros((batch_dim, self.hidden_dim), self.dtype, &self.device)?.contiguous()?;
Ok(Self::State { h })
}
fn step(&self, input: &Tensor, in_state: &Self::State) -> Result<Self::State> {
let w_ih = input.matmul(&self.w_ih.t()?)?;
let w_hh = in_state.h.matmul(&self.w_hh.t()?)?;
let w_ih = match &self.b_ih {
None => w_ih,
Some(b_ih) => w_ih.broadcast_add(b_ih)?,
};
let w_hh = match &self.b_hh {
None => w_hh,
Some(b_hh) => w_hh.broadcast_add(b_hh)?,
};
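// The pre-activations are packed along dim 1 in the [reset, update, new] gate order
// used by the PyTorch GRU weight layout:
//   r = sigmoid(w_ir x + b_ir + w_hr h + b_hr)
//   z = sigmoid(w_iz x + b_iz + w_hz h + b_hz)
//   n = tanh(w_in x + b_in + r * (w_hn h + b_hn))
//   h' = (1 - z) * n + z * h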
let chunks_ih = w_ih.chunk(3, 1)?;
let chunks_hh = w_hh.chunk(3, 1)?;
let r_gate = crate::ops::sigmoid(&(&chunks_ih[0] + &chunks_hh[0])?)?;
let z_gate = crate::ops::sigmoid(&(&chunks_ih[1] + &chunks_hh[1])?)?;
let n_gate = (&chunks_ih[2] + (r_gate * &chunks_hh[2])?)?.tanh();
let next_h = ((&z_gate * &in_state.h)? - ((&z_gate - 1.)? * n_gate)?)?;
Ok(GRUState { h: next_h })
}
fn states_to_tensor(&self, states: &[Self::State]) -> Result<Tensor> {
let states = states.iter().map(|s| s.h.clone()).collect::<Vec<_>>();
Tensor::cat(&states, 1)
}
}
| candle/candle-nn/src/rnn.rs/0 | {
"file_path": "candle/candle-nn/src/rnn.rs",
"repo_id": "candle",
"token_count": 5697
} |
# candle-onnx
This crate adds ONNX support to candle.
## FAQ
#### Missing protoc installation when compiling candle-onnx
The candle-onnx dependency `prost-build` no longer ships with a bundled `protoc`
binary. This can cause the following error when attempting to compile
candle-onnx:
```
error: failed to run custom build command for `candle-onnx`
Caused by: // (...)
Could not find `protoc` installation and this build crate cannot proceed without this knowledge.
```
To fix this issue, install `protoc` on your system and make it available in your
system `PATH`. See the [protoc
documentation](https://grpc.io/docs/protoc-installation/) for more information.
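For example, on Debian/Ubuntu based systems something along these lines should be enough (package names differ on other platforms, see the link above):
```bash
sudo apt-get install protobuf-compiler
protoc --version
```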
| candle/candle-onnx/README.md/0 | {
"file_path": "candle/candle-onnx/README.md",
"repo_id": "candle",
"token_count": 180
} |
# Generated content DO NOT EDIT
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence
from os import PathLike
from candle.typing import _ArrayLike, Device, Scalar, Index, Shape
from candle import Tensor, DType, QTensor
@staticmethod
def avg_pool2d(tensor: Tensor, ksize: int, stride: int = 1) -> Tensor:
"""
Applies the 2d avg-pool function to a given tensor.
"""
pass
@staticmethod
def gelu(tensor: Tensor) -> Tensor:
"""
Applies the Gaussian Error Linear Unit (GELU) function to a given tensor.
"""
pass
@staticmethod
def max_pool2d(tensor: Tensor, ksize: int, stride: int = 1) -> Tensor:
"""
Applies the 2d max-pool function to a given tensor.
"""
pass
@staticmethod
def relu(tensor: Tensor) -> Tensor:
"""
Applies the Rectified Linear Unit (ReLU) function to a given tensor.
"""
pass
@staticmethod
def silu(tensor: Tensor) -> Tensor:
"""
Applies the Sigmoid Linear Unit (SiLU) function to a given tensor.
"""
pass
@staticmethod
def softmax(tensor: Tensor, dim: int) -> Tensor:
"""
Applies the Softmax function to a given tensor.
"""
pass
@staticmethod
def tanh(tensor: Tensor) -> Tensor:
"""
Applies the tanh function to a given tensor.
"""
pass
| candle/candle-pyo3/py_src/candle/functional/__init__.pyi/0 | {
"file_path": "candle/candle-pyo3/py_src/candle/functional/__init__.pyi",
"repo_id": "candle",
"token_count": 484
} |
[project]
name = 'candle-nn'
requires-python = '>=3.7'
authors = [
{name = 'The Candle Team'},
]
dynamic = [
'description',
'license',
'readme',
]
[project.urls]
Homepage = 'https://github.com/huggingface/candle'
Source = 'https://github.com/huggingface/candle'
[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"
[tool.maturin]
python-source = "py_src"
module-name = "candle.candle"
bindings = 'pyo3'
features = ["pyo3/extension-module"]
[tool.black]
line-length = 119
target-version = ['py35']
[project.optional-dependencies]
testing = ["pytest", "black==22.3"]
huggingface = ["transformers>=4.33.3", "huggingface-hub>=0.17.3"]
| candle/candle-pyo3/pyproject.toml/0 | {
"file_path": "candle/candle-pyo3/pyproject.toml",
"repo_id": "candle",
"token_count": 285
} |
[package]
name = "candle-transformers"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"
[dependencies]
accelerate-src = { workspace = true, optional = true }
byteorder = { workspace = true }
candle = { workspace = true }
candle-flash-attn = { workspace = true, optional = true }
candle-nn = { workspace = true }
fancy-regex = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
num-traits = { workspace = true }
rand = { workspace = true }
rayon = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_plain = { workspace = true }
tracing = { workspace = true }
[features]
default = []
accelerate = ["dep:accelerate-src", "candle/accelerate", "candle-nn/accelerate"]
cuda = ["candle/cuda", "candle-nn/cuda"]
flash-attn = ["cuda", "dep:candle-flash-attn"]
mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl"]
metal = ["candle/metal", "candle-nn/metal"]
| candle/candle-transformers/Cargo.toml/0 | {
"file_path": "candle/candle-transformers/Cargo.toml",
"repo_id": "candle",
"token_count": 372
} |
//! Contrastive Language-Image Pre-Training
//!
//! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
//! pairs of images with related texts.
//!
//! https://github.com/openai/CLIP
//! https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip
use candle::{Context, IndexOp, Result, Shape, Tensor, D};
use candle_nn as nn;
use candle_nn::Module;
use nn::Conv2dConfig;
use super::{
text_model::{Activation, ClipEncoder},
EncoderConfig,
};
#[derive(Debug, Clone)]
pub struct ClipVisionConfig {
pub embed_dim: usize,
pub activation: Activation,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
#[allow(dead_code)]
pub projection_dim: usize,
pub num_channels: usize,
pub image_size: usize,
pub patch_size: usize,
}
impl ClipVisionConfig {
// The config details can be found in the "vision_config" section of this json file:
// https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json
pub fn vit_base_patch32() -> Self {
Self {
embed_dim: 768,
activation: Activation::QuickGelu,
intermediate_size: 3072,
num_hidden_layers: 12,
num_attention_heads: 12,
projection_dim: 512,
num_channels: 3,
image_size: 224,
patch_size: 32,
}
}
pub fn clip_vit_large_patch14_336() -> Self {
Self {
embed_dim: 1024,
activation: Activation::QuickGelu,
intermediate_size: 4096,
num_hidden_layers: 24,
num_attention_heads: 16,
projection_dim: 768,
num_channels: 3,
image_size: 336,
patch_size: 14,
}
}
}
// https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L112
#[derive(Clone, Debug)]
struct ClipVisionEmbeddings {
patch_embedding: candle_nn::Conv2d,
position_ids: Tensor,
class_embedding: Tensor,
position_embedding: candle_nn::Embedding,
}
impl ClipVisionEmbeddings {
fn new(vs: candle_nn::VarBuilder, c: &ClipVisionConfig) -> Result<Self> {
// originally nn.Parameter
let class_embedding = if vs.contains_tensor("class_embedding") {
vs.get(c.embed_dim, "class_embedding")?
} else {
Tensor::randn(0f32, 1f32, c.embed_dim, vs.device())?
};
let num_patches = (c.image_size / c.patch_size).pow(2);
let num_positions = num_patches + 1;
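// e.g. for vit_base_patch32: (224 / 32)^2 = 49 patches, plus one class token -> 50 positions.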
let position_ids = Tensor::arange(0, num_positions as i64, vs.device())?;
let conv2dconfig = Conv2dConfig {
stride: c.patch_size,
..Default::default()
};
let position_embedding =
candle_nn::embedding(num_positions, c.embed_dim, vs.pp("position_embedding"))?;
let patch_embedding = candle_nn::conv2d_no_bias(
c.num_channels,
c.embed_dim,
c.patch_size,
conv2dconfig,
vs.pp("patch_embedding"),
)?;
Ok(Self {
patch_embedding,
position_ids,
class_embedding,
position_embedding,
})
}
}
impl Module for ClipVisionEmbeddings {
fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> {
let batch_size = pixel_values.shape().dims();
let patch_embeds = self
.patch_embedding
.forward(pixel_values)?
.flatten_from(2)?
.transpose(1, 2)?;
let shape = Shape::from((batch_size[0], 1, self.class_embedding.dim(D::Minus1)?));
let class_embeds = self.class_embedding.expand(shape)?;
let embeddings = Tensor::cat(&[class_embeds, patch_embeds], 1)?;
let position_embedding = self.position_embedding.forward(&self.position_ids)?;
embeddings.broadcast_add(&position_embedding)
}
}
// https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L743
#[derive(Clone, Debug)]
pub struct ClipVisionTransformer {
embeddings: ClipVisionEmbeddings,
encoder: ClipEncoder,
pre_layer_norm: candle_nn::LayerNorm,
final_layer_norm: candle_nn::LayerNorm,
}
impl ClipVisionTransformer {
pub fn new(vs: candle_nn::VarBuilder, c: &ClipVisionConfig) -> Result<Self> {
let embeddings = ClipVisionEmbeddings::new(vs.pp("embeddings"), c)?;
let pre_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("pre_layrnorm"))?;
let encoder = ClipEncoder::new(vs.pp("encoder"), &EncoderConfig::Vision(c.clone()))?;
let final_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("post_layernorm"))?;
Ok(Self {
embeddings,
encoder,
final_layer_norm,
pre_layer_norm,
})
}
// required by LLaVA
pub fn output_hidden_states(&self, pixel_values: &Tensor) -> Result<Vec<Tensor>> {
let hidden_states = pixel_values
.apply(&self.embeddings)?
.apply(&self.pre_layer_norm)?;
let mut result = self.encoder.output_hidden_states(&hidden_states, None)?;
let encoder_outputs = result.last().context("no last")?;
let pooled_output = encoder_outputs.i((.., 0, ..))?;
result.push(self.final_layer_norm.forward(&pooled_output)?.clone());
Ok(result)
}
}
impl Module for ClipVisionTransformer {
fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> {
let hidden_states = pixel_values
.apply(&self.embeddings)?
.apply(&self.pre_layer_norm)?;
let encoder_outputs = self.encoder.forward(&hidden_states, None)?;
// https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L787
// pooled_output = encoder_outputs[:, 0, :]
let pooled_output = encoder_outputs.i((.., 0, ..))?;
self.final_layer_norm.forward(&pooled_output)
}
}
| candle/candle-transformers/src/models/clip/vision_model.rs/0 | {
"file_path": "candle/candle-transformers/src/models/clip/vision_model.rs",
"repo_id": "candle",
"token_count": 2837
} |
//! # FastViT inference implementation based on timm
//!
//! ## Description
//! See ["FastViT: A Fast Hybrid Vision Transformer using Structural Reparameterization"](https://arxiv.org/pdf/2303.14189)
//!
//! Implementation based on [timm model](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/fastvit.py)
use candle::{Context, DType, Result, Tensor, D};
use candle_nn::{
batch_norm, conv2d, conv2d_no_bias, linear, linear_no_bias, ops::sigmoid, ops::softmax,
BatchNorm, Conv2d, Conv2dConfig, Func, VarBuilder,
};
#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)]
pub struct Config {
pub exp_ratio: usize,
pub in_channels: usize,
pub blocks: [usize; 4],
pub attn: bool,
pub lkc_use_act: bool,
}
impl Config {
pub fn t8() -> Self {
Self {
exp_ratio: 3,
in_channels: 48,
blocks: [2, 2, 4, 2],
attn: false,
lkc_use_act: false,
}
}
pub fn t12() -> Self {
Self {
exp_ratio: 3,
in_channels: 64,
blocks: [2, 2, 6, 2],
attn: false,
lkc_use_act: false,
}
}
pub fn s12() -> Self {
Self {
exp_ratio: 4,
in_channels: 64,
blocks: [2, 2, 6, 2],
attn: false,
lkc_use_act: false,
}
}
pub fn sa12() -> Self {
Self {
exp_ratio: 4,
in_channels: 64,
blocks: [2, 2, 6, 2],
attn: true,
lkc_use_act: false,
}
}
pub fn sa24() -> Self {
Self {
exp_ratio: 4,
in_channels: 64,
blocks: [4, 4, 12, 4],
attn: true,
lkc_use_act: false,
}
}
pub fn sa36() -> Self {
Self {
exp_ratio: 4,
in_channels: 64,
blocks: [6, 6, 18, 6],
attn: true,
lkc_use_act: false,
}
}
pub fn ma36() -> Self {
Self {
exp_ratio: 4,
in_channels: 76,
blocks: [6, 6, 18, 6],
attn: true,
lkc_use_act: false,
}
}
// configs used by MobileCLIP's image encoder
pub fn mci0() -> Self {
Self {
exp_ratio: 3,
in_channels: 64,
blocks: [2, 6, 10, 2],
attn: true,
lkc_use_act: true,
}
}
pub fn mci1() -> Self {
Self {
exp_ratio: 3,
in_channels: 64,
blocks: [4, 12, 20, 4],
attn: true,
lkc_use_act: true,
}
}
pub fn mci2() -> Self {
Self {
exp_ratio: 3,
in_channels: 80,
blocks: [4, 12, 24, 4],
attn: true,
lkc_use_act: true,
}
}
}
fn conv_norm(
in_channels: usize,
out_channels: usize,
kernel: usize,
stride: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride,
padding: kernel / 2,
groups: in_channels,
..Default::default()
};
let bn = batch_norm(out_channels, 1e-5, vb.pp("bn"))?;
let conv = conv2d_no_bias(in_channels, out_channels, kernel, conv2d_cfg, vb.pp("conv"))?;
let conv = conv.absorb_bn(&bn)?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&conv)?;
Ok(xs)
}))
}
fn conv_mlp(dim: usize, exp_ratio: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let conv = conv_norm(dim, dim, 7, 1, vb.pp("conv"))?;
let fc1 = conv2d(dim, dim * exp_ratio, 1, conv2d_cfg, vb.pp("fc1"))?;
let fc2 = conv2d(dim * exp_ratio, dim, 1, conv2d_cfg, vb.pp("fc2"))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&conv)?.apply(&fc1)?.gelu_erf()?.apply(&fc2)?;
Ok(xs)
}))
}
fn squeeze_and_excitation(
in_channels: usize,
squeeze_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let fc1 = conv2d(in_channels, squeeze_channels, 1, conv2d_cfg, vb.pp("fc1"))?;
let fc2 = conv2d(squeeze_channels, in_channels, 1, conv2d_cfg, vb.pp("fc2"))?;
Ok(Func::new(move |xs| {
let residual = xs;
let xs = xs.mean_keepdim(D::Minus2)?.mean_keepdim(D::Minus1)?;
let xs = sigmoid(&xs.apply(&fc1)?.relu()?.apply(&fc2)?)?;
residual.broadcast_mul(&xs)
}))
}
// fuses a convolutional kernel and a batchnorm layer into a convolutional layer
// based on the _fuse_bn_tensor method in timm
// see https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L602
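// Given y = conv(x; W) and bn(y) = gamma * (y - mean) / sqrt(var + eps) + beta, the
// fused convolution uses W' = W * gamma / sqrt(var + eps) and
// b' = beta - mean * gamma / sqrt(var + eps), which is what is computed below.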
fn fuse_conv_bn(weights: &Tensor, bn: BatchNorm) -> Result<(Tensor, Tensor)> {
let (gamma, beta) = bn.weight_and_bias().context("no weight-bias")?;
let mu = bn.running_mean();
let sigma = (bn.running_var() + bn.eps())?.sqrt();
let gps = (gamma / sigma)?;
let bias = (beta - mu * &gps)?;
let weights = weights.broadcast_mul(&gps.reshape(((), 1, 1, 1))?)?;
Ok((weights, bias))
}
fn mobileone_block(
in_channels: usize,
out_channels: usize,
kernel: usize,
stride: usize,
group_size: usize,
use_act: bool,
vb: VarBuilder,
) -> Result<Func<'static>> {
let groups = if group_size == 0 {
1
} else {
in_channels / group_size
};
let padding = kernel / 2;
let conv2d_cfg = Conv2dConfig {
stride,
groups,
padding,
..Default::default()
};
let mut w = Tensor::zeros(
(out_channels, in_channels / groups, kernel, kernel),
DType::F32,
vb.device(),
)?;
let dim = out_channels;
let mut b = Tensor::zeros(dim, DType::F32, vb.device())?;
let conv_kxk_bn = batch_norm(dim, 1e-5, vb.pp("conv_kxk.0.bn"));
let conv_kxk = conv2d_no_bias(
in_channels,
out_channels,
kernel,
conv2d_cfg,
vb.pp("conv_kxk.0.conv"),
);
if let (Ok(conv), Ok(bn)) = (conv_kxk, conv_kxk_bn) {
let (wk, bk) = fuse_conv_bn(conv.weight(), bn)?;
w = (w + wk)?;
b = (b + bk)?;
};
let conv_scale_bn = batch_norm(dim, 1e-5, vb.pp("conv_scale.bn"));
let conv_scale = conv2d_no_bias(
in_channels,
out_channels,
1,
conv2d_cfg,
vb.pp("conv_scale.conv"),
);
if let (Ok(conv), Ok(bn)) = (conv_scale, conv_scale_bn) {
let (ws, bs) = fuse_conv_bn(conv.weight(), bn)?;
// pad to 3x3
let ws = ws
.pad_with_zeros(D::Minus1, 1, 1)?
.pad_with_zeros(D::Minus2, 1, 1)?;
w = (w + ws)?;
b = (b + bs)?;
};
let se = squeeze_and_excitation(out_channels, out_channels / 16, vb.pp("se"));
// read and reparameterize the identity bn into wi and bi
let identity_bn = batch_norm(dim, 1e-5, vb.pp("identity"));
if let Ok(id_bn) = identity_bn {
let mut weights: Vec<f32> = vec![0.0; w.elem_count()];
let id = in_channels / groups;
// See https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L809
for i in 0..in_channels {
if kernel > 1 {
weights[i * kernel * kernel + 4] = 1.0;
} else {
weights[i * (id + 1)] = 1.0;
}
}
let weights = &Tensor::from_vec(weights, w.shape(), w.device())?;
let (wi, bi) = fuse_conv_bn(weights, id_bn)?;
w = (w + wi)?;
b = (b + bi)?;
};
let reparam_conv = Conv2d::new(w, Some(b), conv2d_cfg);
Ok(Func::new(move |xs| {
let mut xs = xs.apply(&reparam_conv)?;
if let Ok(f) = &se {
xs = xs.apply(f)?;
}
if use_act {
xs = xs.gelu_erf()?;
};
Ok(xs)
}))
}
fn repmixer(dim: usize, kernel: usize, vb: VarBuilder) -> Result<Func<'static>> {
let gamma = vb.get((dim, 1, 1), "layer_scale.gamma")?;
let norm = mobileone_block(dim, dim, kernel, 1, 1, false, vb.pp("norm"))?;
let mixer = mobileone_block(dim, dim, kernel, 1, 1, false, vb.pp("mixer"))?;
Ok(Func::new(move |xs| {
let residual = xs.clone();
let xs = (xs.apply(&mixer)? - xs.apply(&norm)?)?;
let xs = xs.broadcast_mul(&gamma.reshape((1, (), 1, 1))?)?;
let xs = (xs + residual)?;
Ok(xs)
}))
}
fn repmixer_block(dim: usize, exp_ratio: usize, vb: VarBuilder) -> Result<Func<'static>> {
let gamma = vb.get((dim, 1, 1), "layer_scale.gamma")?;
let token_mixer = repmixer(dim, 3, vb.pp("token_mixer"))?;
let mlp = conv_mlp(dim, exp_ratio, vb.pp("mlp"))?;
Ok(Func::new(move |xs| {
let residual = xs.apply(&token_mixer)?;
let mut xs = residual.apply(&mlp)?;
xs = xs.broadcast_mul(&gamma.reshape((1, (), 1, 1))?)?;
let xs = (xs + residual)?;
Ok(xs)
}))
}
fn positional_encoding(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride: 1,
padding: 3,
groups: dim,
..Default::default()
};
let conv = conv2d(dim, dim, 7, conv2d_cfg, vb.pp("pos_enc"))?;
Ok(Func::new(move |xs| {
let xs = (xs + xs.apply(&conv)?)?;
Ok(xs)
}))
}
fn attention(dim: usize, vb: VarBuilder) -> Result<Func<'static>> {
let qkv = linear_no_bias(dim, dim * 3, vb.pp("qkv"))?;
let proj = linear(dim, dim, vb.pp("proj"))?;
let head_dim = 32;
let num_heads = dim / head_dim;
let scale = (head_dim as f64).powf(-0.5);
Ok(Func::new(move |xs| {
let xs = xs.clone();
let (b, c, h, w) = xs.dims4()?;
let n = h * w;
let xs = xs.flatten_from(2)?.transpose(D::Minus1, D::Minus2)?;
let qkv = xs
.apply(&qkv)?
.reshape((b, n, 3, num_heads, head_dim))?
.permute((2, 0, 3, 1, 4))?;
let q = qkv.get(0)?;
let k = qkv.get(1)?;
let v = qkv.get(2)?;
let q = (q * scale)?;
let att = q.matmul(&k.transpose(D::Minus2, D::Minus1)?)?;
let att = softmax(&att, D::Minus1)?;
let xs = att.matmul(&v)?;
let xs = xs.transpose(1, 2)?.reshape((b, n, c))?;
let xs = xs.apply(&proj)?;
let xs = xs.transpose(D::Minus1, D::Minus2)?.reshape((b, c, h, w))?;
Ok(xs)
}))
}
fn attention_block(dim: usize, exp_ratio: usize, vb: VarBuilder) -> Result<Func<'static>> {
let gamma1 = vb.get((dim, 1, 1), "layer_scale_1.gamma")?;
let gamma2 = vb.get((dim, 1, 1), "layer_scale_2.gamma")?;
let norm = batch_norm(dim, 1e-5, vb.pp("norm"))?;
let token_mixer = attention(dim, vb.pp("token_mixer"))?;
let mlp = conv_mlp(dim, exp_ratio, vb.pp("mlp"))?;
Ok(Func::new(move |xs| {
let xs = xs.clone();
let xs = (&xs
+ &xs
.apply_t(&norm, false)?
.apply(&token_mixer)?
.broadcast_mul(&gamma1.reshape((1, (), 1, 1))?)?)?;
let xs = (&xs
+ &xs
.apply(&mlp)?
.broadcast_mul(&gamma2.reshape((1, (), 1, 1))?)?)?;
Ok(xs)
}))
}
fn fastvit_stage(cfg: &Config, idx: usize, vb: VarBuilder) -> Result<Func<'static>> {
let nblocks = cfg.blocks[idx];
let mut blocks = Vec::with_capacity(nblocks);
let dim = cfg.in_channels << idx;
let downsample = fastvit_patch_embed(dim / 2, dim, cfg.lkc_use_act, vb.pp("downsample"));
for block_idx in 0..nblocks {
let block = if cfg.attn && idx == 3 {
attention_block(dim, cfg.exp_ratio, vb.pp(format!("blocks.{block_idx}")))?
} else {
repmixer_block(dim, cfg.exp_ratio, vb.pp(format!("blocks.{block_idx}")))?
};
blocks.push(block);
}
let pos_emb = positional_encoding(dim, vb.pp("pos_emb"));
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
if let Ok(ds) = &downsample {
xs = xs.apply(ds)?;
}
if let Ok(pos) = &pos_emb {
xs = xs.apply(pos)?;
}
for block in blocks.iter() {
xs = xs.apply(block)?;
}
Ok(xs)
}))
}
fn fastvit_patch_embed(
in_channels: usize,
out_channels: usize,
use_act: bool,
vb: VarBuilder,
) -> Result<Func<'static>> {
let lk = conv_norm(in_channels, out_channels, 7, 2, vb.pp("proj.0.large_conv"))?;
let sk = conv_norm(in_channels, out_channels, 3, 2, vb.pp("proj.0.small_conv"))?;
let se = squeeze_and_excitation(out_channels, out_channels / 4, vb.pp("proj.0.se"));
let mb = mobileone_block(out_channels, out_channels, 1, 1, 0, true, vb.pp("proj.1"))?;
Ok(Func::new(move |xs| {
let mut xs = (xs.apply(&lk)? + xs.apply(&sk)?)?;
if let Ok(f) = &se {
xs = xs.apply(f)?;
}
if use_act {
xs = xs.gelu_erf()?;
};
let xs = xs.apply(&mb)?;
Ok(xs)
}))
}
fn fastvit_stem(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let mb0 = mobileone_block(in_channels, out_channels, 3, 2, 0, true, vb.pp(0))?;
let mb1 = mobileone_block(out_channels, out_channels, 3, 2, 1, true, vb.pp(1))?;
let mb2 = mobileone_block(out_channels, out_channels, 1, 1, 0, true, vb.pp(2))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&mb0)?.apply(&mb1)?.apply(&mb2)?;
Ok(xs)
}))
}
// Build a fastvit model for a given configuration.
fn fastvit_model(cfg: &Config, nclasses: Option<usize>, vb: VarBuilder) -> Result<Func<'static>> {
let cls = match nclasses {
None => None,
Some(nclasses) => {
let linear = linear(cfg.in_channels * 16, nclasses, vb.pp("head.fc"))?;
Some(linear)
}
};
let stem = fastvit_stem(3, cfg.in_channels, vb.pp("stem"))?;
let final_conv = mobileone_block(
cfg.in_channels * 8,
cfg.in_channels * 16,
3,
1,
1,
true,
vb.pp("final_conv"),
)?;
let vb = vb.pp("stages");
let stage1 = fastvit_stage(cfg, 0, vb.pp(0))?;
let stage2 = fastvit_stage(cfg, 1, vb.pp(1))?;
let stage3 = fastvit_stage(cfg, 2, vb.pp(2))?;
let stage4 = fastvit_stage(cfg, 3, vb.pp(3))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&stem)?
.apply(&stage1)?
.apply(&stage2)?
.apply(&stage3)?
.apply(&stage4)?
.apply(&final_conv)?;
match &cls {
None => Ok(xs),
Some(cls) => xs.mean(D::Minus2)?.mean(D::Minus1)?.apply(cls),
}
}))
}
pub fn fastvit(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
fastvit_model(cfg, Some(nclasses), vb)
}
pub fn fastvit_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
fastvit_model(cfg, None, vb)
}
| candle/candle-transformers/src/models/fastvit.rs/0 | {
"file_path": "candle/candle-transformers/src/models/fastvit.rs",
"repo_id": "candle",
"token_count": 8020
} |
use std::collections::HashMap;
use crate::models::{
clip::{text_model::Activation, vision_model::ClipVisionConfig},
llama::{Config, LlamaEosToks},
};
use serde::{Deserialize, Serialize};
// original config from liuhaotian/llava
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LLaVAConfig {
pub architectures: Vec<String>,
pub bos_token_id: usize,
pub eos_token_id: usize,
pub hidden_size: usize,
#[serde(default = "default_image_aspect_ratio")]
pub image_aspect_ratio: String,
pub image_crop_resolution: usize,
pub image_grid_pinpoints: Vec<(u32, u32)>,
pub image_split_resolution: usize,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub mm_hidden_size: usize,
#[serde(default = "default_mm_patch_merge_type")]
pub mm_patch_merge_type: String,
pub mm_projector_type: String,
pub mm_use_im_start_end: bool,
pub mm_vision_select_feature: String,
pub mm_vision_select_layer: isize,
pub mm_vision_tower: Option<String>,
pub model_type: String,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub pad_token_id: usize,
pub rms_norm_eps: f32,
pub rope_theta: f32,
pub tokenizer_model_max_length: Option<usize>,
pub torch_dtype: String,
pub use_cache: bool,
pub vocab_size: usize,
#[serde(default = "default_image_token_index")]
pub image_token_index: isize,
#[serde(default = "default_hf")]
pub hf: bool,
pub tie_word_embeddings: Option<bool>,
}
fn default_hf() -> bool {
false
}
fn default_image_token_index() -> isize {
-200
}
fn default_mm_patch_merge_type() -> String {
"flat".to_string()
}
fn default_image_aspect_ratio() -> String {
"square".to_string()
}
impl LLaVAConfig {
pub fn to_llama_config(&self) -> Config {
Config {
hidden_size: self.hidden_size,
intermediate_size: self.intermediate_size,
vocab_size: self.vocab_size,
num_hidden_layers: self.num_hidden_layers,
num_attention_heads: self.num_attention_heads,
num_key_value_heads: self.num_key_value_heads,
rms_norm_eps: self.rms_norm_eps as f64,
rope_theta: self.rope_theta,
bos_token_id: Some(self.bos_token_id as u32),
eos_token_id: Some(LlamaEosToks::Single(self.eos_token_id as u32)),
use_flash_attn: false,
rope_scaling: None, // Assume we don't have LLaVA for Llama 3.1
max_position_embeddings: self.max_position_embeddings,
tie_word_embeddings: self.tie_word_embeddings.unwrap_or(false),
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HFLLaVATextConfig {
pub architectures: Vec<String>,
#[serde(default = "default_hidden_size")]
pub hidden_size: usize,
#[serde(default = "default_intermediate_size")]
pub intermediate_size: usize,
#[serde(default = "default_max_length")]
pub max_length: usize,
pub max_position_embeddings: usize,
pub model_type: String,
#[serde(default = "default_num_attention_heads")]
pub num_attention_heads: usize,
#[serde(default = "default_num_hidden_layers")]
pub num_hidden_layers: usize,
#[serde(default = "default_num_key_value_heads")]
pub num_key_value_heads: usize,
pub pad_token_id: usize,
pub rms_norm_eps: f32,
#[serde(default = "default_rope_theta")]
pub rope_theta: f32,
pub torch_dtype: String,
#[serde(default = "default_use_cache")]
pub use_cache: bool,
pub vocab_size: usize,
}
fn default_num_hidden_layers() -> usize {
32
}
fn default_use_cache() -> bool {
true
}
fn default_hidden_size() -> usize {
4096
}
fn default_intermediate_size() -> usize {
11008
}
fn default_max_length() -> usize {
4096
}
fn default_num_attention_heads() -> usize {
32
}
fn default_num_key_value_heads() -> usize {
32
}
fn default_rope_theta() -> f32 {
10000.0
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HFLLaVAVisionConfig {
pub hidden_size: usize,
pub image_size: usize,
pub intermediate_size: usize,
pub model_type: String,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub patch_size: usize,
pub projection_dim: usize,
pub vocab_size: usize,
}
// config from llava-v1.6-vicuna-7b-hf
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HFLLaVAConfig {
pub architectures: Vec<String>,
pub ignore_index: isize,
pub image_grid_pinpoints: Vec<(u32, u32)>,
pub image_token_index: isize,
pub model_type: String,
pub projector_hidden_act: String,
pub text_config: HFLLaVATextConfig,
pub torch_dtype: String,
pub use_image_newline_parameter: bool,
pub vision_config: HFLLaVAVisionConfig,
pub vision_feature_layer: isize,
pub vision_feature_select_strategy: String,
pub vocab_size: usize,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HFGenerationConfig {
pub bos_token_id: usize,
pub eos_token_id: usize,
#[serde(default = "default_max_length")]
pub max_length: usize,
pub pad_token_id: usize,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HFPreProcessorConfig {
pub aspect_ratio_setting: String,
pub crop_size: HashMap<String, usize>,
pub do_center_crop: bool,
pub do_convert_rgb: bool,
pub do_normalize: bool,
pub do_rescale: bool,
pub do_resize: bool,
pub image_mean: Vec<f32>,
pub image_std: Vec<f32>,
pub resample: u32,
pub rescale_factor: f32,
pub size: HashMap<String, f32>,
}
impl HFLLaVAConfig {
pub fn to_clip_vision_config(&self) -> ClipVisionConfig {
ClipVisionConfig {
embed_dim: self.vision_config.hidden_size,
activation: Activation::QuickGelu,
intermediate_size: self.vision_config.intermediate_size,
num_hidden_layers: self.vision_config.num_hidden_layers,
num_attention_heads: self.vision_config.num_attention_heads,
projection_dim: self.vision_config.projection_dim,
num_channels: 3,
image_size: self.vision_config.image_size,
patch_size: self.vision_config.patch_size,
}
}
fn map_projector_type(s: &str) -> String {
if s == "gelu" {
"mlp2x_gelu".to_string()
} else {
s.to_string()
}
}
fn map_select_feature(s: &str) -> String {
if s == "default" {
"patch".to_string()
} else {
"cls_patch".to_string()
}
}
pub fn to_llava_config(
&self,
generation_config: &HFGenerationConfig,
preprocessor_config: &HFPreProcessorConfig,
) -> LLaVAConfig {
LLaVAConfig {
hf: true,
architectures: self.architectures.clone(),
bos_token_id: generation_config.bos_token_id,
eos_token_id: generation_config.eos_token_id,
hidden_size: self.text_config.hidden_size,
image_aspect_ratio: preprocessor_config.aspect_ratio_setting.clone(),
image_crop_resolution: 224,
image_grid_pinpoints: self.image_grid_pinpoints.clone(),
image_split_resolution: 224,
intermediate_size: self.text_config.intermediate_size,
max_position_embeddings: self.text_config.max_position_embeddings,
mm_hidden_size: 1024,
mm_patch_merge_type: "spatial_unpad".to_string(),
mm_projector_type: Self::map_projector_type(&self.projector_hidden_act),
mm_use_im_start_end: false,
mm_vision_select_feature: Self::map_select_feature(
&self.vision_feature_select_strategy,
),
mm_vision_select_layer: self.vision_feature_layer,
mm_vision_tower: None,
model_type: self.model_type.clone(),
num_attention_heads: self.text_config.num_attention_heads,
num_hidden_layers: self.text_config.num_hidden_layers,
num_key_value_heads: self.text_config.num_key_value_heads,
pad_token_id: self.text_config.pad_token_id,
rms_norm_eps: self.text_config.rms_norm_eps,
rope_theta: self.text_config.rope_theta,
tokenizer_model_max_length: Some(4096),
torch_dtype: self.torch_dtype.clone(),
use_cache: self.text_config.use_cache,
vocab_size: self.vocab_size,
image_token_index: self.image_token_index,
tie_word_embeddings: None,
}
}
}
| candle/candle-transformers/src/models/llava/config.rs/0 | {
"file_path": "candle/candle-transformers/src/models/llava/config.rs",
"repo_id": "candle",
"token_count": 3948
} |
use candle::{bail, DType, Module, Result, Tensor};
use candle_nn as nn;
pub struct PatchEmbedder {
proj: nn::Conv2d,
}
impl PatchEmbedder {
pub fn new(
patch_size: usize,
in_channels: usize,
embed_dim: usize,
vb: nn::VarBuilder,
) -> Result<Self> {
let proj = nn::conv2d(
in_channels,
embed_dim,
patch_size,
nn::Conv2dConfig {
stride: patch_size,
..Default::default()
},
vb.pp("proj"),
)?;
Ok(Self { proj })
}
}
impl Module for PatchEmbedder {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = self.proj.forward(x)?;
// flatten spatial dim and transpose to channels last
let (b, c, h, w) = x.dims4()?;
x.reshape((b, c, h * w))?.transpose(1, 2)
}
}
pub struct Unpatchifier {
patch_size: usize,
out_channels: usize,
}
impl Unpatchifier {
pub fn new(patch_size: usize, out_channels: usize) -> Result<Self> {
Ok(Self {
patch_size,
out_channels,
})
}
pub fn unpatchify(&self, x: &Tensor, h: usize, w: usize) -> Result<Tensor> {
let h = (h + 1) / self.patch_size;
let w = (w + 1) / self.patch_size;
let x = x.reshape((
x.dim(0)?,
h,
w,
self.patch_size,
self.patch_size,
self.out_channels,
))?;
let x = x.permute((0, 5, 1, 3, 2, 4))?; // "nhwpqc->nchpwq"
x.reshape((
x.dim(0)?,
self.out_channels,
self.patch_size * h,
self.patch_size * w,
))
}
}
pub struct PositionEmbedder {
pos_embed: Tensor,
patch_size: usize,
pos_embed_max_size: usize,
}
impl PositionEmbedder {
pub fn new(
hidden_size: usize,
patch_size: usize,
pos_embed_max_size: usize,
vb: nn::VarBuilder,
) -> Result<Self> {
let pos_embed = vb.get(
(1, pos_embed_max_size * pos_embed_max_size, hidden_size),
"pos_embed",
)?;
Ok(Self {
pos_embed,
patch_size,
pos_embed_max_size,
})
}
pub fn get_cropped_pos_embed(&self, h: usize, w: usize) -> Result<Tensor> {
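// Convert the input height/width to a patch-grid size, then crop a centered h x w
// window out of the (pos_embed_max_size, pos_embed_max_size) position grid.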
let h = (h + 1) / self.patch_size;
let w = (w + 1) / self.patch_size;
if h > self.pos_embed_max_size || w > self.pos_embed_max_size {
bail!("Input size is too large for the position embedding")
}
let top = (self.pos_embed_max_size - h) / 2;
let left = (self.pos_embed_max_size - w) / 2;
let pos_embed =
self.pos_embed
.reshape((1, self.pos_embed_max_size, self.pos_embed_max_size, ()))?;
let pos_embed = pos_embed.narrow(1, top, h)?.narrow(2, left, w)?;
pos_embed.reshape((1, h * w, ()))
}
}
pub struct TimestepEmbedder {
mlp: nn::Sequential,
frequency_embedding_size: usize,
}
impl TimestepEmbedder {
pub fn new(
hidden_size: usize,
frequency_embedding_size: usize,
vb: nn::VarBuilder,
) -> Result<Self> {
let mlp = nn::seq()
.add(nn::linear(
frequency_embedding_size,
hidden_size,
vb.pp("mlp.0"),
)?)
.add(nn::Activation::Silu)
.add(nn::linear(hidden_size, hidden_size, vb.pp("mlp.2"))?);
Ok(Self {
mlp,
frequency_embedding_size,
})
}
fn timestep_embedding(t: &Tensor, dim: usize, max_period: f64) -> Result<Tensor> {
if dim % 2 != 0 {
bail!("Embedding dimension must be even")
}
if t.dtype() != DType::F32 && t.dtype() != DType::F64 {
bail!("Input tensor must be floating point")
}
let half = dim / 2;
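// Standard sinusoidal embedding: freqs[i] = exp(-ln(max_period) * i / half) for
// i in 0..half, and the output is [cos(t * freqs), sin(t * freqs)] concatenated
// along the last dimension.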
let freqs = Tensor::arange(0f32, half as f32, t.device())?
.to_dtype(candle::DType::F32)?
.mul(&Tensor::full(
(-f64::ln(max_period) / half as f64) as f32,
half,
t.device(),
)?)?
.exp()?;
let args = t
.unsqueeze(1)?
.to_dtype(candle::DType::F32)?
.matmul(&freqs.unsqueeze(0)?)?;
let embedding = Tensor::cat(&[args.cos()?, args.sin()?], 1)?;
embedding.to_dtype(candle::DType::F16)
}
}
impl Module for TimestepEmbedder {
fn forward(&self, t: &Tensor) -> Result<Tensor> {
let t_freq = Self::timestep_embedding(t, self.frequency_embedding_size, 10000.0)?;
self.mlp.forward(&t_freq)
}
}
pub struct VectorEmbedder {
mlp: nn::Sequential,
}
impl VectorEmbedder {
pub fn new(input_dim: usize, hidden_size: usize, vb: nn::VarBuilder) -> Result<Self> {
let mlp = nn::seq()
.add(nn::linear(input_dim, hidden_size, vb.pp("mlp.0"))?)
.add(nn::Activation::Silu)
.add(nn::linear(hidden_size, hidden_size, vb.pp("mlp.2"))?);
Ok(Self { mlp })
}
}
impl Module for VectorEmbedder {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
self.mlp.forward(x)
}
}
| candle/candle-transformers/src/models/mmdit/embedding.rs/0 | {
"file_path": "candle/candle-transformers/src/models/mmdit/embedding.rs",
"repo_id": "candle",
"token_count": 2837
} |
//! Text encoder as used in most OpenCLIP pretrained models
//! https://github.com/mlfoundations/open_clip
use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::{
embedding, layer_norm, linear, ops::softmax_last_dim, Embedding, LayerNorm, Linear, Module,
VarBuilder,
};
#[derive(Debug, Clone)]
pub struct Config {
pub vocab_size: usize,
pub embed_dim: usize,
pub intermediate_size: usize,
pub max_position_embeddings: usize,
pub pad_with: Option<String>,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub projection_dim: usize,
}
impl Config {
pub fn vit_base_patch32() -> Self {
Self {
vocab_size: 49408,
embed_dim: 512,
intermediate_size: 2048,
max_position_embeddings: 77,
pad_with: None,
num_hidden_layers: 12,
num_attention_heads: 8,
projection_dim: 512,
}
}
}
#[derive(Clone, Debug)]
struct TextEmbeddings {
token_embedding: Embedding,
position_embedding: Tensor,
}
impl TextEmbeddings {
fn new(vs: VarBuilder, c: &Config) -> Result<Self> {
let token_embedding = embedding(c.vocab_size, c.embed_dim, vs.pp("token_embedding"))?;
let position_embedding = vs.get(
(c.max_position_embeddings, c.embed_dim),
"positional_embedding",
)?;
Ok(TextEmbeddings {
token_embedding,
position_embedding,
})
}
}
impl Module for TextEmbeddings {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let seq_length = input_ids.dim(D::Minus1)?;
let inputs_embeds = self.token_embedding.forward(input_ids)?;
let position_embedding = self.position_embedding.narrow(0, 0, seq_length)?;
inputs_embeds.broadcast_add(&position_embedding)
}
}
#[derive(Clone, Debug)]
struct Attention {
k_proj: candle_nn::Linear,
v_proj: candle_nn::Linear,
q_proj: candle_nn::Linear,
out_proj: Linear,
head_dim: usize,
scale: f64,
num_attention_heads: usize,
}
impl Attention {
fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> {
let embed_dim = c.embed_dim;
let num_attention_heads = c.num_attention_heads;
let in_proj_weights = vs
.get((embed_dim * 3, embed_dim), "in_proj_weight")?
.chunk(3, 0)?;
let (q_w, k_w, v_w) = (
&in_proj_weights[0],
&in_proj_weights[1],
&in_proj_weights[2],
);
let in_proj_biases = vs.get(embed_dim * 3, "in_proj_bias")?.chunk(3, 0)?;
let (q_b, k_b, v_b) = (&in_proj_biases[0], &in_proj_biases[1], &in_proj_biases[2]);
let q_proj = Linear::new(q_w.clone(), Some(q_b.clone()));
let k_proj = Linear::new(k_w.clone(), Some(k_b.clone()));
let v_proj = Linear::new(v_w.clone(), Some(v_b.clone()));
let out_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("out_proj"))?;
let head_dim = embed_dim / num_attention_heads;
let scale = (head_dim as f64).powf(-0.5);
Ok(Attention {
k_proj,
v_proj,
q_proj,
out_proj,
head_dim,
scale,
num_attention_heads,
})
}
fn shape_multihead(&self, xs: &Tensor, bsz: usize, seq_len: usize) -> Result<Tensor> {
xs.reshape((bsz, seq_len, self.num_attention_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?
.to_dtype(DType::F32)
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let in_dtype = xs.dtype();
let (bsz, seq_len, embed_dim) = xs.dims3()?;
let q = self.shape_multihead(&self.q_proj.forward(xs)?, bsz, seq_len)?;
let k = self.shape_multihead(&self.k_proj.forward(xs)?, bsz, seq_len)?;
let v = self.shape_multihead(&self.v_proj.forward(xs)?, bsz, seq_len)?;
let q = (q * self.scale)?;
let attn_weights = q.matmul(&k.transpose(D::Minus1, D::Minus2)?)?;
let attn_weights = softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&v)?.to_dtype(in_dtype)?;
let attn_output = attn_output
.transpose(1, 2)?
.contiguous()?
.reshape((bsz, seq_len, embed_dim))?;
let out = self.out_proj.forward(&attn_output)?;
Ok(out)
}
}
#[derive(Clone, Debug)]
struct Mlp {
fc1: Linear,
fc2: Linear,
}
impl Mlp {
fn new(vs: VarBuilder, c: &Config) -> Result<Self> {
let fc1 = linear(c.embed_dim, c.intermediate_size, vs.pp("c_fc"))?;
let fc2 = linear(c.intermediate_size, c.embed_dim, vs.pp("c_proj"))?;
Ok(Mlp { fc1, fc2 })
}
}
impl Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.fc1.forward(xs)?;
self.fc2.forward(&xs.gelu_erf()?)
}
}
#[derive(Clone, Debug)]
struct EncoderLayer {
self_attn: Attention,
layer_norm1: LayerNorm,
mlp: Mlp,
layer_norm2: LayerNorm,
}
impl EncoderLayer {
fn new(vs: VarBuilder, c: &Config) -> Result<Self> {
let self_attn = Attention::new(vs.pp("attn"), c)?;
let layer_norm1 = layer_norm(c.embed_dim, 1e-5, vs.pp("ln_1"))?;
let mlp = Mlp::new(vs.pp("mlp"), c)?;
let layer_norm2 = layer_norm(c.embed_dim, 1e-5, vs.pp("ln_2"))?;
Ok(EncoderLayer {
self_attn,
layer_norm1,
mlp,
layer_norm2,
})
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let residual = xs;
let xs = self.layer_norm1.forward(xs)?;
let xs = self.self_attn.forward(&xs)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = self.layer_norm2.forward(&xs)?;
let xs = self.mlp.forward(&xs)?;
let out = (xs + residual)?;
Ok(out)
}
}
#[derive(Clone, Debug)]
pub struct Encoder {
layers: Vec<EncoderLayer>,
}
impl Encoder {
pub fn new(vs: VarBuilder, c: &Config) -> Result<Self> {
let vs = vs.pp("resblocks");
let mut layers: Vec<EncoderLayer> = Vec::new();
for index in 0..c.num_hidden_layers {
let layer = EncoderLayer::new(vs.pp(index.to_string()), c)?;
layers.push(layer)
}
Ok(Encoder { layers })
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs)?;
}
Ok(xs)
}
}
/// A text transformer as used in CLIP variants.
#[derive(Clone, Debug)]
pub struct OpenClipTextTransformer {
embeddings: TextEmbeddings,
encoder: Encoder,
final_layer_norm: LayerNorm,
}
impl OpenClipTextTransformer {
pub fn new(vs: VarBuilder, c: &Config) -> Result<Self> {
let embeddings = TextEmbeddings::new(vs.clone(), c)?;
let final_layer_norm = layer_norm(c.embed_dim, 1e-5, vs.pp("ln_final"))?;
let encoder = Encoder::new(vs.pp("transformer"), c)?;
Ok(OpenClipTextTransformer {
embeddings,
encoder,
final_layer_norm,
})
}
pub fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let input_ids = self.embeddings.forward(input_ids)?;
let input_ids = self.encoder.forward(&input_ids)?;
self.final_layer_norm.forward(&input_ids)
}
}
impl Module for OpenClipTextTransformer {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let output = self.forward(input_ids)?;
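// Pool each sequence at the position of its largest token id, which corresponds to
// the end-of-text token for the OpenCLIP tokenizer.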
let sequence_max_indices = input_ids.argmax(D::Minus1)?.to_dtype(DType::I64)?;
let mut indices = Vec::new();
for (batch_idx, &seq_idx) in sequence_max_indices.to_vec1::<i64>()?.iter().enumerate() {
let index = output.i((batch_idx, seq_idx as usize))?.unsqueeze(0)?;
indices.push(index);
}
Tensor::cat(&indices, 0)
}
}
| candle/candle-transformers/src/models/openclip/text_model.rs/0 | {
"file_path": "candle/candle-transformers/src/models/openclip/text_model.rs",
"repo_id": "candle",
"token_count": 3955
} |
//! Implementation of a quantized Moondream vision language model.
//!
//! Moondream is a lightweight vision-language model for image understanding and generation.
//! This module provides a quantized version for reduced memory usage and faster inference.
//!
//! Key features:
//! - ViT-based vision encoder
//! - Phi-2 text decoder model
//! - Memory efficient 8-bit quantization
//! - Optimized for efficient deployment
//!
//! References:
//! - [Moondream Model](https://github.com/vikhyat/moondream)
//!
use crate::models::moondream::{Config, VisionConfig};
use crate::models::quantized_mixformer::MixFormerSequentialForCausalLM as PhiModel;
use crate::quantized_nn::{layer_norm, linear_b, Linear};
use crate::quantized_var_builder::VarBuilder;
use candle::{IndexOp, Module, Result, Tensor, D};
fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
let dim = q.dim(D::Minus1)?;
let scale_factor = 1.0 / (dim as f64).sqrt();
let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?;
candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(v)
}
#[derive(Debug, Clone)]
struct LinearPatchEmbedding {
linear: Linear,
}
impl LinearPatchEmbedding {
fn new(vb: VarBuilder) -> Result<Self> {
let linear = linear_b(588, 1152, true, vb.pp("linear"))?;
Ok(Self { linear })
}
}
impl Module for LinearPatchEmbedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.linear)
}
}
#[derive(Debug, Clone)]
struct Attention {
num_heads: usize,
head_dim: usize,
qkv: Linear,
proj: Linear,
}
impl Attention {
pub fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> {
let qkv = linear_b(dim, dim * 3, true, vb.pp("qkv"))?;
let proj = linear_b(dim, dim, true, vb.pp("proj"))?;
Ok(Self {
num_heads,
head_dim: dim / num_heads,
qkv,
proj,
})
}
}
impl Module for Attention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b, n, c) = xs.dims3()?;
let qkv = xs
.apply(&self.qkv)?
.reshape((b, n, 3, self.num_heads, self.head_dim))?
.permute((2, 0, 3, 1, 4))?;
let (q, k, v) = (
qkv.i(0)?.contiguous()?,
qkv.i(1)?.contiguous()?,
qkv.i(2)?.contiguous()?,
);
scaled_dot_product_attention(&q, &k, &v)?
.transpose(1, 2)?
.reshape((b, n, c))?
.apply(&self.proj)
}
}
#[derive(Debug, Clone)]
struct VitBlock {
attn: Attention,
mlp: Mlp,
norm1: candle_nn::LayerNorm,
norm2: candle_nn::LayerNorm,
}
impl VitBlock {
fn new(vb: VarBuilder, dim: usize, num_heads: usize, cfg: &VisionConfig) -> Result<Self> {
let attn = Attention::new(vb.pp("attn"), dim, num_heads)?;
let mlp = Mlp::new(vb.pp("mlp"), dim, cfg.hidden_features, dim, cfg.act)?;
let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?;
let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?;
Ok(Self {
attn,
mlp,
norm1,
norm2,
})
}
}
impl Module for VitBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let ys = xs.apply(&self.norm1)?.apply(&self.attn)?;
let xs = (xs + &ys)?;
let ys = xs.apply(&self.norm2)?.apply(&self.mlp)?;
let xs = (&xs + &ys)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct VisionTransformer {
patch_embed: LinearPatchEmbedding,
pos_embed: Tensor,
blocks: Vec<VitBlock>,
norm: candle_nn::LayerNorm,
}
impl VisionTransformer {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let patch_embed = LinearPatchEmbedding::new(vb.pp("patch_embed"))?;
let pos_embed = vb
.get((1, cfg.embed_len, cfg.embed_dim), "pos_embed")?
.dequantize(vb.device())?;
let blocks = (0..cfg.num_blocks)
.map(|i| {
VitBlock::new(
vb.pp(format!("blocks.{}", i)),
cfg.embed_dim,
cfg.num_heads,
cfg,
)
})
.collect::<Result<_>>()?;
let norm = layer_norm(cfg.embed_dim, 1e-5, vb.pp("norm"))?;
Ok(Self {
patch_embed,
pos_embed,
blocks,
norm,
})
}
}
impl Module for VisionTransformer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = (&xs.apply(&self.patch_embed)? + &self.pos_embed)?;
for block in self.blocks.iter() {
xs = xs.apply(block)?;
}
xs.apply(&self.norm)
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
model: VisionTransformer,
}
impl Encoder {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let model = VisionTransformer::new(cfg, vb.pp("model.visual"))?;
Ok(Self { model })
}
}
impl Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.model)
}
}
#[derive(Debug, Clone)]
struct Mlp {
fc1: Linear,
act: candle_nn::Activation,
fc2: Linear,
}
impl Mlp {
fn new(
vb: VarBuilder,
in_features: usize,
hidden_features: usize,
out_features: usize,
act: candle_nn::Activation,
) -> Result<Self> {
let fc1 = linear_b(in_features, hidden_features, true, vb.pp("fc1"))?;
let fc2 = linear_b(hidden_features, out_features, true, vb.pp("fc2"))?;
Ok(Self { fc1, act, fc2 })
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2)
}
}
#[derive(Debug, Clone)]
struct VisionProjection {
mlp: Mlp,
}
impl VisionProjection {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let mlp = Mlp::new(
vb.pp("mlp"),
cfg.image_embedding_dim,
cfg.hidden_dim,
cfg.model_dim,
cfg.act,
)?;
Ok(Self { mlp })
}
}
impl Module for VisionProjection {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.mlp)
}
}
#[derive(Debug, Clone)]
pub struct VisionEncoder {
encoder: Encoder,
projection: VisionProjection,
}
impl VisionEncoder {
pub fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let projection = VisionProjection::new(cfg, vb.pp("projection"))?;
Ok(Self {
encoder,
projection,
})
}
}
impl Module for VisionEncoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b, c, hp1, wp2) = xs.dims4()?;
let (p1, p2) = (14, 14);
let h = hp1 / p1;
let w = wp2 / p2;
xs.reshape((b, c, h, p1, w, p2))? // split into an (h, p1) x (w, p2) patch grid
.permute((0, 2, 4, 1, 3, 5))?
.reshape((b, h * w, c * p1 * p2))?
.apply(&self.encoder)?
.apply(&self.projection)
}
}
pub struct Model {
pub text_model: PhiModel,
pub vision_encoder: VisionEncoder,
}
impl Model {
pub fn new(config: &Config, vb: VarBuilder) -> Result<Self> {
let text_model = PhiModel::new_v2(&config.phi_config, vb.pp("text_model"))?;
let vision_encoder = VisionEncoder::new(&config.vision_config, vb.pp("vision_encoder"))?;
Ok(Self {
text_model,
vision_encoder,
})
}
pub fn vision_encoder(&self) -> &VisionEncoder {
&self.vision_encoder
}
pub fn text_model(&mut self) -> &mut PhiModel {
&mut self.text_model
}
}
| candle/candle-transformers/src/models/quantized_moondream.rs/0 | {
"file_path": "candle/candle-transformers/src/models/quantized_moondream.rs",
"repo_id": "candle",
"token_count": 3810
} |
//! Stable Diffusion
//!
//! Stable Diffusion is a latent text-to-image diffusion model capable of
//! generating photo-realistic images given any text input.
//!
//! - 💻 [Original Repository](https://github.com/CompVis/stable-diffusion)
//! - 🤗 [Hugging Face](https://huggingface.co/runwayml/stable-diffusion-v1-5)
//! - The default scheduler for the v1.5, v2.1 and XL 1.0 version is the Denoising Diffusion Implicit Model scheduler (DDIM). The original paper and some code can be found in the [associated repo](https://github.com/ermongroup/ddim). The default scheduler for the XL Turbo version is the Euler Ancestral scheduler.
//!
//!
//! # Example
//!
//! <div align=center>
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg" alt="rusty robot holding a candle" width=320>
//! </div>
//!
//! _"A rusty robot holding a fire torch in its hand."_ Generated by Stable Diffusion XL using Rust and [candle](https://github.com/huggingface/candle).
//!
//! ```bash
//! # example running with cuda
//! # see the candle-examples/examples/stable-diffusion for all options
//! cargo run --example stable-diffusion --release --features=cuda,cudnn \
//! -- --prompt "a cosmonaut on a horse (hd, realistic, high-def)"
//!
//! # with sd-turbo
//! cargo run --example stable-diffusion --release --features=cuda,cudnn \
//! -- --prompt "a cosmonaut on a horse (hd, realistic, high-def)" \
//! --sd-version turbo
//!
//! # with flash attention.
//! # feature flag: `--features flash-attn`
//! # cli flag: `--use-flash-attn`.
//! # flash-attention-v2 is only compatible with Ampere, Ada, \
//! # or Hopper GPUs (e.g., A100/H100, RTX 3090/4090).
//! cargo run --example stable-diffusion --release --features=cuda,cudnn,flash-attn \
//! -- --prompt "a cosmonaut on a horse (hd, realistic, high-def)" \
//! --use-flash-attn
//! ```
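//!
//! The same building blocks can be driven directly from Rust. The following is a
//! rough sketch; the weight paths are placeholders and must point at locally
//! downloaded safetensors files:
//!
//! ```no_run
//! use candle::{DType, Device};
//! use candle_transformers::models::stable_diffusion::StableDiffusionConfig;
//!
//! fn build_pipeline() -> candle::Result<()> {
//!     let device = Device::cuda_if_available(0)?;
//!     // v1.5 config with the default 512x512 resolution and DDIM scheduler.
//!     let cfg = StableDiffusionConfig::v1_5(None, None, None);
//!     let scheduler = cfg.build_scheduler(30)?;
//!     let unet = cfg.build_unet("unet.safetensors", &device, 4, false, DType::F32)?;
//!     let vae = cfg.build_vae("vae.safetensors", &device, DType::F32)?;
//!     let _ = (scheduler, unet, vae);
//!     Ok(())
//! }
//! ```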
pub mod attention;
pub mod clip;
pub mod ddim;
pub mod ddpm;
pub mod embeddings;
pub mod euler_ancestral_discrete;
pub mod resnet;
pub mod schedulers;
pub mod unet_2d;
pub mod unet_2d_blocks;
pub mod uni_pc;
pub mod utils;
pub mod vae;
use std::sync::Arc;
use candle::{DType, Device, Result};
use candle_nn as nn;
use self::schedulers::{Scheduler, SchedulerConfig};
#[derive(Clone, Debug)]
pub struct StableDiffusionConfig {
pub width: usize,
pub height: usize,
pub clip: clip::Config,
pub clip2: Option<clip::Config>,
autoencoder: vae::AutoEncoderKLConfig,
unet: unet_2d::UNet2DConditionModelConfig,
scheduler: Arc<dyn SchedulerConfig>,
}
impl StableDiffusionConfig {
pub fn v1_5(
sliced_attention_size: Option<usize>,
height: Option<usize>,
width: Option<usize>,
) -> Self {
let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig {
out_channels,
use_cross_attn,
attention_head_dim,
};
// https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/unet/config.json
let unet = unet_2d::UNet2DConditionModelConfig {
blocks: vec![
bc(320, Some(1), 8),
bc(640, Some(1), 8),
bc(1280, Some(1), 8),
bc(1280, None, 8),
],
center_input_sample: false,
cross_attention_dim: 768,
downsample_padding: 1,
flip_sin_to_cos: true,
freq_shift: 0.,
layers_per_block: 2,
mid_block_scale_factor: 1.,
norm_eps: 1e-5,
norm_num_groups: 32,
sliced_attention_size,
use_linear_projection: false,
};
let autoencoder = vae::AutoEncoderKLConfig {
block_out_channels: vec![128, 256, 512, 512],
layers_per_block: 2,
latent_channels: 4,
norm_num_groups: 32,
use_quant_conv: true,
use_post_quant_conv: true,
};
let height = if let Some(height) = height {
assert_eq!(height % 8, 0, "height has to be divisible by 8");
height
} else {
512
};
let width = if let Some(width) = width {
assert_eq!(width % 8, 0, "width has to be divisible by 8");
width
} else {
512
};
let scheduler = Arc::new(ddim::DDIMSchedulerConfig {
prediction_type: schedulers::PredictionType::Epsilon,
..Default::default()
});
StableDiffusionConfig {
width,
height,
clip: clip::Config::v1_5(),
clip2: None,
autoencoder,
scheduler,
unet,
}
}
fn v2_1_(
sliced_attention_size: Option<usize>,
height: Option<usize>,
width: Option<usize>,
prediction_type: schedulers::PredictionType,
) -> Self {
let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig {
out_channels,
use_cross_attn,
attention_head_dim,
};
// https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/unet/config.json
let unet = unet_2d::UNet2DConditionModelConfig {
blocks: vec![
bc(320, Some(1), 5),
bc(640, Some(1), 10),
bc(1280, Some(1), 20),
bc(1280, None, 20),
],
center_input_sample: false,
cross_attention_dim: 1024,
downsample_padding: 1,
flip_sin_to_cos: true,
freq_shift: 0.,
layers_per_block: 2,
mid_block_scale_factor: 1.,
norm_eps: 1e-5,
norm_num_groups: 32,
sliced_attention_size,
use_linear_projection: true,
};
// https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/vae/config.json
let autoencoder = vae::AutoEncoderKLConfig {
block_out_channels: vec![128, 256, 512, 512],
layers_per_block: 2,
latent_channels: 4,
norm_num_groups: 32,
use_quant_conv: true,
use_post_quant_conv: true,
};
let scheduler = Arc::new(ddim::DDIMSchedulerConfig {
prediction_type,
..Default::default()
});
let height = if let Some(height) = height {
assert_eq!(height % 8, 0, "height has to be divisible by 8");
height
} else {
768
};
let width = if let Some(width) = width {
assert_eq!(width % 8, 0, "width has to be divisible by 8");
width
} else {
768
};
StableDiffusionConfig {
width,
height,
clip: clip::Config::v2_1(),
clip2: None,
autoencoder,
scheduler,
unet,
}
}
pub fn v2_1(
sliced_attention_size: Option<usize>,
height: Option<usize>,
width: Option<usize>,
) -> Self {
// https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/scheduler/scheduler_config.json
Self::v2_1_(
sliced_attention_size,
height,
width,
schedulers::PredictionType::VPrediction,
)
}
fn sdxl_(
sliced_attention_size: Option<usize>,
height: Option<usize>,
width: Option<usize>,
prediction_type: schedulers::PredictionType,
) -> Self {
let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig {
out_channels,
use_cross_attn,
attention_head_dim,
};
// https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json
let unet = unet_2d::UNet2DConditionModelConfig {
blocks: vec![
bc(320, None, 5),
bc(640, Some(2), 10),
bc(1280, Some(10), 20),
],
center_input_sample: false,
cross_attention_dim: 2048,
downsample_padding: 1,
flip_sin_to_cos: true,
freq_shift: 0.,
layers_per_block: 2,
mid_block_scale_factor: 1.,
norm_eps: 1e-5,
norm_num_groups: 32,
sliced_attention_size,
use_linear_projection: true,
};
// https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/vae/config.json
let autoencoder = vae::AutoEncoderKLConfig {
block_out_channels: vec![128, 256, 512, 512],
layers_per_block: 2,
latent_channels: 4,
norm_num_groups: 32,
use_quant_conv: true,
use_post_quant_conv: true,
};
let scheduler = Arc::new(ddim::DDIMSchedulerConfig {
prediction_type,
..Default::default()
});
let height = if let Some(height) = height {
assert_eq!(height % 8, 0, "height has to be divisible by 8");
height
} else {
1024
};
let width = if let Some(width) = width {
assert_eq!(width % 8, 0, "width has to be divisible by 8");
width
} else {
1024
};
StableDiffusionConfig {
width,
height,
clip: clip::Config::sdxl(),
clip2: Some(clip::Config::sdxl2()),
autoencoder,
scheduler,
unet,
}
}
fn sdxl_turbo_(
sliced_attention_size: Option<usize>,
height: Option<usize>,
width: Option<usize>,
prediction_type: schedulers::PredictionType,
) -> Self {
let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig {
out_channels,
use_cross_attn,
attention_head_dim,
};
// https://huggingface.co/stabilityai/sdxl-turbo/blob/main/unet/config.json
let unet = unet_2d::UNet2DConditionModelConfig {
blocks: vec![
bc(320, None, 5),
bc(640, Some(2), 10),
bc(1280, Some(10), 20),
],
center_input_sample: false,
cross_attention_dim: 2048,
downsample_padding: 1,
flip_sin_to_cos: true,
freq_shift: 0.,
layers_per_block: 2,
mid_block_scale_factor: 1.,
norm_eps: 1e-5,
norm_num_groups: 32,
sliced_attention_size,
use_linear_projection: true,
};
// https://huggingface.co/stabilityai/sdxl-turbo/blob/main/vae/config.json
let autoencoder = vae::AutoEncoderKLConfig {
block_out_channels: vec![128, 256, 512, 512],
layers_per_block: 2,
latent_channels: 4,
norm_num_groups: 32,
use_quant_conv: true,
use_post_quant_conv: true,
};
let scheduler = Arc::new(
euler_ancestral_discrete::EulerAncestralDiscreteSchedulerConfig {
prediction_type,
timestep_spacing: schedulers::TimestepSpacing::Trailing,
..Default::default()
},
);
let height = if let Some(height) = height {
assert_eq!(height % 8, 0, "height has to be divisible by 8");
height
} else {
512
};
let width = if let Some(width) = width {
assert_eq!(width % 8, 0, "width has to be divisible by 8");
width
} else {
512
};
Self {
width,
height,
clip: clip::Config::sdxl(),
clip2: Some(clip::Config::sdxl2()),
autoencoder,
scheduler,
unet,
}
}
pub fn sdxl(
sliced_attention_size: Option<usize>,
height: Option<usize>,
width: Option<usize>,
) -> Self {
Self::sdxl_(
sliced_attention_size,
height,
width,
// https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/scheduler/scheduler_config.json
schedulers::PredictionType::Epsilon,
)
}
pub fn sdxl_turbo(
sliced_attention_size: Option<usize>,
height: Option<usize>,
width: Option<usize>,
) -> Self {
Self::sdxl_turbo_(
sliced_attention_size,
height,
width,
// https://huggingface.co/stabilityai/sdxl-turbo/blob/main/scheduler/scheduler_config.json
schedulers::PredictionType::Epsilon,
)
}
pub fn ssd1b(
sliced_attention_size: Option<usize>,
height: Option<usize>,
width: Option<usize>,
) -> Self {
let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig {
out_channels,
use_cross_attn,
attention_head_dim,
};
// https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json
let unet = unet_2d::UNet2DConditionModelConfig {
blocks: vec![
bc(320, None, 5),
bc(640, Some(2), 10),
bc(1280, Some(10), 20),
],
center_input_sample: false,
cross_attention_dim: 2048,
downsample_padding: 1,
flip_sin_to_cos: true,
freq_shift: 0.,
layers_per_block: 2,
mid_block_scale_factor: 1.,
norm_eps: 1e-5,
norm_num_groups: 32,
sliced_attention_size,
use_linear_projection: true,
};
// https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/vae/config.json
let autoencoder = vae::AutoEncoderKLConfig {
block_out_channels: vec![128, 256, 512, 512],
layers_per_block: 2,
latent_channels: 4,
norm_num_groups: 32,
use_quant_conv: true,
use_post_quant_conv: true,
};
let scheduler = Arc::new(ddim::DDIMSchedulerConfig {
..Default::default()
});
let height = if let Some(height) = height {
assert_eq!(height % 8, 0, "height has to be divisible by 8");
height
} else {
1024
};
let width = if let Some(width) = width {
assert_eq!(width % 8, 0, "width has to be divisible by 8");
width
} else {
1024
};
Self {
width,
height,
clip: clip::Config::ssd1b(),
clip2: Some(clip::Config::ssd1b2()),
autoencoder,
scheduler,
unet,
}
}
pub fn build_vae<P: AsRef<std::path::Path>>(
&self,
vae_weights: P,
device: &Device,
dtype: DType,
) -> Result<vae::AutoEncoderKL> {
let vs_ae =
unsafe { nn::VarBuilder::from_mmaped_safetensors(&[vae_weights], dtype, device)? };
// https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/vae/config.json
let autoencoder = vae::AutoEncoderKL::new(vs_ae, 3, 3, self.autoencoder.clone())?;
Ok(autoencoder)
}
pub fn build_unet<P: AsRef<std::path::Path>>(
&self,
unet_weights: P,
device: &Device,
in_channels: usize,
use_flash_attn: bool,
dtype: DType,
) -> Result<unet_2d::UNet2DConditionModel> {
let vs_unet =
unsafe { nn::VarBuilder::from_mmaped_safetensors(&[unet_weights], dtype, device)? };
let unet = unet_2d::UNet2DConditionModel::new(
vs_unet,
in_channels,
4,
use_flash_attn,
self.unet.clone(),
)?;
Ok(unet)
}
pub fn build_scheduler(&self, n_steps: usize) -> Result<Box<dyn Scheduler>> {
self.scheduler.build(n_steps)
}
}
pub fn build_clip_transformer<P: AsRef<std::path::Path>>(
clip: &clip::Config,
clip_weights: P,
device: &Device,
dtype: DType,
) -> Result<clip::ClipTextTransformer> {
let vs = unsafe { nn::VarBuilder::from_mmaped_safetensors(&[clip_weights], dtype, device)? };
let text_model = clip::ClipTextTransformer::new(vs, clip)?;
Ok(text_model)
}
| candle/candle-transformers/src/models/stable_diffusion/mod.rs/0 | {
"file_path": "candle/candle-transformers/src/models/stable_diffusion/mod.rs",
"repo_id": "candle",
"token_count": 8553
} |
//! Whisper Model Implementation
//!
//! Whisper is an automatic speech recognition (ASR) system trained on large amounts
//! of multilingual and multitask supervised data collected from the web. It can be used to
//! convert audio files (in the `.wav` format) to text. Supported features include
//! language detection as well as multilingual speech recognition.
//!
//! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/lmz/candle-whisper)
//! - 💻 [GH Link](https://github.com/openai/whisper)
//! - 💻 Transformers Python [reference implementation](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py)
//!
//!
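//! The `Config` struct below mirrors the `config.json` shipped with the Hugging Face
//! checkpoints, so it can be deserialized directly. A minimal sketch, assuming
//! `serde_json` is available and `config.json` has been downloaded locally:
//!
//! ```ignore
//! use candle_transformers::models::whisper::Config;
//!
//! let config: Config = serde_json::from_str(&std::fs::read_to_string("config.json")?)?;
//! println!("{} mel bins, {} encoder layers", config.num_mel_bins, config.encoder_layers);
//! ```
//!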
pub mod audio;
pub mod model;
pub mod quantized_model;
use serde::Deserialize;
// The names in comments correspond to the original implementation:
// https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L17
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub num_mel_bins: usize, // n_mels
pub max_source_positions: usize, // n_audio_ctx
pub d_model: usize, // n_audio_state
pub encoder_attention_heads: usize, // n_audio_head
pub encoder_layers: usize, // n_audio_layer
pub vocab_size: usize, // n_vocab
pub max_target_positions: usize, // n_text_ctx
// pub n_text_state: usize,
pub decoder_attention_heads: usize, // n_text_head
pub decoder_layers: usize, // n_text_layer
#[serde(default)]
pub suppress_tokens: Vec<u32>,
}
pub const DTYPE: candle::DType = candle::DType::F32;
// Audio parameters.
pub const SAMPLE_RATE: usize = 16000;
pub const N_FFT: usize = 400;
pub const HOP_LENGTH: usize = 160;
pub const CHUNK_LENGTH: usize = 30;
pub const N_SAMPLES: usize = CHUNK_LENGTH * SAMPLE_RATE; // 480000 samples in a 30-second chunk
pub const N_FRAMES: usize = N_SAMPLES / HOP_LENGTH; // 3000 frames in a mel spectrogram input
pub const NO_SPEECH_THRESHOLD: f64 = 0.6;
pub const LOGPROB_THRESHOLD: f64 = -1.0;
pub const TEMPERATURES: [f64; 6] = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0];
pub const COMPRESSION_RATIO_THRESHOLD: f64 = 2.4;
// Tokenizer dependent bits.
pub const SOT_TOKEN: &str = "<|startoftranscript|>";
pub const TRANSCRIBE_TOKEN: &str = "<|transcribe|>";
pub const TRANSLATE_TOKEN: &str = "<|translate|>";
pub const NO_TIMESTAMPS_TOKEN: &str = "<|notimestamps|>";
pub const EOT_TOKEN: &str = "<|endoftext|>";
pub const NO_SPEECH_TOKENS: [&str; 2] = ["<|nocaptions|>", "<|nospeech|>"];
| candle/candle-transformers/src/models/whisper/mod.rs/0 | {
"file_path": "candle/candle-transformers/src/models/whisper/mod.rs",
"repo_id": "candle",
"token_count": 1018
} |
//! Utilities for quantized network layers
//!
//! This module contains various implementations of standard neural network layers, modules and
//! utilities including embedding, linear layers, and various normalization techniques.
//! Most implementations provide quantized weights support.
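//!
//! A minimal sketch of wiring these helpers to a GGUF checkpoint. The file name
//! and tensor prefix are placeholders, and the `VarBuilder::from_gguf` call is
//! assumed to match the current `quantized_var_builder` API:
//!
//! ```ignore
//! fn load_lm_head() -> candle::Result<candle_transformers::quantized_nn::Linear> {
//!     let device = candle::Device::Cpu;
//!     let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(
//!         "model-q4_k.gguf",
//!         &device,
//!     )?;
//!     // Builds a quantized linear layer from `lm_head.weight` / `lm_head.bias`.
//!     candle_transformers::quantized_nn::linear_b(4096, 32000, true, vb.pp("lm_head"))
//! }
//! ```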
use crate::models::with_tracing::QMatMul;
use crate::quantized_var_builder::VarBuilder;
use candle::quantized::QTensor;
use candle::{Module, Result, Tensor};
#[derive(Debug, Clone)]
pub struct Embedding {
inner: candle_nn::Embedding,
span: tracing::Span,
}
impl Embedding {
pub fn new(d1: usize, d2: usize, vb: VarBuilder) -> Result<Self> {
let embeddings = vb.get((d1, d2), "weight")?.dequantize(vb.device())?;
let inner = candle_nn::Embedding::new(embeddings, d2);
let span = tracing::span!(tracing::Level::TRACE, "embedding");
Ok(Self { inner, span })
}
pub fn embeddings(&self) -> &Tensor {
self.inner.embeddings()
}
}
impl Module for Embedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
#[derive(Debug, Clone)]
pub struct Linear {
weight: QMatMul,
bias: Option<Tensor>,
}
impl Linear {
pub fn from_arc(weight: std::sync::Arc<QTensor>, bias: Option<Tensor>) -> Result<Self> {
let weight = QMatMul::from_weights(weight)?;
Ok(Self { weight, bias })
}
pub fn from_weights(weight: QMatMul, bias: Option<Tensor>) -> Self {
Self { weight, bias }
}
}
impl Module for Linear {
fn forward(&self, x: &Tensor) -> candle::Result<Tensor> {
let x = x.apply(&self.weight)?;
match &self.bias {
None => Ok(x),
Some(bias) => x.broadcast_add(bias),
}
}
}
pub fn linear_b(in_dim: usize, out_dim: usize, bias: bool, vb: VarBuilder) -> Result<Linear> {
let bias = if bias {
Some(vb.get(out_dim, "bias")?.dequantize(vb.device())?)
} else {
None
};
let weight = QMatMul::new(in_dim, out_dim, vb)?;
Ok(Linear { weight, bias })
}
pub fn linear(in_dim: usize, out_dim: usize, vb: VarBuilder) -> Result<Linear> {
let bias = vb.get(out_dim, "bias")?.dequantize(vb.device())?;
let weight = QMatMul::new(in_dim, out_dim, vb)?;
Ok(Linear {
weight,
bias: Some(bias),
})
}
pub fn layer_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<candle_nn::LayerNorm> {
let weight = vb.get(size, "weight")?.dequantize(vb.device())?;
let bias = vb.get(size, "bias")?.dequantize(vb.device())?;
Ok(candle_nn::LayerNorm::new(weight, bias, eps))
}
pub fn layer_norm_no_bias(size: usize, eps: f64, vb: VarBuilder) -> Result<candle_nn::LayerNorm> {
let weight = vb.get(size, "weight")?.dequantize(vb.device())?;
Ok(candle_nn::LayerNorm::new_no_bias(weight, eps))
}
pub fn linear_no_bias(in_dim: usize, out_dim: usize, vb: VarBuilder) -> Result<Linear> {
let weight = QMatMul::new(in_dim, out_dim, vb)?;
Ok(Linear { weight, bias: None })
}
#[derive(Debug, Clone)]
pub struct RmsNorm {
weight: Tensor,
eps: f64,
span: tracing::Span,
}
impl RmsNorm {
pub fn new(size: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "rms-norm");
let weight = vb.get(size, "weight")?.dequantize(vb.device())?;
Ok(Self { weight, eps, span })
}
pub fn from_qtensor(weight: QTensor, eps: f64) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "rms-norm");
let weight = weight.dequantize(&weight.device())?;
Ok(Self { weight, eps, span })
}
}
impl Module for RmsNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
candle_nn::ops::rms_norm(x, &self.weight, self.eps as f32)
}
}
| candle/candle-transformers/src/quantized_nn.rs/0 | {
"file_path": "candle/candle-transformers/src/quantized_nn.rs",
"repo_id": "candle",
"token_count": 1681
} |
// Load the candle Whisper decoder wasm module
import init, { Decoder } from "./build/m.js";
async function fetchArrayBuffer(url) {
const cacheName = "whisper-candle-cache";
const cache = await caches.open(cacheName);
const cachedResponse = await cache.match(url);
if (cachedResponse) {
const data = await cachedResponse.arrayBuffer();
return new Uint8Array(data);
}
const res = await fetch(url, { cache: "force-cache" });
cache.put(url, res.clone());
return new Uint8Array(await res.arrayBuffer());
}
class Whisper {
static instance = {};
// Retrieve the Whisper model. When called for the first time,
// this will load the model and save it for future use.
static async getInstance(params) {
const {
weightsURL,
modelID,
tokenizerURL,
mel_filtersURL,
configURL,
quantized,
is_multilingual,
timestamps,
task,
language,
} = params;
// load individual modelID only once
if (!this.instance[modelID]) {
await init();
self.postMessage({ status: "loading", message: "Loading Model" });
const [
weightsArrayU8,
tokenizerArrayU8,
mel_filtersArrayU8,
configArrayU8,
] = await Promise.all([
fetchArrayBuffer(weightsURL),
fetchArrayBuffer(tokenizerURL),
fetchArrayBuffer(mel_filtersURL),
fetchArrayBuffer(configURL),
]);
this.instance[modelID] = new Decoder(
weightsArrayU8,
tokenizerArrayU8,
mel_filtersArrayU8,
configArrayU8,
quantized,
is_multilingual,
timestamps,
task,
language
);
} else {
self.postMessage({ status: "loading", message: "Model Already Loaded" });
}
return this.instance[modelID];
}
}
self.addEventListener("message", async (event) => {
const {
weightsURL,
modelID,
tokenizerURL,
configURL,
mel_filtersURL,
audioURL,
} = event.data;
try {
self.postMessage({ status: "decoding", message: "Starting Decoder" });
let quantized = false;
if (modelID.includes("quantized")) {
quantized = true;
}
let is_multilingual = false;
if (modelID.includes("multilingual")) {
is_multilingual = true;
}
let timestamps = true;
const decoder = await Whisper.getInstance({
weightsURL,
modelID,
tokenizerURL,
mel_filtersURL,
configURL,
quantized,
is_multilingual,
timestamps,
task: null,
language: null,
});
self.postMessage({ status: "decoding", message: "Loading Audio" });
const audioArrayU8 = await fetchArrayBuffer(audioURL);
self.postMessage({ status: "decoding", message: "Running Decoder..." });
const segments = decoder.decode(audioArrayU8);
// Send the segment back to the main thread as JSON
self.postMessage({
status: "complete",
message: "complete",
output: JSON.parse(segments),
});
} catch (e) {
self.postMessage({ error: e });
}
});
| candle/candle-wasm-examples/whisper/whisperWorker.js/0 | {
"file_path": "candle/candle-wasm-examples/whisper/whisperWorker.js",
"repo_id": "candle",
"token_count": 1215
} |
Run the tests with:
```bash
RUST_LOG=wasm_bindgen_test_runner wasm-pack test --chrome --headless
```
Or:
```bash
wasm-pack test --chrome
```
If you get an "invalid session id" failure in headless mode, check the logs; it
may well be that your ChromeDriver is not the same version as your browser.
| candle/candle-wasm-tests/README.md/0 | {
"file_path": "candle/candle-wasm-tests/README.md",
"repo_id": "candle",
"token_count": 98
} |
# Theming
You can use a few environment variables to customize the look and feel of Chat UI. These are by default:
```ini
PUBLIC_APP_NAME=ChatUI
PUBLIC_APP_ASSETS=chatui
PUBLIC_APP_COLOR=blue
PUBLIC_APP_DESCRIPTION="Making the community's best AI chat models available to everyone."
PUBLIC_APP_DATA_SHARING=
PUBLIC_APP_DISCLAIMER=
```
- `PUBLIC_APP_NAME` The name used as a title throughout the app.
- `PUBLIC_APP_ASSETS` Is used to find logos & favicons in `static/$PUBLIC_APP_ASSETS`, current options are `chatui` and `huggingchat`.
- `PUBLIC_APP_COLOR` Can be any of the [tailwind colors](https://tailwindcss.com/docs/customizing-colors#default-color-palette).
- `PUBLIC_APP_DATA_SHARING` Can be set to 1 to add a toggle in the user settings that lets your users opt in to data sharing with the model creators.
- `PUBLIC_APP_DISCLAIMER` If set to 1, we show a disclaimer about generated outputs on login.
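
For example, a deployment that re-brands the UI could override just a few of these values (the values below are illustrative):

```ini
PUBLIC_APP_NAME=MyAssistant
PUBLIC_APP_COLOR=emerald
PUBLIC_APP_DESCRIPTION="An internal assistant for my team."
```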
| chat-ui/docs/source/configuration/theming.md/0 | {
"file_path": "chat-ui/docs/source/configuration/theming.md",
"repo_id": "chat-ui",
"token_count": 286
} |
<script lang="ts">
import { onMount, createEventDispatcher } from "svelte";
const dispatch = createEventDispatcher();
let loader: HTMLDivElement | undefined = $state();
let observer: IntersectionObserver;
let intervalId: ReturnType<typeof setInterval> | undefined;
onMount(() => {
if (!loader) {
return;
}
observer = new IntersectionObserver((entries) => {
entries.forEach((entry) => {
if (entry.isIntersecting) {
// Clear any existing interval
if (intervalId) {
clearInterval(intervalId);
}
// Start new interval that dispatches every 250ms
intervalId = setInterval(() => {
dispatch("visible");
}, 250);
} else {
// Clear interval when not intersecting
if (intervalId) {
clearInterval(intervalId);
intervalId = undefined;
}
}
});
});
observer.observe(loader);
return () => {
observer.disconnect();
if (intervalId) {
clearInterval(intervalId);
}
};
});
</script>
<div bind:this={loader} class="flex animate-pulse flex-col gap-4">
<div class="ml-2 h-5 w-4/5 gap-5 rounded bg-gray-200 dark:bg-gray-700"></div>
<div class="ml-2 h-5 w-4/5 gap-5 rounded bg-gray-200 dark:bg-gray-700"></div>
<div class="ml-2 h-5 w-4/5 gap-5 rounded bg-gray-200 dark:bg-gray-700"></div>
</div>
| chat-ui/src/lib/components/InfiniteScroll.svelte/0 | {
"file_path": "chat-ui/src/lib/components/InfiniteScroll.svelte",
"repo_id": "chat-ui",
"token_count": 543
} |
<script lang="ts">
import Modal from "./Modal.svelte";
import CarbonClose from "~icons/carbon/close";
import CarbonBlockchain from "~icons/carbon/blockchain";
interface Props {
preprompt: string;
}
let { preprompt }: Props = $props();
let isOpen = $state(false);
</script>
<button
type="button"
class="mx-auto flex items-center gap-1.5 rounded-full border border-gray-100 bg-gray-50 px-3 py-1 text-xs text-gray-500 hover:bg-gray-100 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-400 dark:hover:bg-gray-700"
onclick={() => (isOpen = !isOpen)}
onkeydown={(e) => e.key === "Enter" && (isOpen = !isOpen)}
>
<CarbonBlockchain class="text-xxs" /> Using Custom System Prompt
</button>
{#if isOpen}
<Modal on:close={() => (isOpen = false)} width="w-full max-w-2xl">
<div class="flex w-full flex-col gap-5 p-6">
<div class="flex items-start justify-between text-xl font-semibold text-gray-800">
<h2>System Prompt</h2>
<button type="button" class="group" onclick={() => (isOpen = false)}>
<CarbonClose class="mt-auto text-gray-900 group-hover:text-gray-500" />
</button>
</div>
<textarea
disabled
value={preprompt}
class="min-h-[420px] w-full resize-none rounded-lg border bg-gray-50 p-2.5 text-gray-600 max-sm:text-sm"
></textarea>
</div>
</Modal>
{/if}
| chat-ui/src/lib/components/SystemPromptModal.svelte/0 | {
"file_path": "chat-ui/src/lib/components/SystemPromptModal.svelte",
"repo_id": "chat-ui",
"token_count": 535
} |
<script lang="ts">
import type { WebSearchSource } from "$lib/types/WebSearch";
import katex from "katex";
import "katex/dist/contrib/mhchem.mjs";
import DOMPurify from "isomorphic-dompurify";
import { Marked } from "marked";
import type { Tokens, TokenizerExtension, RendererExtension } from "marked";
import CodeBlock from "../CodeBlock.svelte";
interface Props {
content: string;
sources?: WebSearchSource[];
}
let { content, sources = [] }: Props = $props();
interface katexBlockToken extends Tokens.Generic {
type: "katexBlock";
raw: string;
text: string;
displayMode: true;
}
interface katexInlineToken extends Tokens.Generic {
type: "katexInline";
raw: string;
text: string;
displayMode: false;
}
export const katexBlockExtension: TokenizerExtension & RendererExtension = {
name: "katexBlock",
level: "block",
start(src: string): number | undefined {
const match = src.match(/(\${2}|\\\[)/);
return match ? match.index : -1;
},
tokenizer(src: string): katexBlockToken | undefined {
// 1) $$ ... $$
const rule1 = /^\${2}([\s\S]+?)\${2}/;
const match1 = rule1.exec(src);
if (match1) {
const token: katexBlockToken = {
type: "katexBlock",
raw: match1[0],
text: match1[1].trim(),
displayMode: true,
};
return token;
}
// 2) \[ ... \]
const rule2 = /^\\\[([\s\S]+?)\\\]/;
const match2 = rule2.exec(src);
if (match2) {
const token: katexBlockToken = {
type: "katexBlock",
raw: match2[0],
text: match2[1].trim(),
displayMode: true,
};
return token;
}
return undefined;
},
renderer(token) {
if (token.type === "katexBlock") {
return katex.renderToString(token.text, {
throwOnError: false,
displayMode: token.displayMode,
});
}
return undefined;
},
};
const katexInlineExtension: TokenizerExtension & RendererExtension = {
name: "katexInline",
level: "inline",
start(src: string): number | undefined {
const match = src.match(/(\$|\\\()/);
return match ? match.index : -1;
},
tokenizer(src: string): katexInlineToken | undefined {
// 1) $...$
const rule1 = /^\$([^$]+?)\$/;
const match1 = rule1.exec(src);
if (match1) {
const token: katexInlineToken = {
type: "katexInline",
raw: match1[0],
text: match1[1].trim(),
displayMode: false,
};
return token;
}
// 2) \(...\)
const rule2 = /^\\\(([\s\S]+?)\\\)/;
const match2 = rule2.exec(src);
if (match2) {
const token: katexInlineToken = {
type: "katexInline",
raw: match2[0],
text: match2[1].trim(),
displayMode: false,
};
return token;
}
return undefined;
},
renderer(token) {
if (token.type === "katexInline") {
return katex.renderToString(token.text, {
throwOnError: false,
displayMode: token.displayMode,
});
}
return undefined;
},
};
function escapeHTML(content: string) {
return content.replace(
/[<>&"']/g,
(x) =>
({
"<": "<",
">": ">",
"&": "&",
"'": "'",
'"': """,
})[x] || x
);
}
function addInlineCitations(md: string, webSearchSources: WebSearchSource[] = []): string {
const linkStyle =
"color: rgb(59, 130, 246); text-decoration: none; hover:text-decoration: underline;";
return md.replace(/\[(\d+)\]/g, (match: string) => {
const indices: number[] = (match.match(/\d+/g) || []).map(Number);
const links: string = indices
.map((index: number) => {
if (index === 0) return false;
const source = webSearchSources[index - 1];
if (source) {
return `<a href="${source.link}" target="_blank" rel="noreferrer" style="${linkStyle}">${index}</a>`;
}
return "";
})
.filter(Boolean)
.join(", ");
return links ? ` <sup>${links}</sup>` : match;
});
}
const marked = new Marked({
hooks: {
postprocess: (html) => DOMPurify.sanitize(addInlineCitations(html, sources)),
},
extensions: [katexBlockExtension, katexInlineExtension],
renderer: {
link: (href, title, text) =>
`<a href="${href?.replace(/>$/, "")}" target="_blank" rel="noreferrer">${text}</a>`,
html: (html) => escapeHTML(html),
},
gfm: true,
});
DOMPurify.addHook("afterSanitizeAttributes", (node) => {
if (node.tagName === "A") {
node.setAttribute("rel", "noreferrer");
node.setAttribute("target", "_blank");
}
});
</script>
{#each marked.lexer(content) as token}
{#if token.type === "code"}
<CodeBlock lang={token.lang} code={token.text} />
{:else}
{#await marked.parse(token.raw) then parsed}
<!-- eslint-disable-next-line svelte/no-at-html-tags -->
{@html parsed}
{/await}
{/if}
{/each}
| chat-ui/src/lib/components/chat/MarkdownRenderer.svelte/0 | {
"file_path": "chat-ui/src/lib/components/chat/MarkdownRenderer.svelte",
"repo_id": "chat-ui",
"token_count": 2103
} |
<script lang="ts">
import { page } from "$app/state";
import { env as envPublic } from "$env/dynamic/public";
import { base } from "$app/paths";
interface Props {
classNames?: string;
}
let { classNames = "" }: Props = $props();
</script>
{#if envPublic.PUBLIC_APP_ASSETS === "chatui"}
<svg
height="30"
width="30"
viewBox="0 0 30 30"
xmlns="http://www.w3.org/2000/svg"
class={classNames}
>
<path
d="M4.06151 14.1464C4.06151 11.8818 4.9611 9.71004 6.56237 8.10877C8.16364 6.5075 10.3354 5.60791 12.6 5.60791H16.5231C18.6254 5.60791 20.6416 6.44307 22.1282 7.92965C23.6148 9.41624 24.45 11.4325 24.45 13.5348C24.45 15.6372 23.6148 17.6534 22.1282 19.14C20.6416 20.6266 18.6254 21.4618 16.5231 21.4618H7.08459L4.63844 23.8387C4.59547 23.8942 4.53557 23.9343 4.4678 23.9527C4.40004 23.9712 4.32811 23.9671 4.2629 23.941C4.1977 23.9149 4.14276 23.8683 4.10643 23.8082C4.07009 23.7481 4.05432 23.6778 4.06151 23.6079V14.1464Z"
class="fill-primary-500"
/>
</svg>
{:else}
<img
class={classNames}
alt="{envPublic.PUBLIC_APP_NAME} logo"
src="{envPublic.PUBLIC_ORIGIN || page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/logo.svg"
/>
{/if}
| chat-ui/src/lib/components/icons/Logo.svelte/0 | {
"file_path": "chat-ui/src/lib/components/icons/Logo.svelte",
"repo_id": "chat-ui",
"token_count": 574
} |
import type { Migration } from ".";
import { collections } from "$lib/server/database";
import { ObjectId } from "mongodb";
const resetTools: Migration = {
_id: new ObjectId("000000000000000000000007"),
name: "Reset tools to empty",
up: async () => {
const { settings } = collections;
await settings.updateMany({}, { $set: { tools: [] } });
return true;
},
runEveryTime: false,
};
export default resetTools;
| chat-ui/src/lib/migrations/routines/07-reset-tools-in-settings.ts/0 | {
"file_path": "chat-ui/src/lib/migrations/routines/07-reset-tools-in-settings.ts",
"repo_id": "chat-ui",
"token_count": 133
} |
import { makeImageProcessor, type ImageProcessorOptions } from "../images";
import { makeDocumentProcessor, type FileProcessorOptions } from "../document";
import type { EndpointMessage } from "../endpoints";
import type { MessageFile } from "$lib/types/Message";
import type {
BetaImageBlockParam,
BetaMessageParam,
BetaBase64PDFBlock,
} from "@anthropic-ai/sdk/resources/beta/messages/messages.mjs";
import type { ToolResult } from "$lib/types/Tool";
import { downloadFile } from "$lib/server/files/downloadFile";
import type { ObjectId } from "mongodb";
export async function fileToImageBlock(
file: MessageFile,
opts: ImageProcessorOptions<"image/png" | "image/jpeg" | "image/webp">
): Promise<BetaImageBlockParam> {
const processor = makeImageProcessor(opts);
const { image, mime } = await processor(file);
return {
type: "image",
source: {
type: "base64",
media_type: mime,
data: image.toString("base64"),
},
};
}
export async function fileToDocumentBlock(
file: MessageFile,
opts: FileProcessorOptions<"application/pdf">
): Promise<BetaBase64PDFBlock> {
const processor = makeDocumentProcessor(opts);
const { file: document, mime } = await processor(file);
return {
type: "document",
source: {
type: "base64",
media_type: mime,
data: document.toString("base64"),
},
};
}
type NonSystemMessage = EndpointMessage & { from: "user" | "assistant" };
export async function endpointMessagesToAnthropicMessages(
messages: EndpointMessage[],
multimodal: {
image: ImageProcessorOptions<"image/png" | "image/jpeg" | "image/webp">;
document?: FileProcessorOptions<"application/pdf">;
},
conversationId?: ObjectId | undefined
): Promise<BetaMessageParam[]> {
return await Promise.all(
messages
.filter((message): message is NonSystemMessage => message.from !== "system")
.map<Promise<BetaMessageParam>>(async (message) => {
return {
role: message.from,
content: [
...(message.from === "user"
? await Promise.all(
(message.files ?? []).map(async (file) => {
if (file.type === "hash" && conversationId) {
file = await downloadFile(file.value, conversationId);
}
if (file.mime.startsWith("image/")) {
return fileToImageBlock(file, multimodal.image);
} else if (file.mime === "application/pdf" && multimodal.document) {
return fileToDocumentBlock(file, multimodal.document);
} else {
throw new Error(`Unsupported file type: ${file.mime}`);
}
})
)
: []),
{ type: "text", text: message.content },
],
};
})
);
}
export function addToolResults(
messages: BetaMessageParam[],
toolResults: ToolResult[]
): BetaMessageParam[] {
const id = crypto.randomUUID();
if (toolResults.length === 0) {
return messages;
}
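	// Anthropic expects a tool invocation as an assistant `tool_use` turn followed by
	// a user `tool_result` turn; the generated ids tie each result back to its call.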
return [
...messages,
{
role: "assistant",
content: toolResults.map((result, index) => ({
type: "tool_use",
id: `tool_${index}_${id}`,
name: result.call.name,
input: result.call.parameters,
})),
},
{
role: "user",
content: toolResults.map((result, index) => ({
type: "tool_result",
tool_use_id: `tool_${index}_${id}`,
is_error: result.status === "error",
content: JSON.stringify(
result.status === "error" ? result.message : "outputs" in result ? result.outputs : ""
),
})),
},
];
}
| chat-ui/src/lib/server/endpoints/anthropic/utils.ts/0 | {
"file_path": "chat-ui/src/lib/server/endpoints/anthropic/utils.ts",
"repo_id": "chat-ui",
"token_count": 1331
} |
import type { Message } from "$lib/types/Message";
import { format } from "date-fns";
import type { EndpointMessage } from "./endpoints";
import { downloadFile } from "../files/downloadFile";
import type { ObjectId } from "mongodb";
export async function preprocessMessages(
messages: Message[],
webSearch: Message["webSearch"],
convId: ObjectId
): Promise<EndpointMessage[]> {
return Promise.resolve(messages)
.then((msgs) => addWebSearchContext(msgs, webSearch))
.then((msgs) => downloadFiles(msgs, convId))
.then((msgs) => injectClipboardFiles(msgs));
}
function addWebSearchContext(messages: Message[], webSearch: Message["webSearch"]) {
const webSearchContext = webSearch?.contextSources
.map(({ context }, idx) => `Source [${idx + 1}]\n${context.trim()}`)
.join("\n\n----------\n\n");
// No web search context available, skip
if (!webSearch || !webSearchContext?.trim()) return messages;
// No messages available, skip
if (messages.length === 0) return messages;
const lastQuestion = messages.findLast((el) => el.from === "user")?.content ?? "";
const previousQuestions = messages
.filter((el) => el.from === "user")
.slice(0, -1)
.map((el) => el.content);
const currentDate = format(new Date(), "MMMM d, yyyy");
const finalMessage = {
...messages[messages.length - 1],
content: `I searched the web using the query: ${webSearch.searchQuery}.
Today is ${currentDate} and here are the results.
When answering the question, you must reference the sources you used inline by wrapping the index in brackets like this: [1]. If multiple sources are used, you must reference each one of them without commas like this: [1][2][3].
=====================
${webSearchContext}
=====================
${previousQuestions.length > 0 ? `Previous questions: \n- ${previousQuestions.join("\n- ")}` : ""}
Answer the question: ${lastQuestion}`,
};
return [...messages.slice(0, -1), finalMessage];
}
async function downloadFiles(messages: Message[], convId: ObjectId): Promise<EndpointMessage[]> {
return Promise.all(
messages.map<Promise<EndpointMessage>>((message) =>
Promise.all((message.files ?? []).map((file) => downloadFile(file.value, convId))).then(
(files) => ({ ...message, files })
)
)
);
}
async function injectClipboardFiles(messages: EndpointMessage[]) {
return Promise.all(
messages.map((message) => {
const plaintextFiles = message.files
?.filter((file) => file.mime === "application/vnd.chatui.clipboard")
.map((file) => Buffer.from(file.value, "base64").toString("utf-8"));
if (!plaintextFiles || plaintextFiles.length === 0) return message;
return {
...message,
content: `${plaintextFiles.join("\n\n")}\n\n${message.content}`,
files: message.files?.filter((file) => file.mime !== "application/vnd.chatui.clipboard"),
};
})
);
}
| chat-ui/src/lib/server/endpoints/preprocessMessages.ts/0 | {
"file_path": "chat-ui/src/lib/server/endpoints/preprocessMessages.ts",
"repo_id": "chat-ui",
"token_count": 944
} |
import { Address6, Address4 } from "ip-address";
import dns from "node:dns";
const dnsLookup = (hostname: string): Promise<{ address: string; family: number }> => {
return new Promise((resolve, reject) => {
dns.lookup(hostname, (err, address, family) => {
if (err) return reject(err);
resolve({ address, family });
});
});
};
export async function isURLLocal(URL: URL): Promise<boolean> {
const { address, family } = await dnsLookup(URL.hostname);
if (family === 4) {
const addr = new Address4(address);
const localSubnet = new Address4("127.0.0.0/8");
return addr.isInSubnet(localSubnet);
}
if (family === 6) {
const addr = new Address6(address);
return addr.isLoopback() || addr.isInSubnet(new Address6("::1/128")) || addr.isLinkLocal();
}
throw Error("Unknown IP family");
}
export function isURLStringLocal(url: string) {
try {
const urlObj = new URL(url);
return isURLLocal(urlObj);
} catch (e) {
// assume local if URL parsing fails
return true;
}
}
| chat-ui/src/lib/server/isURLLocal.ts/0 | {
"file_path": "chat-ui/src/lib/server/isURLLocal.ts",
"repo_id": "chat-ui",
"token_count": 363
} |
import type { ToolIOType, ToolOutputComponents } from "$lib/types/Tool";
export const ToolOutputPaths: Record<
ToolOutputComponents,
{
type: ToolIOType;
path: string;
}
> = {
textbox: {
type: "str",
path: "$",
},
markdown: {
type: "str",
path: "$",
},
number: {
type: "float",
path: "$",
},
image: {
type: "file",
path: "$.url",
},
gallery: {
type: "file",
path: "$[*].image.url",
},
audio: {
type: "file",
path: "$.url",
},
video: {
type: "file",
path: "$.video.url",
},
file: {
type: "file",
path: "$.url",
},
json: {
type: "str",
path: "$",
},
};
export const isValidOutputComponent = (
outputComponent: string
): outputComponent is keyof typeof ToolOutputPaths => {
return Object.keys(ToolOutputPaths).includes(outputComponent);
};
| chat-ui/src/lib/server/tools/outputs.ts/0 | {
"file_path": "chat-ui/src/lib/server/tools/outputs.ts",
"repo_id": "chat-ui",
"token_count": 344
} |
import {
chromium,
devices,
type Page,
type BrowserContextOptions,
type Response,
type Browser,
} from "playwright";
import { PlaywrightBlocker } from "@cliqz/adblocker-playwright";
import { env } from "$env/dynamic/private";
import { logger } from "$lib/server/logger";
import { onExit } from "$lib/server/exitHandler";
const blocker =
env.PLAYWRIGHT_ADBLOCKER === "true"
? await PlaywrightBlocker.fromPrebuiltAdsAndTracking(fetch)
.then((blker) => {
const mostBlocked = blker.blockFonts().blockMedias().blockFrames().blockImages();
if (env.WEBSEARCH_JAVASCRIPT === "false") return mostBlocked.blockScripts();
return mostBlocked;
})
.catch((err) => {
logger.error(err, "Failed to initialize PlaywrightBlocker from prebuilt lists");
return PlaywrightBlocker.empty();
})
: PlaywrightBlocker.empty();
let browserSingleton: Promise<Browser> | undefined;
async function getBrowser() {
const browser = await chromium.launch({ headless: true });
onExit(() => browser.close());
browser.on("disconnected", () => {
logger.warn("Browser closed");
browserSingleton = undefined;
});
return browser;
}
async function getPlaywrightCtx() {
if (!browserSingleton) browserSingleton = getBrowser();
const browser = await browserSingleton;
const device = devices["Desktop Chrome"];
const options: BrowserContextOptions = {
...device,
// Increasing width improves spatial clustering accuracy
screen: {
width: 3840,
height: 1080,
},
viewport: {
width: 3840,
height: 1080,
},
reducedMotion: "reduce",
acceptDownloads: false,
timezoneId: "America/New_York",
locale: "en-US",
};
return browser.newContext(options);
}
export async function withPage<T>(
url: string,
callback: (page: Page, response?: Response) => Promise<T>
): Promise<T> {
const ctx = await getPlaywrightCtx();
try {
const page = await ctx.newPage();
if (env.PLAYWRIGHT_ADBLOCKER === "true") {
await blocker.enableBlockingInPage(page);
}
await page.route("**", (route, request) => {
const requestUrl = request.url();
if (!requestUrl.startsWith("https://")) {
logger.warn(`Blocked request to: ${requestUrl}`);
return route.abort();
}
return route.continue();
});
const res = await page
.goto(url, { waitUntil: "load", timeout: parseInt(env.WEBSEARCH_TIMEOUT) })
.catch(() => {
console.warn(
`Failed to load page within ${parseInt(env.WEBSEARCH_TIMEOUT) / 1000}s: ${url}`
);
});
return await callback(page, res ?? undefined);
} finally {
await ctx.close();
}
}
| chat-ui/src/lib/server/websearch/scrape/playwright.ts/0 | {
"file_path": "chat-ui/src/lib/server/websearch/scrape/playwright.ts",
"repo_id": "chat-ui",
"token_count": 939
} |
import type { WebSearchSource } from "$lib/types/WebSearch";
import type { ToolCall, ToolResult } from "$lib/types/Tool";
export type MessageUpdate =
| MessageStatusUpdate
| MessageTitleUpdate
| MessageToolUpdate
| MessageWebSearchUpdate
| MessageStreamUpdate
| MessageFileUpdate
| MessageFinalAnswerUpdate
| MessageReasoningUpdate;
export enum MessageUpdateType {
Status = "status",
Title = "title",
Tool = "tool",
WebSearch = "webSearch",
Stream = "stream",
File = "file",
FinalAnswer = "finalAnswer",
Reasoning = "reasoning",
}
// Status
export enum MessageUpdateStatus {
Started = "started",
Error = "error",
Finished = "finished",
KeepAlive = "keepAlive",
}
export interface MessageStatusUpdate {
type: MessageUpdateType.Status;
status: MessageUpdateStatus;
message?: string;
}
// Web search
export enum MessageWebSearchUpdateType {
Update = "update",
Error = "error",
Sources = "sources",
Finished = "finished",
}
export interface BaseMessageWebSearchUpdate<TSubType extends MessageWebSearchUpdateType> {
type: MessageUpdateType.WebSearch;
subtype: TSubType;
}
export interface MessageWebSearchErrorUpdate
extends BaseMessageWebSearchUpdate<MessageWebSearchUpdateType.Error> {
message: string;
args?: string[];
}
export interface MessageWebSearchGeneralUpdate
extends BaseMessageWebSearchUpdate<MessageWebSearchUpdateType.Update> {
message: string;
args?: string[];
}
export interface MessageWebSearchSourcesUpdate
extends BaseMessageWebSearchUpdate<MessageWebSearchUpdateType.Sources> {
message: string;
sources: WebSearchSource[];
}
export type MessageWebSearchFinishedUpdate =
BaseMessageWebSearchUpdate<MessageWebSearchUpdateType.Finished>;
export type MessageWebSearchUpdate =
| MessageWebSearchErrorUpdate
| MessageWebSearchGeneralUpdate
| MessageWebSearchSourcesUpdate
| MessageWebSearchFinishedUpdate;
// Tool
export enum MessageToolUpdateType {
/** A request to call a tool alongside it's parameters */
Call = "call",
/** The result of a tool call */
Result = "result",
/** Error while running tool */
Error = "error",
/** ETA update */
ETA = "eta",
}
interface MessageToolBaseUpdate<TSubType extends MessageToolUpdateType> {
type: MessageUpdateType.Tool;
subtype: TSubType;
uuid: string;
}
export interface MessageToolCallUpdate extends MessageToolBaseUpdate<MessageToolUpdateType.Call> {
call: ToolCall;
}
export interface MessageToolResultUpdate
extends MessageToolBaseUpdate<MessageToolUpdateType.Result> {
result: ToolResult;
}
export interface MessageToolErrorUpdate extends MessageToolBaseUpdate<MessageToolUpdateType.Error> {
message: string;
}
export interface MessageToolETAUpdate extends MessageToolBaseUpdate<MessageToolUpdateType.ETA> {
eta: number;
}
export type MessageToolUpdate =
| MessageToolCallUpdate
| MessageToolResultUpdate
| MessageToolErrorUpdate
| MessageToolETAUpdate;
// Everything else
export interface MessageTitleUpdate {
type: MessageUpdateType.Title;
title: string;
}
export interface MessageStreamUpdate {
type: MessageUpdateType.Stream;
token: string;
}
export enum MessageReasoningUpdateType {
Stream = "stream",
Status = "status",
}
export type MessageReasoningUpdate = MessageReasoningStreamUpdate | MessageReasoningStatusUpdate;
export interface MessageReasoningStreamUpdate {
type: MessageUpdateType.Reasoning;
subtype: MessageReasoningUpdateType.Stream;
token: string;
}
export interface MessageReasoningStatusUpdate {
type: MessageUpdateType.Reasoning;
subtype: MessageReasoningUpdateType.Status;
status: string;
}
export interface MessageFileUpdate {
type: MessageUpdateType.File;
name: string;
sha: string;
mime: string;
}
export interface MessageFinalAnswerUpdate {
type: MessageUpdateType.FinalAnswer;
text: string;
interrupted: boolean;
webSources?: { uri: string; title: string }[];
}
| chat-ui/src/lib/types/MessageUpdate.ts/0 | {
"file_path": "chat-ui/src/lib/types/MessageUpdate.ts",
"repo_id": "chat-ui",
"token_count": 1093
} |
/**
* Chunk array into arrays of length at most `chunkSize`
*
* @param chunkSize must be greater than or equal to 1
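 *
 * @example
 * chunk([1, 2, 3, 4, 5], 2); // => [[1, 2], [3, 4], [5]]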
*/
export function chunk<T extends unknown[] | string>(arr: T, chunkSize: number): T[] {
if (isNaN(chunkSize) || chunkSize < 1) {
throw new RangeError("Invalid chunk size: " + chunkSize);
}
if (!arr.length) {
return [];
}
/// Small optimization to not chunk buffers unless needed
if (arr.length <= chunkSize) {
return [arr];
}
return range(Math.ceil(arr.length / chunkSize)).map((i) => {
return arr.slice(i * chunkSize, (i + 1) * chunkSize);
}) as T[];
}
function range(n: number, b?: number): number[] {
return b
? Array(b - n)
.fill(0)
.map((_, i) => n + i)
: Array(n)
.fill(0)
.map((_, i) => i);
}
| chat-ui/src/lib/utils/chunk.ts/0 | {
"file_path": "chat-ui/src/lib/utils/chunk.ts",
"repo_id": "chat-ui",
"token_count": 295
} |
import type { MessageFile } from "$lib/types/Message";
import {
type MessageUpdate,
type MessageStreamUpdate,
type MessageToolCallUpdate,
MessageToolUpdateType,
MessageUpdateType,
type MessageToolUpdate,
type MessageWebSearchUpdate,
type MessageWebSearchGeneralUpdate,
type MessageWebSearchSourcesUpdate,
type MessageWebSearchErrorUpdate,
MessageWebSearchUpdateType,
type MessageToolErrorUpdate,
type MessageToolResultUpdate,
} from "$lib/types/MessageUpdate";
import { env as envPublic } from "$env/dynamic/public";
export const isMessageWebSearchUpdate = (update: MessageUpdate): update is MessageWebSearchUpdate =>
update.type === MessageUpdateType.WebSearch;
export const isMessageWebSearchGeneralUpdate = (
update: MessageUpdate
): update is MessageWebSearchGeneralUpdate =>
isMessageWebSearchUpdate(update) && update.subtype === MessageWebSearchUpdateType.Update;
export const isMessageWebSearchSourcesUpdate = (
update: MessageUpdate
): update is MessageWebSearchSourcesUpdate =>
isMessageWebSearchUpdate(update) && update.subtype === MessageWebSearchUpdateType.Sources;
export const isMessageWebSearchErrorUpdate = (
update: MessageUpdate
): update is MessageWebSearchErrorUpdate =>
isMessageWebSearchUpdate(update) && update.subtype === MessageWebSearchUpdateType.Error;
export const isMessageToolUpdate = (update: MessageUpdate): update is MessageToolUpdate =>
update.type === MessageUpdateType.Tool;
export const isMessageToolCallUpdate = (update: MessageUpdate): update is MessageToolCallUpdate =>
isMessageToolUpdate(update) && update.subtype === MessageToolUpdateType.Call;
export const isMessageToolResultUpdate = (
update: MessageUpdate
): update is MessageToolResultUpdate =>
isMessageToolUpdate(update) && update.subtype === MessageToolUpdateType.Result;
export const isMessageToolErrorUpdate = (update: MessageUpdate): update is MessageToolErrorUpdate =>
isMessageToolUpdate(update) && update.subtype === MessageToolUpdateType.Error;
type MessageUpdateRequestOptions = {
base: string;
inputs?: string;
messageId?: string;
isRetry: boolean;
isContinue: boolean;
webSearch: boolean;
tools?: Array<string>;
files?: MessageFile[];
};
export async function fetchMessageUpdates(
conversationId: string,
opts: MessageUpdateRequestOptions,
abortSignal: AbortSignal
): Promise<AsyncGenerator<MessageUpdate>> {
const abortController = new AbortController();
abortSignal.addEventListener("abort", () => abortController.abort());
const form = new FormData();
const optsJSON = JSON.stringify({
inputs: opts.inputs,
id: opts.messageId,
is_retry: opts.isRetry,
is_continue: opts.isContinue,
web_search: opts.webSearch,
tools: opts.tools,
});
opts.files?.forEach((file) => {
const name = file.type + ";" + file.name;
form.append("files", new File([file.value], name, { type: file.mime }));
});
form.append("data", optsJSON);
const response = await fetch(`${opts.base}/conversation/${conversationId}`, {
method: "POST",
body: form,
signal: abortController.signal,
});
if (!response.ok) {
const errorMessage = await response
.json()
.then((obj) => obj.message)
.catch(() => `Request failed with status code ${response.status}: ${response.statusText}`);
throw Error(errorMessage);
}
if (!response.body) {
throw Error("Body not defined");
}
if (!(envPublic.PUBLIC_SMOOTH_UPDATES === "true")) {
return endpointStreamToIterator(response, abortController);
}
return smoothAsyncIterator(
streamMessageUpdatesToFullWords(endpointStreamToIterator(response, abortController))
);
}
async function* endpointStreamToIterator(
response: Response,
abortController: AbortController
): AsyncGenerator<MessageUpdate> {
const reader = response.body?.pipeThrough(new TextDecoderStream()).getReader();
if (!reader) throw Error("Response for endpoint had no body");
// Handle any cases where we must abort
reader.closed.then(() => abortController.abort());
// Handle logic for aborting
abortController.signal.addEventListener("abort", () => reader.cancel());
// ex) If the last response is => {"type": "stream", "token":
// It should be => {"type": "stream", "token": "Hello"} = prev_input_chunk + "Hello"}
let prevChunk = "";
while (!abortController.signal.aborted) {
const { done, value } = await reader.read();
if (done) {
abortController.abort();
break;
}
if (!value) continue;
const { messageUpdates, remainingText } = parseMessageUpdates(prevChunk + value);
prevChunk = remainingText;
for (const messageUpdate of messageUpdates) yield messageUpdate;
}
}
function parseMessageUpdates(value: string): {
messageUpdates: MessageUpdate[];
remainingText: string;
} {
const inputs = value.split("\n");
const messageUpdates: MessageUpdate[] = [];
for (const input of inputs) {
try {
messageUpdates.push(JSON.parse(input) as MessageUpdate);
} catch (error) {
// in case of parsing error, we return what we were able to parse
if (error instanceof SyntaxError) {
return {
messageUpdates,
remainingText: inputs.at(-1) ?? "",
};
}
}
}
return { messageUpdates, remainingText: "" };
}
/**
* Emits all the message updates immediately that aren't "stream" type
* Emits a concatenated "stream" type message update once it detects a full word
* Example: "what" " don" "'t" => "what" " don't"
* Only supports latin languages, ignores others
*/
async function* streamMessageUpdatesToFullWords(
iterator: AsyncGenerator<MessageUpdate>
): AsyncGenerator<MessageUpdate> {
let bufferedStreamUpdates: MessageStreamUpdate[] = [];
const endAlphanumeric = /[a-zA-Z0-9À-ž'`]+$/;
	const beginningAlphanumeric = /^[a-zA-Z0-9À-ž'`]+/;
for await (const messageUpdate of iterator) {
if (messageUpdate.type !== "stream") {
yield messageUpdate;
continue;
}
bufferedStreamUpdates.push(messageUpdate);
let lastIndexEmitted = 0;
for (let i = 1; i < bufferedStreamUpdates.length; i++) {
const prevEndsAlphanumeric = endAlphanumeric.test(bufferedStreamUpdates[i - 1].token);
			const currBeginsAlphanumeric = beginningAlphanumeric.test(bufferedStreamUpdates[i].token);
const shouldCombine = prevEndsAlphanumeric && currBeginsAlphanumeric;
const combinedTooMany = i - lastIndexEmitted >= 5;
if (shouldCombine && !combinedTooMany) continue;
// Combine tokens together and emit
yield {
type: MessageUpdateType.Stream,
token: bufferedStreamUpdates
.slice(lastIndexEmitted, i)
.map((_) => _.token)
.join(""),
};
lastIndexEmitted = i;
}
bufferedStreamUpdates = bufferedStreamUpdates.slice(lastIndexEmitted);
}
for (const messageUpdate of bufferedStreamUpdates) yield messageUpdate;
}
/**
* Attempts to smooth out the time between values emitted by an async iterator
* by waiting for the average time between values to emit the next value
*/
async function* smoothAsyncIterator<T>(iterator: AsyncGenerator<T>): AsyncGenerator<T> {
const eventTarget = new EventTarget();
let done = false;
const valuesBuffer: T[] = [];
const valueTimesMS: number[] = [];
const next = async () => {
const obj = await iterator.next();
if (obj.done) {
done = true;
} else {
valuesBuffer.push(obj.value);
valueTimesMS.push(performance.now());
next();
}
eventTarget.dispatchEvent(new Event("next"));
};
next();
let timeOfLastEmitMS = performance.now();
while (!done || valuesBuffer.length > 0) {
// Only consider the last X times between tokens
const sampledTimesMS = valueTimesMS.slice(-30);
// Get the total time spent in abnormal periods
const anomalyThresholdMS = 2000;
const anomalyDurationMS = sampledTimesMS
.map((time, i, times) => time - times[i - 1])
.slice(1)
.filter((time) => time > anomalyThresholdMS)
.reduce((a, b) => a + b, 0);
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
const totalTimeMSBetweenValues = sampledTimesMS.at(-1)! - sampledTimesMS[0];
const timeMSBetweenValues = totalTimeMSBetweenValues - anomalyDurationMS;
const averageTimeMSBetweenValues = Math.min(
200,
timeMSBetweenValues / (sampledTimesMS.length - 1)
);
const timeSinceLastEmitMS = performance.now() - timeOfLastEmitMS;
// Emit after waiting duration or cancel if "next" event is emitted
const gotNext = await Promise.race([
sleep(Math.max(5, averageTimeMSBetweenValues - timeSinceLastEmitMS)),
waitForEvent(eventTarget, "next"),
]);
// Go to next iteration so we can re-calculate when to emit
if (gotNext) continue;
// Nothing in buffer to emit
if (valuesBuffer.length === 0) continue;
// Emit
timeOfLastEmitMS = performance.now();
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
yield valuesBuffer.shift()!;
}
}
const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
const waitForEvent = (eventTarget: EventTarget, eventName: string) =>
new Promise<boolean>((resolve) =>
eventTarget.addEventListener(eventName, () => resolve(true), { once: true })
);
| chat-ui/src/lib/utils/messageUpdates.ts/0 | {
"file_path": "chat-ui/src/lib/utils/messageUpdates.ts",
"repo_id": "chat-ui",
"token_count": 2914
} |
import { collections } from "$lib/server/database";
import { ObjectId } from "mongodb";
import { describe, expect, it } from "vitest";
import { insertLegacyConversation, insertSideBranchesConversation } from "./treeHelpers.spec";
import type { Message } from "$lib/types/Message";
import { addSibling } from "./addSibling";
const newMessage: Omit<Message, "id"> = {
content: "new message",
from: "user",
};
Object.freeze(newMessage);
describe("addSibling", async () => {
it("should fail on empty conversations", () => {
const conv = {
_id: new ObjectId(),
rootMessageId: undefined,
messages: [],
};
expect(() => addSibling(conv, newMessage, "not-a-real-id-test")).toThrow(
"Cannot add a sibling to an empty conversation"
);
});
it("should fail on legacy conversations", async () => {
const convId = await insertLegacyConversation();
const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
if (!conv) throw new Error("Conversation not found");
expect(() => addSibling(conv, newMessage, conv.messages[0].id)).toThrow(
"Cannot add a sibling to a legacy conversation"
);
});
it("should fail if the sibling message doesn't exist", async () => {
const convId = await insertSideBranchesConversation();
const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
if (!conv) throw new Error("Conversation not found");
expect(() => addSibling(conv, newMessage, "not-a-real-id-test")).toThrow(
"The sibling message doesn't exist"
);
});
// TODO: This behaviour should be fixed, we do not need to fail on the root message.
it("should fail if the sibling message is the root message", async () => {
const convId = await insertSideBranchesConversation();
const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
if (!conv) throw new Error("Conversation not found");
if (!conv.rootMessageId) throw new Error("Root message not found");
expect(() => addSibling(conv, newMessage, conv.rootMessageId as Message["id"])).toThrow(
"The sibling message is the root message, therefore we can't add a sibling"
);
});
it("should add a sibling to a message", async () => {
const convId = await insertSideBranchesConversation();
const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
if (!conv) throw new Error("Conversation not found");
// add sibling and check children count for parents
const nChildren = conv.messages[1].children?.length;
const siblingId = addSibling(conv, newMessage, conv.messages[2].id);
const nChildrenNew = conv.messages[1].children?.length;
if (!nChildren) throw new Error("No children found");
expect(nChildrenNew).toBe(nChildren + 1);
// make sure siblings have the same ancestors
const sibling = conv.messages.find((m) => m.id === siblingId);
expect(sibling?.ancestors).toEqual(conv.messages[2].ancestors);
});
});
| chat-ui/src/lib/utils/tree/addSibling.spec.ts/0 | {
"file_path": "chat-ui/src/lib/utils/tree/addSibling.spec.ts",
"repo_id": "chat-ui",
"token_count": 950
} |
import { collections } from "$lib/server/database";
import type { Assistant } from "$lib/types/Assistant";
import type { User } from "$lib/types/User";
import { generateQueryTokens } from "$lib/utils/searchTokens.js";
import type { Filter } from "mongodb";
import { env } from "$env/dynamic/private";
import { ReviewStatus } from "$lib/types/Review";
const NUM_PER_PAGE = 24;
export async function GET({ url, locals }) {
const modelId = url.searchParams.get("modelId");
const pageIndex = parseInt(url.searchParams.get("p") ?? "0");
const username = url.searchParams.get("user");
const query = url.searchParams.get("q")?.trim() ?? null;
const showUnfeatured = url.searchParams.get("showUnfeatured") === "true";
const createdByCurrentUser = locals.user?.username && locals.user.username === username;
let user: Pick<User, "_id"> | null = null;
if (username) {
user = await collections.users.findOne<Pick<User, "_id">>(
{ username },
{ projection: { _id: 1 } }
);
if (!user) {
return Response.json({ message: `User "${username}" doesn't exist` }, { status: 404 });
}
}
// if featured assistants are required, and we are not on a user page, and we are not an admin who asked to see unfeatured assistants, only show featured assistants
let shouldBeFeatured = {};
if (env.REQUIRE_FEATURED_ASSISTANTS === "true" && !(locals.user?.isAdmin && showUnfeatured)) {
if (!user) {
// only show featured assistants on the community page
shouldBeFeatured = { review: ReviewStatus.APPROVED };
} else if (!createdByCurrentUser) {
// on a user page show assistants that have been approved or are pending
shouldBeFeatured = { review: { $in: [ReviewStatus.APPROVED, ReviewStatus.PENDING] } };
}
}
// fetch the top assistants sorted by user count from biggest to smallest; filter by model, user, search query and featured status if provided
const filter: Filter<Assistant> = {
...(modelId && { modelId }),
...(user && { createdById: user._id }),
...(query && { searchTokens: { $all: generateQueryTokens(query) } }),
...shouldBeFeatured,
};
const assistants = await collections.assistants
.find(filter)
.skip(NUM_PER_PAGE * pageIndex)
.sort({ userCount: -1 })
.limit(NUM_PER_PAGE)
.toArray();
const numTotalItems = await collections.assistants.countDocuments(filter);
return Response.json({
assistants,
selectedModel: modelId ?? "",
numTotalItems,
numItemsPerPage: NUM_PER_PAGE,
query,
});
}
| chat-ui/src/routes/api/assistants/+server.ts/0 | {
"file_path": "chat-ui/src/routes/api/assistants/+server.ts",
"repo_id": "chat-ui",
"token_count": 807
} |
import { collections } from "$lib/server/database";
import { ObjectId } from "mongodb";
import { error } from "@sveltejs/kit";
import { authCondition } from "$lib/server/auth";
import { UrlDependency } from "$lib/types/UrlDependency";
import { convertLegacyConversation } from "$lib/utils/tree/convertLegacyConversation.js";
export const load = async ({ params, depends, locals }) => {
let conversation;
let shared = false;
// if the conversation id is a 7-character share id, look it up in sharedConversations
if (params.id.length === 7) {
// shared link of length 7
conversation = await collections.sharedConversations.findOne({
_id: params.id,
});
shared = true;
if (!conversation) {
error(404, "Conversation not found");
}
} else {
// todo: add validation on params.id
conversation = await collections.conversations.findOne({
_id: new ObjectId(params.id),
...authCondition(locals),
});
depends(UrlDependency.Conversation);
if (!conversation) {
const conversationExists =
(await collections.conversations.countDocuments({
_id: new ObjectId(params.id),
})) !== 0;
if (conversationExists) {
error(
403,
"You don't have access to this conversation. If someone gave you this link, ask them to use the 'share' feature instead."
);
}
error(404, "Conversation not found.");
}
}
const convertedConv = { ...conversation, ...convertLegacyConversation(conversation) };
return {
messages: convertedConv.messages,
title: convertedConv.title,
model: convertedConv.model,
preprompt: convertedConv.preprompt,
rootMessageId: convertedConv.rootMessageId,
assistant: convertedConv.assistantId
? JSON.parse(
JSON.stringify(
await collections.assistants.findOne({
_id: new ObjectId(convertedConv.assistantId),
})
)
)
: null,
shared,
};
};
export const actions = {
deleteBranch: async ({ request, locals, params }) => {
const data = await request.formData();
const messageId = data.get("messageId");
if (!messageId || typeof messageId !== "string") {
error(400, "Invalid message id");
}
const conversation = await collections.conversations.findOne({
...authCondition(locals),
_id: new ObjectId(params.id),
});
if (!conversation) {
error(404, "Conversation not found");
}
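// Keep only messages that are neither the target message nor one of its descendants, and detach the target from its parent's children list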
const filteredMessages = conversation.messages
.filter(
(message) =>
// not the message AND the message is not in ancestors
!(message.id === messageId) && message.ancestors && !message.ancestors.includes(messageId)
)
.map((message) => {
// remove the message from children if it's there
if (message.children && message.children.includes(messageId)) {
message.children = message.children.filter((child) => child !== messageId);
}
return message;
});
await collections.conversations.updateOne(
{ _id: conversation._id, ...authCondition(locals) },
{ $set: { messages: filteredMessages } }
);
return { from: "deleteBranch", ok: true };
},
};
| chat-ui/src/routes/conversation/[id]/+page.server.ts/0 | {
"file_path": "chat-ui/src/routes/conversation/[id]/+page.server.ts",
"repo_id": "chat-ui",
"token_count": 1084
} |
import { base } from "$app/paths";
import { authCondition } from "$lib/server/auth.js";
import { collections } from "$lib/server/database";
import { models } from "$lib/server/models";
import { redirect } from "@sveltejs/kit";
export async function load({ params, locals, parent }) {
const model = models.find(({ id }) => id === params.model);
const data = await parent();
if (!model || model.unlisted) {
redirect(302, `${base}/`);
}
if (locals.user?._id ?? locals.sessionId) {
await collections.settings.updateOne(
authCondition(locals),
{
$set: {
activeModel: model.id,
updatedAt: new Date(),
},
$setOnInsert: {
createdAt: new Date(),
},
},
{
upsert: true,
}
);
}
return {
settings: {
...data.settings,
activeModel: model.id,
},
};
}
| chat-ui/src/routes/models/[...model]/+page.server.ts/0 | {
"file_path": "chat-ui/src/routes/models/[...model]/+page.server.ts",
"repo_id": "chat-ui",
"token_count": 324
} |
import { base } from "$app/paths";
import { requiresUser } from "$lib/server/auth";
import { collections } from "$lib/server/database";
import { fail, type Actions, redirect } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import { z } from "zod";
import { sha256 } from "$lib/utils/sha256";
import sharp from "sharp";
import { parseStringToList } from "$lib/utils/parseStringToList";
import { generateSearchTokens } from "$lib/utils/searchTokens";
import { toolFromConfigs } from "$lib/server/tools";
const newAsssistantSchema = z.object({
name: z.string().min(1),
modelId: z.string().min(1),
preprompt: z.string().min(1),
description: z.string().optional(),
exampleInput1: z.string().optional(),
exampleInput2: z.string().optional(),
exampleInput3: z.string().optional(),
exampleInput4: z.string().optional(),
avatar: z.union([z.instanceof(File), z.literal("null")]).optional(),
ragLinkList: z.preprocess(parseStringToList, z.string().url().array().max(10)),
ragDomainList: z.preprocess(parseStringToList, z.string().array()),
ragAllowAll: z.preprocess((v) => v === "true", z.boolean()),
dynamicPrompt: z.preprocess((v) => v === "on", z.boolean()),
temperature: z
.union([z.literal(""), z.coerce.number().min(0.1).max(2)])
.transform((v) => (v === "" ? undefined : v)),
top_p: z
.union([z.literal(""), z.coerce.number().min(0.05).max(1)])
.transform((v) => (v === "" ? undefined : v)),
repetition_penalty: z
.union([z.literal(""), z.coerce.number().min(0.1).max(2)])
.transform((v) => (v === "" ? undefined : v)),
top_k: z
.union([z.literal(""), z.coerce.number().min(5).max(100)])
.transform((v) => (v === "" ? undefined : v)),
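// Resolve the comma-separated tool ids: keep those that exist in the tools collection, plus any matching the built-in tools from configs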
tools: z
.string()
.optional()
.transform((v) => (v ? v.split(",") : []))
.transform(async (v) => [
...(await collections.tools
.find({ _id: { $in: v.map((toolId) => new ObjectId(toolId)) } })
.project({ _id: 1 })
.toArray()
.then((tools) => tools.map((tool) => tool._id.toString()))),
...toolFromConfigs
.filter((el) => (v ?? []).includes(el._id.toString()))
.map((el) => el._id.toString()),
])
.optional(),
});
const uploadAvatar = async (avatar: File, assistantId: ObjectId): Promise<string> => {
const hash = await sha256(await avatar.text());
const upload = collections.bucket.openUploadStream(`${assistantId.toString()}`, {
metadata: { type: avatar.type, hash },
});
upload.write((await avatar.arrayBuffer()) as unknown as Buffer);
upload.end();
// resolve with the file hash once the upload emits a finish event, or reject on error or after a 10s timeout
return new Promise((resolve, reject) => {
upload.once("finish", () => resolve(hash));
upload.once("error", reject);
setTimeout(() => reject(new Error("Upload timed out")), 10000);
});
};
export const actions: Actions = {
default: async ({ request, locals, params }) => {
const assistant = await collections.assistants.findOne({
_id: new ObjectId(params.assistantId),
});
if (!assistant) {
throw Error("Assistant not found");
}
if (assistant.createdById.toString() !== (locals.user?._id ?? locals.sessionId).toString()) {
throw Error("You are not the author of this assistant");
}
const formData = Object.fromEntries(await request.formData());
const parse = await newAsssistantSchema.safeParseAsync(formData);
if (!parse.success) {
// Loop through the errors array and create a custom errors array
const errors = parse.error.errors.map((error) => {
return {
field: error.path[0],
message: error.message,
};
});
return fail(400, { error: true, errors });
}
// can only create assistants when logged in, IF login is set up
if (!locals.user && requiresUser) {
const errors = [{ field: "preprompt", message: "Must be logged in. Unauthorized" }];
return fail(400, { error: true, errors });
}
const exampleInputs: string[] = [
parse?.data?.exampleInput1 ?? "",
parse?.data?.exampleInput2 ?? "",
parse?.data?.exampleInput3 ?? "",
parse?.data?.exampleInput4 ?? "",
].filter((input) => !!input);
const deleteAvatar = parse.data.avatar === "null";
let hash;
if (parse.data.avatar && parse.data.avatar !== "null" && parse.data.avatar.size > 0) {
let image;
try {
image = await sharp(await parse.data.avatar.arrayBuffer())
.resize(512, 512, { fit: "inside" })
.jpeg({ quality: 80 })
.toBuffer();
} catch (e) {
const errors = [{ field: "avatar", message: (e as Error).message }];
return fail(400, { error: true, errors });
}
const fileCursor = collections.bucket.find({ filename: assistant._id.toString() });
// Delete any existing avatar file before uploading the new one
let fileId = await fileCursor.next();
while (fileId) {
await collections.bucket.delete(fileId._id);
fileId = await fileCursor.next();
}
hash = await uploadAvatar(new File([image], "avatar.jpg"), assistant._id);
} else if (deleteAvatar) {
// delete the avatar
const fileCursor = collections.bucket.find({ filename: assistant._id.toString() });
let fileId = await fileCursor.next();
while (fileId) {
await collections.bucket.delete(fileId._id);
fileId = await fileCursor.next();
}
}
const { acknowledged } = await collections.assistants.updateOne(
{
_id: assistant._id,
},
{
$set: {
name: parse.data.name,
description: parse.data.description,
modelId: parse.data.modelId,
preprompt: parse.data.preprompt,
exampleInputs,
avatar: deleteAvatar ? undefined : (hash ?? assistant.avatar),
updatedAt: new Date(),
rag: {
allowedLinks: parse.data.ragLinkList,
allowedDomains: parse.data.ragDomainList,
allowAllDomains: parse.data.ragAllowAll,
},
tools: parse.data.tools,
dynamicPrompt: parse.data.dynamicPrompt,
searchTokens: generateSearchTokens(parse.data.name),
generateSettings: {
temperature: parse.data.temperature,
top_p: parse.data.top_p,
repetition_penalty: parse.data.repetition_penalty,
top_k: parse.data.top_k,
},
},
}
);
if (acknowledged) {
redirect(302, `${base}/settings/assistants/${assistant._id}`);
} else {
throw Error("Update failed");
}
},
};
| chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/edit/+page.server.ts/0 | {
"file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/edit/+page.server.ts",
"repo_id": "chat-ui",
"token_count": 2384
} |
<script lang="ts">
import Modal from "$lib/components/Modal.svelte";
import ToolEdit from "../../ToolEdit.svelte";
let { data, form = $bindable() } = $props();
</script>
<Modal
on:close={() => window.history.back()}
width="h-[95dvh] w-[90dvw] overflow-hidden rounded-2xl bg-white shadow-2xl outline-none sm:h-[85dvh] xl:w-[1200px] 2xl:h-[75dvh]"
>
<ToolEdit
bind:form
tool={data.tool}
readonly={!data.tool.createdByMe}
on:close={() => {
window.history.back();
}}
/>
</Modal>
| chat-ui/src/routes/tools/[toolId]/edit/+page.svelte/0 | {
"file_path": "chat-ui/src/routes/tools/[toolId]/edit/+page.svelte",
"repo_id": "chat-ui",
"token_count": 219
} |
import adapter from "@sveltejs/adapter-node";
import { vitePreprocess } from "@sveltejs/vite-plugin-svelte";
import dotenv from "dotenv";
import { execSync } from "child_process";
dotenv.config({ path: "./.env.local" });
dotenv.config({ path: "./.env" });
function getCurrentCommitSHA() {
try {
return execSync("git rev-parse HEAD").toString();
} catch (error) {
console.error("Error getting current commit SHA:", error);
return "unknown";
}
}
process.env.PUBLIC_VERSION ??= process.env.npm_package_version;
process.env.PUBLIC_COMMIT_SHA ??= getCurrentCommitSHA();
/** @type {import('@sveltejs/kit').Config} */
const config = {
// Consult https://kit.svelte.dev/docs/integrations#preprocessors
// for more information about preprocessors
preprocess: vitePreprocess(),
kit: {
adapter: adapter(),
paths: {
base: process.env.APP_BASE || "",
relative: false,
},
csrf: {
// handled in hooks.server.ts, because we can have multiple valid origins
checkOrigin: false,
},
csp: {
directives: {
...(process.env.ALLOW_IFRAME === "true" ? {} : { "frame-ancestors": ["'none'"] }),
},
},
},
};
export default config;
| chat-ui/svelte.config.js/0 | {
"file_path": "chat-ui/svelte.config.js",
"repo_id": "chat-ui",
"token_count": 427
} |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def select(dataset: datasets.Dataset):
_ = dataset.select(range(0, len(dataset), 2))
@get_duration
def sort(dataset: datasets.Dataset):
_ = dataset.sort("numbers")
@get_duration
def shuffle(dataset: datasets.Dataset):
_ = dataset.shuffle()
@get_duration
def train_test_split(dataset: datasets.Dataset):
_ = dataset.train_test_split(0.1)
@get_duration
def shard(dataset: datasets.Dataset, num_shards=10):
for shard_id in range(num_shards):
_ = dataset.shard(num_shards, shard_id)
def benchmark_indices_mapping():
times = {"num examples": SPEED_TEST_N_EXAMPLES}
functions = (select, sort, shuffle, train_test_split, shard)
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset")
features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
dataset = generate_example_dataset(
os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
)
print("Functions")
for func in functions:
print(func.__name__)
times[func.__name__] = func(dataset)
with open(RESULTS_FILE_PATH, "wb") as f:
f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_indices_mapping()
| datasets/benchmarks/benchmark_indices_mapping.py/0 | {
"file_path": "datasets/benchmarks/benchmark_indices_mapping.py",
"repo_id": "datasets",
"token_count": 677
} |
# The cache
The cache is one of the reasons why 🤗 Datasets is so efficient. It stores previously downloaded and processed datasets so when you need to use them again, they are reloaded directly from the cache. This avoids having to download a dataset all over again, or reapplying processing functions. Even after you close and start another Python session, 🤗 Datasets will reload your dataset directly from the cache!
## Fingerprint
How does the cache keep track of what transforms are applied to a dataset? Well, 🤗 Datasets assigns a fingerprint to the cache file. A fingerprint keeps track of the current state of a dataset. The initial fingerprint is computed using a hash of the Arrow table, or a hash of the Arrow files if the dataset is on disk. Subsequent fingerprints are computed by combining the fingerprint of the previous state and a hash of the latest transform applied.
<Tip>
Transforms are any of the processing methods from the [How-to Process](./process) guides such as [`Dataset.map`] or [`Dataset.shuffle`].
</Tip>
Here is what actual fingerprints look like:
```py
>>> from datasets import Dataset
>>> dataset1 = Dataset.from_dict({"a": [0, 1, 2]})
>>> dataset2 = dataset1.map(lambda x: {"a": x["a"] + 1})
>>> print(dataset1._fingerprint, dataset2._fingerprint)
d19493523d95e2dc 5b86abacd4b42434
```
In order for a transform to be hashable, it needs to be picklable by [dill](https://dill.readthedocs.io/en/latest/) or [pickle](https://docs.python.org/3/library/pickle).
When you use a non-hashable transform, 🤗 Datasets uses a random fingerprint instead and raises a warning. The non-hashable transform is considered different from the previous transforms. As a result, 🤗 Datasets will recompute all the transforms. Make sure your transforms are serializable with pickle or dill to avoid this!
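As a quick sanity check (a minimal sketch rather than an official API, and only an approximation since 🤗 Datasets uses its own dill-based pickler), you can try dumping your transform with `dill` before passing it to [`Dataset.map`]:
```py
>>> import dill
>>> my_func = lambda example: {"length": len(example["text"])}
>>> _ = dill.dumps(my_func)  # raises an error if the function can't be serialized, in which case caching would fall back to a random fingerprint
```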
An example of when 🤗 Datasets recomputes everything is when caching is disabled. When this happens, the cache files are generated every time and they get written to a temporary directory. Once your Python session ends, the cache files in the temporary directory are deleted. A random hash is assigned to these cache files, instead of a fingerprint.
<Tip>
When caching is disabled, use [`Dataset.save_to_disk`] to save your transformed dataset or it will be deleted once the session ends.
</Tip>
## Hashing
The fingerprint of a dataset is updated by hashing the function passed to `map` as well as the `map` parameters (`batch_size`, `remove_columns`, etc.).
You can check the hash of any Python object using the [`fingerprint.Hasher`]:
```py
>>> from datasets.fingerprint import Hasher
>>> my_func = lambda example: {"length": len(example["text"])}
>>> print(Hasher.hash(my_func))
'3d35e2b3e94c81d6'
```
The hash is computed by dumping the object using a `dill` pickler and hashing the dumped bytes.
The pickler recursively dumps all the variables used in your function, so any change you make to an object that is used in your function will cause the hash to change.
If one of your functions doesn't seem to have the same hash across sessions, it means at least one of its variables contains a Python object that is not deterministic.
When this happens, feel free to hash any object you find suspicious to try to find the object that caused the hash to change.
For example, if you use a list for which the order of its elements is not deterministic across sessions, then the hash won't be the same across sessions either.
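For example (a sketch with a hypothetical captured object), hash the individual objects your transform uses to spot the one whose hash changes across sessions:
```py
>>> from datasets.fingerprint import Hasher
>>> labels = ["negative", "positive"]  # stands in for any object captured by your map function
>>> print(Hasher.hash(labels))       # deterministic: a list keeps its order
>>> print(Hasher.hash(set(labels)))  # a set of strings may hash differently across sessions
```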
| datasets/docs/source/about_cache.mdx/0 | {
"file_path": "datasets/docs/source/about_cache.mdx",
"repo_id": "datasets",
"token_count": 909
} |
# Cloud storage
🤗 Datasets supports access to cloud storage providers through `fsspec` FileSystem implementations.
You can save and load datasets from any cloud storage in a Pythonic way.
Take a look at the following table for some examples of supported cloud storage providers:
| Storage provider | Filesystem implementation |
|----------------------|---------------------------------------------------------------|
| Amazon S3 | [s3fs](https://s3fs.readthedocs.io/en/latest/) |
| Google Cloud Storage | [gcsfs](https://gcsfs.readthedocs.io/en/latest/) |
| Azure Blob/DataLake | [adlfs](https://github.com/fsspec/adlfs) |
| Dropbox | [dropboxdrivefs](https://github.com/MarineChap/dropboxdrivefs)|
| Google Drive | [gdrivefs](https://github.com/intake/gdrivefs) |
| Oracle Cloud Storage | [ocifs](https://ocifs.readthedocs.io/en/latest/) |
This guide will show you how to save and load datasets with any cloud storage.
Here are examples for S3, Google Cloud Storage, Azure Blob Storage, and Oracle Cloud Object Storage.
## Set up your cloud storage FileSystem
### Amazon S3
1. Install the S3 FileSystem implementation:
```
>>> pip install s3fs
```
2. Define your credentials
To use an anonymous connection, use `anon=True`.
Otherwise, include your `aws_access_key_id` and `aws_secret_access_key` whenever you are interacting with a private S3 bucket.
```py
>>> storage_options = {"anon": True} # for anonymous connection
# or use your credentials
>>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key} # for private buckets
# or use a botocore session
>>> import aiobotocore.session
>>> s3_session = aiobotocore.session.AioSession(profile="my_profile_name")
>>> storage_options = {"session": s3_session}
```
3. Create your FileSystem instance
```py
>>> import s3fs
>>> fs = s3fs.S3FileSystem(**storage_options)
```
### Google Cloud Storage
1. Install the Google Cloud Storage implementation:
```
>>> conda install -c conda-forge gcsfs
# or install with pip
>>> pip install gcsfs
```
2. Define your credentials
```py
>>> storage_options={"token": "anon"} # for anonymous connection
# or use your credentials of your default gcloud credentials or from the google metadata service
>>> storage_options={"project": "my-google-project"}
# or use your credentials from elsewhere, see the documentation at https://gcsfs.readthedocs.io/
>>> storage_options={"project": "my-google-project", "token": TOKEN}
```
3. Create your FileSystem instance
```py
>>> import gcsfs
>>> fs = gcsfs.GCSFileSystem(**storage_options)
```
### Azure Blob Storage
1. Install the Azure Blob Storage implementation:
```
>>> conda install -c conda-forge adlfs
# or install with pip
>>> pip install adlfs
```
2. Define your credentials
```py
>>> storage_options = {"anon": True} # for anonymous connection
# or use your credentials
>>> storage_options = {"account_name": ACCOUNT_NAME, "account_key": ACCOUNT_KEY} # gen 2 filesystem
# or use your credentials with the gen 1 filesystem
>>> storage_options={"tenant_id": TENANT_ID, "client_id": CLIENT_ID, "client_secret": CLIENT_SECRET}
```
3. Create your FileSystem instance
```py
>>> import adlfs
>>> fs = adlfs.AzureBlobFileSystem(**storage_options)
```
### Oracle Cloud Object Storage
1. Install the OCI FileSystem implementation:
```
>>> pip install ocifs
```
2. Define your credentials
```py
>>> storage_options = {"config": "~/.oci/config", "region": "us-ashburn-1"}
```
3. Create your FileSystem instance
```py
>>> import ocifs
>>> fs = ocifs.OCIFileSystem(**storage_options)
```
## Load and Save your datasets using your cloud storage FileSystem
### Download and prepare a dataset into a cloud storage
You can download and prepare a dataset into your cloud storage by specifying a remote `output_dir` in `download_and_prepare`.
Don't forget to use the previously defined `storage_options` containing your credentials to write into a private cloud storage.
The `download_and_prepare` method works in two steps:
1. it first downloads the raw data files (if any) in your local cache. You can set your cache directory by passing `cache_dir` to [`load_dataset_builder`]
2. then it generates the dataset in Arrow or Parquet format in your cloud storage by iterating over the raw data files.
Load a dataset builder from the Hugging Face Hub (see [how to load from the Hugging Face Hub](./loading#hugging-face-hub)):
```py
>>> output_dir = "s3://my-bucket/imdb"
>>> builder = load_dataset_builder("imdb")
>>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet")
```
Use your own data files (see [how to load local and remote files](./loading#local-and-remote-files)):
```py
>>> data_files = {"train": ["path/to/train.csv"]}
>>> output_dir = "s3://my-bucket/imdb"
>>> builder = load_dataset_builder("csv", data_files=data_files)
>>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet")
```
It is highly recommended to save the files as compressed Parquet files to optimize I/O by specifying `file_format="parquet"`.
Otherwise the dataset is saved as an uncompressed Arrow file.
You can also specify the size of the shards using `max_shard_size` (default is 500MB):
```py
>>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet", max_shard_size="1GB")
```
#### Dask
Dask is a parallel computing library and it has a pandas-like API for working with larger than memory Parquet datasets in parallel.
Dask can use multiple threads or processes on a single machine, or a cluster of machines to process data in parallel.
Dask supports local data but also data from a cloud storage.
Therefore you can load a dataset saved as sharded Parquet files in Dask with
```py
import dask.dataframe as dd
df = dd.read_parquet(output_dir, storage_options=storage_options)
# or if your dataset is split into train/valid/test
df_train = dd.read_parquet(output_dir + f"/{builder.name}-train-*.parquet", storage_options=storage_options)
df_valid = dd.read_parquet(output_dir + f"/{builder.name}-validation-*.parquet", storage_options=storage_options)
df_test = dd.read_parquet(output_dir + f"/{builder.name}-test-*.parquet", storage_options=storage_options)
```
You can find more about dask dataframes in their [documentation](https://docs.dask.org/en/stable/dataframe.html).
## Saving serialized datasets
After you have processed your dataset, you can save it to your cloud storage with [`Dataset.save_to_disk`]:
```py
# saves encoded_dataset to amazon s3
>>> encoded_dataset.save_to_disk("s3://my-private-datasets/imdb/train", storage_options=storage_options)
# saves encoded_dataset to google cloud storage
>>> encoded_dataset.save_to_disk("gcs://my-private-datasets/imdb/train", storage_options=storage_options)
# saves encoded_dataset to microsoft azure blob/datalake
>>> encoded_dataset.save_to_disk("adl://my-private-datasets/imdb/train", storage_options=storage_options)
```
<Tip>
Remember to define your credentials in your [FileSystem instance](#set-up-your-cloud-storage-filesystem) `fs` whenever you are interacting with a private cloud storage.
</Tip>
## Listing serialized datasets
List files from a cloud storage with your FileSystem instance `fs`, using `fs.ls`:
```py
>>> fs.ls("my-private-datasets/imdb/train", detail=False)
["dataset_info.json.json","dataset.arrow","state.json"]
```
### Load serialized datasets
When you are ready to use your dataset again, reload it with [`Dataset.load_from_disk`]:
```py
>>> from datasets import load_from_disk
# load encoded_dataset from cloud storage
>>> dataset = load_from_disk("s3://a-public-datasets/imdb/train", storage_options=storage_options)
>>> print(len(dataset))
25000
```
| datasets/docs/source/filesystems.mdx/0 | {
"file_path": "datasets/docs/source/filesystems.mdx",
"repo_id": "datasets",
"token_count": 2525
} |
# Loading methods
Methods for listing and loading datasets:
## Datasets
[[autodoc]] datasets.load_dataset
[[autodoc]] datasets.load_from_disk
[[autodoc]] datasets.load_dataset_builder
[[autodoc]] datasets.get_dataset_config_names
[[autodoc]] datasets.get_dataset_infos
[[autodoc]] datasets.get_dataset_split_names
## From files
Configurations used to load data files.
They are used when loading local files or a dataset repository:
- local files: `load_dataset("parquet", data_dir="path/to/data/dir")`
- dataset repository: `load_dataset("allenai/c4")`
You can pass arguments to `load_dataset` to configure data loading.
For example you can specify the `sep` parameter to define the [`~datasets.packaged_modules.csv.CsvConfig`] that is used to load the data:
```python
load_dataset("csv", data_dir="path/to/data/dir", sep="\t")
```
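Similarly, format-specific options configure the corresponding config class; for example, the `field` parameter of [`~datasets.packaged_modules.json.JsonConfig`] selects the key holding the records (this sketch assumes your JSON file nests them under a top-level `"data"` key):
```python
load_dataset("json", data_files="path/to/data.json", field="data")
```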
### Text
[[autodoc]] datasets.packaged_modules.text.TextConfig
[[autodoc]] datasets.packaged_modules.text.Text
### CSV
[[autodoc]] datasets.packaged_modules.csv.CsvConfig
[[autodoc]] datasets.packaged_modules.csv.Csv
### JSON
[[autodoc]] datasets.packaged_modules.json.JsonConfig
[[autodoc]] datasets.packaged_modules.json.Json
### XML
[[autodoc]] datasets.packaged_modules.xml.XmlConfig
[[autodoc]] datasets.packaged_modules.xml.Xml
### Parquet
[[autodoc]] datasets.packaged_modules.parquet.ParquetConfig
[[autodoc]] datasets.packaged_modules.parquet.Parquet
### Arrow
[[autodoc]] datasets.packaged_modules.arrow.ArrowConfig
[[autodoc]] datasets.packaged_modules.arrow.Arrow
### SQL
[[autodoc]] datasets.packaged_modules.sql.SqlConfig
[[autodoc]] datasets.packaged_modules.sql.Sql
### Images
[[autodoc]] datasets.packaged_modules.imagefolder.ImageFolderConfig
[[autodoc]] datasets.packaged_modules.imagefolder.ImageFolder
### Audio
[[autodoc]] datasets.packaged_modules.audiofolder.AudioFolderConfig
[[autodoc]] datasets.packaged_modules.audiofolder.AudioFolder
### Videos
[[autodoc]] datasets.packaged_modules.videofolder.VideoFolderConfig
[[autodoc]] datasets.packaged_modules.videofolder.VideoFolder
### WebDataset
[[autodoc]] datasets.packaged_modules.webdataset.WebDataset
| datasets/docs/source/package_reference/loading_methods.mdx/0 | {
"file_path": "datasets/docs/source/package_reference/loading_methods.mdx",
"repo_id": "datasets",
"token_count": 735
} |
# Use with NumPy
This document is a quick introduction to using `datasets` with NumPy, with a particular focus on how to get
`numpy.ndarray` objects out of our datasets, and how to use them to train models based on NumPy such as `scikit-learn` models.
## Dataset format
By default, datasets return regular Python objects: integers, floats, strings, lists, etc.
To get NumPy arrays instead, you can set the format of the dataset to `numpy`:
```py
>>> from datasets import Dataset
>>> data = [[1, 2], [3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("numpy")
>>> ds[0]
{'data': array([1, 2])}
>>> ds[:2]
{'data': array([
[1, 2],
[3, 4]])}
```
<Tip>
A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to NumPy arrays.
</Tip>
Note that the exact same procedure applies to `DatasetDict` objects, so that when setting the format of a `DatasetDict` to `numpy`, all of the `Dataset`s it contains will be formatted as `numpy`:
```py
>>> from datasets import DatasetDict
>>> data = {"train": {"data": [[1, 2], [3, 4]]}, "test": {"data": [[5, 6], [7, 8]]}}
>>> dds = DatasetDict.from_dict(data)
>>> dds = dds.with_format("numpy")
>>> dds["train"][:2]
{'data': array([
[1, 2],
[3, 4]])}
```
### N-dimensional arrays
If your dataset consists of N-dimensional arrays, you will see that by default they are converted into a single multi-dimensional array if their shape is fixed:
```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3, 4]], [[5, 6],[7, 8]]] # fixed shape
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("numpy")
>>> ds[0]
{'data': array([[1, 2],
[3, 4]])}
```
```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3]], [[4, 5, 6],[7, 8]]] # varying shape
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("numpy")
>>> ds[0]
{'data': array([array([1, 2]), array([3])], dtype=object)}
```
However this logic often requires slow shape comparisons and data copies.
To avoid this, you must explicitly use the [`Array`] feature type and specify the shape of your tensors:
```py
>>> from datasets import Dataset, Features, Array2D
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
>>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')})
>>> ds = Dataset.from_dict({"data": data}, features=features)
>>> ds = ds.with_format("numpy")
>>> ds[0]
{'data': array([[1, 2],
[3, 4]])}
>>> ds[:2]
{'data': array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])}
```
### Other feature types
[`ClassLabel`] data is properly converted to arrays:
```py
>>> from datasets import Dataset, Features, ClassLabel
>>> labels = [0, 0, 1]
>>> features = Features({"label": ClassLabel(names=["negative", "positive"])})
>>> ds = Dataset.from_dict({"label": labels}, features=features)
>>> ds = ds.with_format("numpy")
>>> ds[:3]
{'label': array([0, 0, 1])}
```
String and binary objects are left unchanged, since NumPy formatting mainly targets numerical data.
The [`Image`] and [`Audio`] feature types are also supported.
<Tip>
To use the [`Image`] feature type, you'll need to install the `vision` extra as
`pip install datasets[vision]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Image
>>> images = ["path/to/image.png"] * 10
>>> features = Features({"image": Image()})
>>> ds = Dataset.from_dict({"image": images}, features=features)
>>> ds = ds.with_format("numpy")
>>> ds[0]["image"].shape
(512, 512, 3)
>>> ds[0]
{'image': array([[[ 255, 255, 255],
[ 255, 255, 255],
...,
[ 255, 255, 255],
[ 255, 255, 255]]], dtype=uint8)}
>>> ds[:2]["image"].shape
(2, 512, 512, 3)
>>> ds[:2]
{'image': array([[[[ 255, 255, 255],
[ 255, 255, 255],
...,
[ 255, 255, 255],
[ 255, 255, 255]]]], dtype=uint8)}
```
<Tip>
To use the [`Audio`] feature type, you'll need to install the `audio` extra as
`pip install datasets[audio]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Audio
>>> audio = ["path/to/audio.wav"] * 10
>>> features = Features({"audio": Audio()})
>>> ds = Dataset.from_dict({"audio": audio}, features=features)
>>> ds = ds.with_format("numpy")
>>> ds[0]["audio"]["array"]
array([-0.059021 , -0.03894043, -0.00735474, ..., 0.0133667 ,
0.01809692, 0.00268555], dtype=float32)
>>> ds[0]["audio"]["sampling_rate"]
array(44100, weak_type=True)
```
## Data loading
NumPy doesn't have any built-in data loading capabilities, so you'll either need to materialize the NumPy arrays like `X, y` to use in `scikit-learn` or use a library such as [PyTorch](https://pytorch.org/) to load your data using a `DataLoader`.
### Using `with_format('numpy')`
The easiest way to get NumPy arrays out of a dataset is to use the `with_format('numpy')` method. Let's assume
that we want to train a neural network on the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) available
on the Hugging Face Hub at https://huggingface.co/datasets/mnist.
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("mnist")
>>> ds = ds.with_format("numpy")
>>> ds["train"][0]
{'image': array([[ 0, 0, 0, ...],
[ 0, 0, 0, ...],
...,
[ 0, 0, 0, ...],
[ 0, 0, 0, ...]], dtype=uint8),
'label': array(5)}
```
Once the format is set, we can feed the dataset to a NumPy-based model in batches using the `Dataset.iter()`
method:
```py
>>> for epoch in range(epochs):
... for batch in ds["train"].iter(batch_size=32):
... x, y = batch["image"], batch["label"]
... ...
```
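If you prefer the scikit-learn route mentioned earlier, a minimal sketch (assuming `scikit-learn` is installed and using the MNIST columns from the example above) is to materialize entire columns as arrays:
```py
>>> from sklearn.linear_model import LogisticRegression
>>> train = ds["train"]
>>> X = train["image"].reshape(len(train), -1)  # flatten the 28x28 images into feature vectors
>>> y = train["label"]
>>> clf = LogisticRegression(max_iter=50).fit(X, y)
```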
| datasets/docs/source/use_with_numpy.mdx/0 | {
"file_path": "datasets/docs/source/use_with_numpy.mdx",
"repo_id": "datasets",
"token_count": 2282
} |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""DatasetBuilder base class."""
import abc
import contextlib
import copy
import inspect
import os
import posixpath
import shutil
import textwrap
import time
import urllib
from dataclasses import dataclass
from functools import partial
from pathlib import Path
from typing import TYPE_CHECKING, Dict, Iterable, Mapping, Optional, Tuple, Union
from unittest.mock import patch
import fsspec
from fsspec.core import url_to_fs
from multiprocess import Pool
from tqdm.contrib.concurrent import thread_map
from . import config, utils
from .arrow_dataset import Dataset
from .arrow_reader import (
ArrowReader,
ReadInstruction,
)
from .arrow_writer import ArrowWriter, ParquetWriter, SchemaInferenceError
from .data_files import DataFilesDict, DataFilesPatternsDict, sanitize_patterns
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download.download_config import DownloadConfig
from .download.download_manager import DownloadManager, DownloadMode
from .download.streaming_download_manager import StreamingDownloadManager, xjoin
from .exceptions import DatasetGenerationCastError, DatasetGenerationError, FileFormatError, ManualDownloadError
from .features import Features
from .filesystems import (
is_remote_filesystem,
rename,
)
from .fingerprint import Hasher
from .info import DatasetInfo, DatasetInfosDict, PostProcessedInfo
from .iterable_dataset import ArrowExamplesIterable, ExamplesIterable, IterableDataset
from .keyhash import DuplicatedKeysError
from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH, camelcase_to_snakecase
from .splits import Split, SplitDict, SplitGenerator, SplitInfo
from .streaming import extend_dataset_builder_for_streaming
from .table import CastError
from .utils import logging
from .utils import tqdm as hf_tqdm
from .utils._filelock import FileLock
from .utils.file_utils import is_remote_url
from .utils.info_utils import VerificationMode, get_size_checksum_dict, verify_checksums, verify_splits
from .utils.py_utils import (
classproperty,
convert_file_size_to_int,
has_sufficient_disk_space,
iflatmap_unordered,
map_nested,
memoize,
size_str,
temporary_assignment,
)
from .utils.sharding import _number_of_shards_in_gen_kwargs, _split_gen_kwargs
from .utils.track import tracked_list
if TYPE_CHECKING:
from .load import DatasetModule
logger = logging.get_logger(__name__)
class InvalidConfigName(ValueError):
pass
@dataclass
class BuilderConfig:
"""Base class for `DatasetBuilder` data configuration.
`DatasetBuilder` subclasses with data configuration options should subclass
`BuilderConfig` and add their own properties.
Attributes:
name (`str`, defaults to `default`):
The name of the configuration.
version (`Version` or `str`, defaults to `0.0.0`):
The version of the configuration.
data_dir (`str`, *optional*):
Path to the directory containing the source data.
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
description (`str`, *optional*):
A human description of the configuration.
"""
name: str = "default"
version: Optional[Union[utils.Version, str]] = utils.Version("0.0.0")
data_dir: Optional[str] = None
data_files: Optional[Union[DataFilesDict, DataFilesPatternsDict]] = None
description: Optional[str] = None
def __post_init__(self):
# The config name is used to name the cache directory.
for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
if invalid_char in self.name:
raise InvalidConfigName(
f"Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{self.name}'. "
f"They could create issues when creating a directory for this config on Windows filesystem."
)
if self.data_files is not None and not isinstance(self.data_files, (DataFilesDict, DataFilesPatternsDict)):
raise ValueError(f"Expected a DataFilesDict in data_files but got {self.data_files}")
def __eq__(self, o):
# we need to override the default dataclass __eq__ since it doesn't check for
# other attributes that the ones of the signature.
if set(self.__dict__.keys()) != set(o.__dict__.keys()):
return False
return all((k, getattr(self, k)) == (k, getattr(o, k)) for k in self.__dict__.keys())
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[Features] = None,
) -> str:
"""
The config id is used to build the cache directory.
By default it is equal to the config name.
However the name of a config is not sufficient to have a unique identifier for the dataset being generated
since it doesn't take into account:
- the config kwargs that can be used to overwrite attributes
- the custom features used to write the dataset
- the data_files for json/text/csv/pandas datasets
Therefore the config id is just the config name with an optional suffix based on these.
"""
# Possibly add a suffix to the name to handle custom features/data_files/config_kwargs
suffix: Optional[str] = None
config_kwargs_to_add_to_suffix = config_kwargs.copy()
# name and version are already used to build the cache directory
config_kwargs_to_add_to_suffix.pop("name", None)
config_kwargs_to_add_to_suffix.pop("version", None)
# data dir handling (when specified it points to the manually downloaded data):
# it was previously ignored before the introduction of config id because we didn't want
# to change the config name. Now it's fine to take it into account for the config id.
# config_kwargs_to_add_to_suffix.pop("data_dir", None)
if "data_dir" in config_kwargs_to_add_to_suffix:
if config_kwargs_to_add_to_suffix["data_dir"] is None:
config_kwargs_to_add_to_suffix.pop("data_dir", None)
else:
# canonicalize the data dir to avoid two paths to the same location having different
# hashes
data_dir = config_kwargs_to_add_to_suffix["data_dir"]
data_dir = os.path.normpath(data_dir)
config_kwargs_to_add_to_suffix["data_dir"] = data_dir
if config_kwargs_to_add_to_suffix:
# we don't care about the order of the kwargs
config_kwargs_to_add_to_suffix = {
k: config_kwargs_to_add_to_suffix[k] for k in sorted(config_kwargs_to_add_to_suffix)
}
if all(isinstance(v, (str, bool, int, float)) for v in config_kwargs_to_add_to_suffix.values()):
suffix = ",".join(
str(k) + "=" + urllib.parse.quote_plus(str(v)) for k, v in config_kwargs_to_add_to_suffix.items()
)
if len(suffix) > 32: # hash if too long
suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
else:
suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
if custom_features is not None:
m = Hasher()
if suffix:
m.update(suffix)
m.update(custom_features)
suffix = m.hexdigest()
if suffix:
config_id = self.name + "-" + suffix
if len(config_id) > config.MAX_DATASET_CONFIG_ID_READABLE_LENGTH:
config_id = self.name + "-" + Hasher.hash(suffix)
return config_id
else:
return self.name
def _resolve_data_files(self, base_path: str, download_config: DownloadConfig) -> None:
if isinstance(self.data_files, DataFilesPatternsDict):
base_path = xjoin(base_path, self.data_dir) if self.data_dir else base_path
self.data_files = self.data_files.resolve(base_path, download_config)
class DatasetBuilder:
"""Abstract base class for all datasets.
`DatasetBuilder` has 3 key methods:
- [`DatasetBuilder.info`]: Documents the dataset, including feature
names, types, shapes, version, splits, citation, etc.
- [`DatasetBuilder.download_and_prepare`]: Downloads the source data
and writes it to disk.
- [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`].
Some `DatasetBuilder`s expose multiple variants of the
dataset by defining a [`BuilderConfig`] subclass and accepting a
config object (or name) on construction. Configurable datasets expose a
pre-defined set of configurations in [`DatasetBuilder.builder_configs`].
Args:
cache_dir (`str`, *optional*):
Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`.
dataset_name (`str`, *optional*):
Name of the dataset, if different from the builder name. Useful for packaged builders
like csv, imagefolder, audiofolder, etc. to reflect the difference between datasets
that use the same packaged builder.
config_name (`str`, *optional*):
Name of the dataset configuration.
It affects the data generated on disk. Different configurations will have their own subdirectories and
versions.
If not provided, the default configuration is used (if it exists).
<Added version="2.3.0">
Parameter `name` was renamed to `config_name`.
</Added>
hash (`str`, *optional*):
Hash specific to the dataset code. Used to update the caching directory when the
dataset loading script code is updated (to avoid reusing old data).
The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`.
base_path (`str`, *optional*):
Base path for relative paths that are used to download files.
This can be a remote URL.
features ([`Features`], *optional*):
Features types to use with this dataset.
It can be used to change the [`Features`] types of a dataset, for example.
token (`str` or `bool`, *optional*):
String or boolean to use as Bearer token for remote files on the
Datasets Hub. If `True`, will get token from `"~/.huggingface"`.
repo_id (`str`, *optional*):
ID of the dataset repository.
Used to distinguish builders with the same name but not coming from the same namespace, for example "squad"
and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad".
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
For builders like "csv" or "json" that need the user to specify data files. They can be either
local or remote files. For convenience, you can use a `DataFilesDict`.
data_dir (`str`, *optional*):
Path to directory containing source data file(s).
Use only if `data_files` is not passed, in which case it is equivalent to passing
`os.path.join(data_dir, "**")` as `data_files`.
For builders that require manual download, it must be the path to the local directory containing the
manually downloaded data.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
writer_batch_size (`int`, *optional*):
Batch size used by the ArrowWriter.
It defines the number of samples that are kept in memory before writing them
and also the length of the arrow chunks.
None means that the ArrowWriter will use its default value.
**config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder
configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder
configuration class is [`BuilderConfig`] or a subclass of it.
"""
# Default version
VERSION = None # Default version set in BuilderConfig
# Class for the builder config.
BUILDER_CONFIG_CLASS = BuilderConfig
# Named configurations that modify the data generated by download_and_prepare.
BUILDER_CONFIGS = []
# Optional default config name to be used when name is None
DEFAULT_CONFIG_NAME = None
# Default batch size used by the ArrowWriter
# It defines the number of samples that are kept in memory before writing them
# and also the length of the arrow chunks
# None means that the ArrowWriter will use its default value
DEFAULT_WRITER_BATCH_SIZE = None
def __init__(
self,
cache_dir: Optional[str] = None,
dataset_name: Optional[str] = None,
config_name: Optional[str] = None,
hash: Optional[str] = None,
base_path: Optional[str] = None,
info: Optional[DatasetInfo] = None,
features: Optional[Features] = None,
token: Optional[Union[bool, str]] = None,
repo_id: Optional[str] = None,
data_files: Optional[Union[str, list, dict, DataFilesDict]] = None,
data_dir: Optional[str] = None,
storage_options: Optional[dict] = None,
writer_batch_size: Optional[int] = None,
**config_kwargs,
):
# DatasetBuilder name
self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1])
self.hash: Optional[str] = hash
self.base_path = base_path
self.token = token
self.repo_id = repo_id
self.storage_options = storage_options or {}
self.dataset_name = camelcase_to_snakecase(dataset_name) if dataset_name else self.name
self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE
if data_files is not None and not isinstance(data_files, DataFilesDict):
data_files = DataFilesDict.from_patterns(
sanitize_patterns(data_files),
base_path=base_path,
download_config=DownloadConfig(token=token, storage_options=self.storage_options),
)
# Prepare config: DatasetConfig contains name, version and description but can be extended by each dataset
if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None:
config_kwargs["features"] = features
if data_files is not None:
config_kwargs["data_files"] = data_files
if data_dir is not None:
config_kwargs["data_dir"] = data_dir
self.config_kwargs = config_kwargs
self.config, self.config_id = self._create_builder_config(
config_name=config_name,
custom_features=features,
**config_kwargs,
)
# prepare info: DatasetInfo are a standardized dataclass across all datasets
# Prefill datasetinfo
if info is None:
# TODO FOR PACKAGED MODULES IT IMPORTS DATA FROM src/packaged_modules which doesn't make sense
info = self.get_exported_dataset_info()
info.update(self._info())
info.builder_name = self.name
info.dataset_name = self.dataset_name
info.config_name = self.config.name
info.version = self.config.version
self.info = info
# update info with user specified infos
if features is not None:
self.info.features = features
# Prepare data dirs:
# cache_dir can be a remote bucket on GCS or S3
self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE)
self._cache_dir_root = (
self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root)
)
self._cache_downloaded_dir = (
posixpath.join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR)
if cache_dir
else str(config.DOWNLOADED_DATASETS_PATH)
)
self._cache_downloaded_dir = (
self._cache_downloaded_dir
if is_remote_url(self._cache_downloaded_dir)
else os.path.expanduser(self._cache_downloaded_dir)
)
# In case there exists a legacy cache directory
self._legacy_relative_data_dir = None
self._cache_dir = self._build_cache_dir()
if not is_remote_url(self._cache_dir_root):
os.makedirs(self._cache_dir_root, exist_ok=True)
lock_path = os.path.join(
self._cache_dir_root, Path(self._cache_dir).as_posix().replace("/", "_") + ".lock"
)
with FileLock(lock_path):
if os.path.exists(self._cache_dir): # check if data exist
if len(os.listdir(self._cache_dir)) > 0:
if os.path.exists(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)):
logger.info("Overwrite dataset info from restored data version if exists.")
self.info = DatasetInfo.from_directory(self._cache_dir)
else: # dir exists but no data, remove the empty dir as data aren't available anymore
logger.warning(
f"Old caching folder {self._cache_dir} for dataset {self.dataset_name} exists but no data were found. Removing it. "
)
os.rmdir(self._cache_dir)
# Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare
self._output_dir = self._cache_dir
self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file")
# Set download manager
self.dl_manager = None
# Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value.
self._record_infos = False
# Set in `.download_and_prepare` once the format of the generated dataset is known
self._file_format = None
# Enable streaming (e.g. it patches "open" to work with remote files)
extend_dataset_builder_for_streaming(self)
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__ = d
# Re-enable streaming, since patched functions are not kept when pickling
extend_dataset_builder_for_streaming(self)
# Must be set for datasets that use 'data_dir' functionality - the ones
# that require users to do additional steps to download the data
# (this is usually due to some external regulations / rules).
# This field should contain a string with user instructions, including
# the list of files that should be present. It will be
# displayed in the dataset documentation.
@property
def manual_download_instructions(self) -> Optional[str]:
return None
def _check_legacy_cache(self) -> Optional[str]:
"""Check for the old cache directory template {cache_dir}/{namespace}___{builder_name} from 2.13"""
if (
self.__module__.startswith("datasets.")
and not is_remote_url(self._cache_dir_root)
and self.config.name == "default"
):
from .packaged_modules import _PACKAGED_DATASETS_MODULES
namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name
config_id = config_name + self.config_id[len(self.config.name) :]
hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1]
legacy_relative_data_dir = posixpath.join(
self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
config_id,
"0.0.0",
hash,
)
legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
if os.path.isdir(legacy_cache_dir):
return legacy_relative_data_dir
def _check_legacy_cache2(self, dataset_module: "DatasetModule") -> Optional[str]:
"""Check for the old cache directory template {cache_dir}/{namespace}___{dataset_name}/{config_name}-xxx from 2.14 and 2.15"""
if (
self.__module__.startswith("datasets.")
and not is_remote_url(self._cache_dir_root)
and not (set(self.config_kwargs) - {"data_files", "data_dir"})
):
from .packaged_modules import _PACKAGED_DATASETS_MODULES_2_15_HASHES
from .utils._dill import Pickler
def update_hash_with_config_parameters(hash: str, config_parameters: dict) -> str:
"""
Used to update hash of packaged modules which is used for creating unique cache directories to reflect
different config parameters which are passed in metadata from readme.
"""
params_to_exclude = {"config_name", "version", "description"}
params_to_add_to_hash = {
param: value
for param, value in sorted(config_parameters.items())
if param not in params_to_exclude
}
m = Hasher()
m.update(hash)
m.update(params_to_add_to_hash)
return m.hexdigest()
namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
with patch.object(Pickler, "_legacy_no_dict_keys_sorting", True):
config_id = self.config.name + "-" + Hasher.hash({"data_files": self.config.data_files})
hash = _PACKAGED_DATASETS_MODULES_2_15_HASHES.get(self.name, "missing")
if (
dataset_module.builder_configs_parameters.metadata_configs
and self.config.name in dataset_module.builder_configs_parameters.metadata_configs
):
hash = update_hash_with_config_parameters(
hash, dataset_module.builder_configs_parameters.metadata_configs[self.config.name]
)
legacy_relative_data_dir = posixpath.join(
self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
config_id,
"0.0.0",
hash,
)
legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
if os.path.isdir(legacy_cache_dir):
return legacy_relative_data_dir
@classmethod
def get_all_exported_dataset_infos(cls) -> DatasetInfosDict:
"""Empty dict if doesn't exist
Example:
```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder('vivos')
>>> ds_builder.get_all_exported_dataset_infos()
{'default': DatasetInfo(description='', citation='', homepage='', license='', features={'speaker_id': Value(dtype='string', id=None), 'path': Value(dtype='string', id=None), 'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None), 'sentence': Value(dtype='string', id=None)}, post_processed=None, supervised_keys=None, builder_name=None, dataset_name=None, config_name='default', version=None, splits={'train': SplitInfo(name='train', num_bytes=1722002133, num_examples=11660, shard_lengths=None, dataset_name=None), 'test': SplitInfo(name='test', num_bytes=86120227, num_examples=760, shard_lengths=None, dataset_name=None)}, download_checksums=None, download_size=1475540500, post_processing_size=None, dataset_size=1808122360, size_in_bytes=None)}
```
"""
return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
def get_exported_dataset_info(self) -> DatasetInfo:
"""Empty `DatasetInfo` if doesn't exist
Example:
```py
>>> from datasets import load_dataset_builder
        >>> ds_builder = load_dataset_builder('vivos')
>>> ds_builder.get_exported_dataset_info()
DatasetInfo(description='', citation='', homepage='', license='', features={'speaker_id': Value(dtype='string', id=None), 'path': Value(dtype='string', id=None), 'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None), 'sentence': Value(dtype='string', id=None)}, post_processed=None, supervised_keys=None, builder_name=None, dataset_name=None, config_name='default', version=None, splits={'train': SplitInfo(name='train', num_bytes=1722002133, num_examples=11660, shard_lengths=None, dataset_name=None), 'test': SplitInfo(name='test', num_bytes=86120227, num_examples=760, shard_lengths=None, dataset_name=None)}, download_checksums=None, download_size=1475540500, post_processing_size=None, dataset_size=1808122360, size_in_bytes=None)
```
"""
return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo())
def _create_builder_config(
self, config_name=None, custom_features=None, **config_kwargs
) -> Tuple[BuilderConfig, str]:
"""Create and validate BuilderConfig object as well as a unique config id for this config.
Raises ValueError if there are multiple builder configs and config_name and DEFAULT_CONFIG_NAME are None.
        config_kwargs override the default kwargs in config
"""
builder_config = None
# try default config
if config_name is None and self.BUILDER_CONFIGS:
if self.DEFAULT_CONFIG_NAME is not None:
builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME)
logger.info(f"No config specified, defaulting to: {self.dataset_name}/{builder_config.name}")
else:
if len(self.BUILDER_CONFIGS) > 1:
if not config_kwargs:
example_of_usage = (
f"load_dataset('{self.repo_id or self.dataset_name}', '{self.BUILDER_CONFIGS[0].name}')"
)
raise ValueError(
"Config name is missing."
f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}"
+ f"\nExample of usage:\n\t`{example_of_usage}`"
)
else:
builder_config = self.BUILDER_CONFIGS[0]
logger.info(
f"No config specified, defaulting to the single config: {self.dataset_name}/{builder_config.name}"
)
# try to get config by name
if isinstance(config_name, str):
builder_config = self.builder_configs.get(config_name)
if builder_config is None and self.BUILDER_CONFIGS:
raise ValueError(
f"BuilderConfig '{config_name}' not found. Available: {list(self.builder_configs.keys())}"
)
# if not using an existing config, then create a new config on the fly
if not builder_config:
if config_name is not None:
config_kwargs["name"] = config_name
elif self.DEFAULT_CONFIG_NAME and not config_kwargs:
# Use DEFAULT_CONFIG_NAME only if no config_kwargs are passed
config_kwargs["name"] = self.DEFAULT_CONFIG_NAME
if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
config_kwargs["version"] = self.VERSION
builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
# otherwise use the config_kwargs to overwrite the attributes
else:
builder_config = copy.deepcopy(builder_config) if config_kwargs else builder_config
for key, value in config_kwargs.items():
if value is not None:
if not hasattr(builder_config, key):
raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.")
setattr(builder_config, key, value)
if not builder_config.name:
raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}")
# resolve data files if needed
builder_config._resolve_data_files(
base_path=self.base_path,
download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
)
# compute the config id that is going to be used for caching
config_id = builder_config.create_config_id(
config_kwargs,
custom_features=custom_features,
)
is_custom = (config_id not in self.builder_configs) and config_id != "default"
if is_custom:
logger.info(f"Using custom data configuration {config_id}")
else:
if (
builder_config.name in self.builder_configs
and builder_config != self.builder_configs[builder_config.name]
):
raise ValueError(
"Cannot name a custom BuilderConfig the same as an available "
f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}"
)
if not builder_config.version:
raise ValueError(f"BuilderConfig {builder_config.name} must have a version")
return builder_config, config_id
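    # Sketch (hypothetical call, for illustration only): `self._create_builder_config("plain_text", data_dir="./data")`
    # would return the matching BuilderConfig with `data_dir` overridden, plus a config id derived from the config
    # name and the custom keyword arguments; the cache directory is keyed on that config id.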
@classproperty
@classmethod
@memoize()
def builder_configs(cls) -> Dict[str, BuilderConfig]:
"""Dictionary of pre-defined configurations for this builder class."""
configs = {config.name: config for config in cls.BUILDER_CONFIGS}
if len(configs) != len(cls.BUILDER_CONFIGS):
names = [config.name for config in cls.BUILDER_CONFIGS]
raise ValueError(f"Names in BUILDER_CONFIGS must not be duplicated. Got {names}")
return configs
@property
def cache_dir(self):
return self._cache_dir
def _use_legacy_cache_dir_if_possible(self, dataset_module: "DatasetModule"):
# Check for the legacy cache directory template (datasets<3.0.0)
self._legacy_relative_data_dir = (
self._check_legacy_cache2(dataset_module) or self._check_legacy_cache() or None
)
self._cache_dir = self._build_cache_dir()
self._output_dir = self._cache_dir
def _relative_data_dir(self, with_version=True, with_hash=True) -> str:
"""Relative path of this dataset in cache_dir:
        Will be:
            self.dataset_name/self.config_id/self.config.version/self.hash/
        or if a repo_id with a namespace has been specified:
            self.namespace___self.dataset_name/self.config_id/self.config.version/self.hash/
        If any of these elements is missing, or if ``with_version=False``, the corresponding subfolders are dropped.
"""
if self._legacy_relative_data_dir is not None and with_version and with_hash:
return self._legacy_relative_data_dir
namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
builder_data_dir = self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}"
builder_data_dir = posixpath.join(builder_data_dir, self.config_id)
if with_version:
builder_data_dir = posixpath.join(builder_data_dir, str(self.config.version))
if with_hash and self.hash and isinstance(self.hash, str):
builder_data_dir = posixpath.join(builder_data_dir, self.hash)
return builder_data_dir
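    # Sketch (hypothetical values, for illustration only): for repo_id "username/my_dataset", config_id "default",
    # version "0.0.0" and hash "abc123", `_relative_data_dir()` resolves to
    #   username___my_dataset/default/0.0.0/abc123
    # and a dataset without a namespace simply drops the "username___" prefix.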
def _build_cache_dir(self):
"""Return the data directory for the current version."""
builder_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=False))
version_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=True))
def _other_versions_on_disk():
"""Returns previous versions on disk."""
if not os.path.exists(builder_data_dir):
return []
version_dirnames = []
for dir_name in os.listdir(builder_data_dir):
try:
version_dirnames.append((utils.Version(dir_name), dir_name))
except ValueError: # Invalid version (ex: incomplete data dir)
pass
version_dirnames.sort(reverse=True)
return version_dirnames
# Check and warn if other versions exist
if not is_remote_url(builder_data_dir):
version_dirs = _other_versions_on_disk()
if version_dirs:
other_version = version_dirs[0][0]
if other_version != self.config.version:
warn_msg = (
f"Found a different version {str(other_version)} of dataset {self.dataset_name} in "
f"cache_dir {self._cache_dir_root}. Using currently defined version "
f"{str(self.config.version)}."
)
logger.warning(warn_msg)
return version_data_dir
@abc.abstractmethod
def _info(self) -> DatasetInfo:
"""Construct the DatasetInfo object. See `DatasetInfo` for details.
Warning: This function is only called once and the result is cached for all
following .info() calls.
Returns:
info: (DatasetInfo) The dataset information
"""
raise NotImplementedError
@classmethod
def get_imported_module_dir(cls):
"""Return the path of the module of this class or subclass."""
return os.path.dirname(inspect.getfile(inspect.getmodule(cls)))
def _rename(self, src: str, dst: str):
rename(self._fs, src, dst)
def download_and_prepare(
self,
output_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
verification_mode: Optional[Union[VerificationMode, str]] = None,
dl_manager: Optional[DownloadManager] = None,
base_path: Optional[str] = None,
file_format: str = "arrow",
max_shard_size: Optional[Union[int, str]] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**download_and_prepare_kwargs,
):
"""Downloads and prepares dataset for reading.
Args:
output_dir (`str`, *optional*):
Output directory for the dataset.
Default to this builder's `cache_dir`, which is inside `~/.cache/huggingface/datasets` by default.
<Added version="2.5.0"/>
download_config (`DownloadConfig`, *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, *optional*):
Select the download/generate mode, default to `REUSE_DATASET_IF_EXISTS`.
verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
<Added version="2.9.1"/>
dl_manager (`DownloadManager`, *optional*):
            Specific `DownloadManager` to use.
base_path (`str`, *optional*):
Base path for relative paths that are used to download files. This can be a remote url.
If not specified, the value of the `base_path` attribute (`self.base_path`) will be used instead.
file_format (`str`, *optional*):
Format of the data files in which the dataset will be written.
Supported formats: "arrow", "parquet". Default to "arrow" format.
If the format is "parquet", then image and audio data are embedded into the Parquet files instead of pointing to local files.
<Added version="2.5.0"/>
max_shard_size (`Union[str, int]`, *optional*):
Maximum number of bytes written per shard, default is "500MB".
The size is based on uncompressed data size, so in practice your shard files may be smaller than
`max_shard_size` thanks to Parquet compression for example.
<Added version="2.5.0"/>
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
Multiprocessing is disabled by default.
<Added version="2.7.0"/>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the caching file-system backend, if any.
<Added version="2.5.0"/>
            **download_and_prepare_kwargs (additional keyword arguments): Keyword arguments forwarded to `_download_and_prepare`.
Example:
Download and prepare the dataset as Arrow files that can be loaded as a Dataset using `builder.as_dataset()`:
```py
>>> from datasets import load_dataset_builder
>>> builder = load_dataset_builder("rotten_tomatoes")
>>> builder.download_and_prepare()
```
Download and prepare the dataset as sharded Parquet files locally:
```py
>>> from datasets import load_dataset_builder
>>> builder = load_dataset_builder("rotten_tomatoes")
>>> builder.download_and_prepare("./output_dir", file_format="parquet")
```
Download and prepare the dataset as sharded Parquet files in a cloud storage:
```py
>>> from datasets import load_dataset_builder
>>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key}
>>> builder = load_dataset_builder("rotten_tomatoes")
>>> builder.download_and_prepare("s3://my-bucket/my_rotten_tomatoes", storage_options=storage_options, file_format="parquet")
```
"""
output_dir = output_dir if output_dir is not None else self._cache_dir
# output_dir can be a remote bucket on GCS or S3
fs, output_dir = url_to_fs(output_dir, **(storage_options or {}))
self._fs = fs
self._output_dir = output_dir if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(output_dir)
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
base_path = base_path if base_path is not None else self.base_path
if file_format is not None and file_format not in ["arrow", "parquet"]:
raise ValueError(f"Unsupported file_format: {file_format}. Expected 'arrow' or 'parquet'")
self._file_format = file_format
if self._fs._strip_protocol(self._output_dir) == "":
# We don't support the root directory, because it has no dirname,
# and we need a dirname to use a <dirname>.incomplete directory
# when the dataset is being written
raise RuntimeError(
f"Unable to download and prepare the dataset at the root {self._output_dir}. "
f"Please specify a subdirectory, e.g. '{self._output_dir + self.dataset_name}'"
)
if dl_manager is None:
if download_config is None:
download_config = DownloadConfig(
cache_dir=self._cache_downloaded_dir,
force_download=download_mode == DownloadMode.FORCE_REDOWNLOAD,
force_extract=download_mode == DownloadMode.FORCE_REDOWNLOAD,
use_etag=False,
num_proc=num_proc,
token=self.token,
storage_options=self.storage_options,
) # We don't use etag for data files to speed up the process
dl_manager = DownloadManager(
dataset_name=self.dataset_name,
download_config=download_config,
data_dir=self.config.data_dir,
base_path=base_path,
record_checksums=(self._record_infos or verification_mode == VerificationMode.ALL_CHECKS),
)
is_local = not is_remote_filesystem(self._fs)
self.dl_manager = dl_manager
# Prevent parallel local disk operations
if is_local:
# Create parent directory of the output_dir to put the lock file in there
Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True)
lock_path = self._output_dir + "_builder.lock"
# File locking only with local paths; no file locking on GCS or S3
with FileLock(lock_path) if is_local else contextlib.nullcontext():
# Check if the data already exists
data_exists = self._fs.exists(posixpath.join(self._output_dir, config.DATASET_INFO_FILENAME))
if data_exists and download_mode == DownloadMode.REUSE_DATASET_IF_EXISTS:
logger.info(f"Found cached dataset {self.dataset_name} ({self._output_dir})")
# We need to update the info in case some splits were added in the meantime
# for example when calling load_dataset from multiple workers.
self.info = self._load_info()
self.download_post_processing_resources(dl_manager)
return
logger.info(f"Generating dataset {self.dataset_name} ({self._output_dir})")
if is_local: # if cache dir is local, check for available space
if not has_sufficient_disk_space(
self.info.size_in_bytes or 0, directory=Path(self._output_dir).parent
):
raise OSError(
f"Not enough disk space. Needed: {size_str(self.info.size_in_bytes or 0)} (download: {size_str(self.info.download_size or 0)}, generated: {size_str(self.info.dataset_size or 0)}, post-processed: {size_str(self.info.post_processing_size or 0)})"
)
@contextlib.contextmanager
def incomplete_dir(dirname):
"""Create temporary dir for dirname and rename on exit."""
if not is_local:
self._fs.makedirs(dirname, exist_ok=True)
yield dirname
else:
tmp_dir = dirname + ".incomplete"
os.makedirs(tmp_dir, exist_ok=True)
try:
yield tmp_dir
if os.path.isdir(dirname):
shutil.rmtree(dirname)
# LocalFileSystem.mv does copy + rm, it is more efficient to simply rename a local directory
shutil.move(tmp_dir, dirname)
finally:
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
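            # Note: on a local filesystem the data is first written to "<output_dir>.incomplete" and only moved
            # to "<output_dir>" if generation succeeds, so a crashed run never leaves a partially written cache
            # directory behind.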
            # This log message is intentional: it gives the user the size information needed to cancel the
            # download/preparation if needed.
            # It comes right before the progress bar.
if self.info.size_in_bytes:
logger.info(
f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} "
f"(download: {size_str(self.info.download_size)}, generated: {size_str(self.info.dataset_size)}, "
f"post-processed: {size_str(self.info.post_processing_size)}, "
f"total: {size_str(self.info.size_in_bytes)}) to {self._output_dir}..."
)
else:
_dest = self._fs._strip_protocol(self._output_dir) if is_local else self._output_dir
logger.info(f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} to {_dest}...")
self._check_manual_download(dl_manager)
# Create a tmp dir and rename to self._output_dir on successful exit.
with incomplete_dir(self._output_dir) as tmp_output_dir:
# Temporarily assign _output_dir to tmp_data_dir to avoid having to forward
# it to every sub function.
with temporary_assignment(self, "_output_dir", tmp_output_dir):
prepare_split_kwargs = {"file_format": file_format}
if max_shard_size is not None:
prepare_split_kwargs["max_shard_size"] = max_shard_size
if num_proc is not None:
prepare_split_kwargs["num_proc"] = num_proc
self._download_and_prepare(
dl_manager=dl_manager,
verification_mode=verification_mode,
**prepare_split_kwargs,
**download_and_prepare_kwargs,
)
# Sync info
self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
self.info.download_checksums = dl_manager.get_recorded_sizes_checksums()
if self.info.download_size is not None:
self.info.size_in_bytes = self.info.dataset_size + self.info.download_size
# Save info
self._save_info()
# Download post processing resources
self.download_post_processing_resources(dl_manager)
logger.info(
f"Dataset {self.dataset_name} downloaded and prepared to {self._output_dir}. "
f"Subsequent calls will reuse this data."
)
def _check_manual_download(self, dl_manager):
if self.manual_download_instructions is not None and dl_manager.manual_dir is None:
raise ManualDownloadError(
textwrap.dedent(
f"""\
The dataset {self.dataset_name} with config {self.config.name} requires manual data.
Please follow the manual download instructions:
{self.manual_download_instructions}
Manual data can be loaded with:
datasets.load_dataset("{self.repo_id or self.dataset_name}", data_dir="<path/to/manual/data>")"""
)
)
def _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs):
"""Downloads and prepares dataset for reading.
        This is the internal implementation to override; it is called when the user calls
        `download_and_prepare`. It should download all required data and generate
        the pre-processed dataset files.
Args:
dl_manager ([`DownloadManager`]):
`DownloadManager` used to download and cache data.
verification_mode ([`VerificationMode`]):
if `ALL_CHECKS`, perform all the verifications including checksums.
if `BASIC_CHECKS`, do not perform checksums, only perform split tests.
if `NO_CHECKS`, do not perform any verification.
prepare_split_kwargs: Additional options, such as `file_format`, `max_shard_size`
"""
# Generating data for all splits
split_dict = SplitDict(dataset_name=self.dataset_name)
split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
# Checksums verification
if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums:
verify_checksums(
self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files"
)
# Build splits
for split_generator in split_generators:
if str(split_generator.split_info.name).lower() == "all":
raise ValueError(
"`all` is a special split keyword corresponding to the "
"union of all splits, so cannot be used as key in "
"._split_generator()."
)
logger.info(f"Generating {split_generator.split_info.name} split")
split_dict.add(split_generator.split_info)
try:
# Prepare split will record examples associated to the split
self._prepare_split(split_generator, **prepare_split_kwargs)
except OSError as e:
raise OSError(
"Cannot find data file. "
+ (self.manual_download_instructions or "")
+ "\nOriginal error:\n"
+ str(e)
) from None
            # If check_duplicates is set to True, a DuplicatedKeysError may be raised; re-raise it with a fix message
except DuplicatedKeysError as e:
raise DuplicatedKeysError(
e.key,
e.duplicate_key_indices,
fix_msg=f"To avoid duplicate keys, please fix the dataset script {self.name}.py",
) from None
dl_manager.manage_extracted_files()
if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS:
verify_splits(self.info.splits, split_dict)
# Update the info object with the splits.
self.info.splits = split_dict
self.info.download_size = dl_manager.downloaded_size
def download_post_processing_resources(self, dl_manager):
for split in self.info.splits or []:
for resource_name, resource_file_name in self._post_processing_resources(split).items():
                if is_remote_filesystem(self._fs):
raise NotImplementedError(f"Post processing is not supported on filesystem {self._fs}")
if os.sep in resource_file_name:
raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
resource_path = os.path.join(self._output_dir, resource_file_name)
if not os.path.exists(resource_path):
downloaded_resource_path = self._download_post_processing_resources(
split, resource_name, dl_manager
)
if downloaded_resource_path:
logger.info(f"Downloaded post-processing resource {resource_name} as {resource_file_name}")
shutil.move(downloaded_resource_path, resource_path)
def _load_info(self) -> DatasetInfo:
return DatasetInfo.from_directory(self._output_dir, storage_options=self._fs.storage_options)
def _save_info(self):
file_lock = (
FileLock(self._output_dir + "_info.lock")
if not is_remote_filesystem(self._fs)
else contextlib.nullcontext()
)
with file_lock:
self.info.write_to_directory(self._output_dir, storage_options=self._fs.storage_options)
def _save_infos(self):
file_lock = (
FileLock(self._output_dir + "_infos.lock")
if not is_remote_filesystem(self._fs)
else contextlib.nullcontext()
)
with file_lock:
DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir())
def _make_split_generators_kwargs(self, prepare_split_kwargs):
"""Get kwargs for `self._split_generators()` from `prepare_split_kwargs`."""
del prepare_split_kwargs
return {}
def as_dataset(
self,
split: Optional[Split] = None,
run_post_process=True,
verification_mode: Optional[Union[VerificationMode, str]] = None,
in_memory=False,
) -> Union[Dataset, DatasetDict]:
"""Return a Dataset for the specified split.
Args:
split (`datasets.Split`):
Which subset of the data to return.
run_post_process (`bool`, defaults to `True`):
Whether to run post-processing dataset transforms and/or add
indexes.
verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
Verification mode determining the checks to run on the
downloaded/processed dataset information (checksums/size/splits/...).
<Added version="2.9.1"/>
in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
Returns:
datasets.Dataset
Example:
```py
>>> from datasets import load_dataset_builder
>>> builder = load_dataset_builder('rotten_tomatoes')
>>> builder.download_and_prepare()
>>> ds = builder.as_dataset(split='train')
>>> ds
Dataset({
features: ['text', 'label'],
num_rows: 8530
})
```
"""
if self._file_format is not None and self._file_format != "arrow":
raise FileFormatError('Loading a dataset not written in the "arrow" format is not supported.')
if is_remote_filesystem(self._fs):
raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.")
if not os.path.exists(self._output_dir):
raise FileNotFoundError(
f"Dataset {self.dataset_name}: could not find data in {self._output_dir}. Please make sure to call "
"builder.download_and_prepare(), or use "
"datasets.load_dataset() before trying to access the Dataset object."
)
logger.debug(f"Constructing Dataset for split {split or ', '.join(self.info.splits)}, from {self._output_dir}")
# By default, return all splits
if split is None:
split = {s: s for s in self.info.splits}
verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
# Create a dataset for each of the given splits
datasets = map_nested(
partial(
self._build_single_dataset,
run_post_process=run_post_process,
verification_mode=verification_mode,
in_memory=in_memory,
),
split,
map_tuple=True,
disable_tqdm=True,
)
if isinstance(datasets, dict):
datasets = DatasetDict(datasets)
return datasets
def _build_single_dataset(
self,
split: Union[str, ReadInstruction, Split],
run_post_process: bool,
verification_mode: VerificationMode,
in_memory: bool = False,
):
"""as_dataset for a single split."""
if not isinstance(split, ReadInstruction):
split = str(split)
if split == "all":
split = "+".join(self.info.splits.keys())
split = Split(split)
# Build base dataset
ds = self._as_dataset(
split=split,
in_memory=in_memory,
)
if run_post_process:
for resource_file_name in self._post_processing_resources(split).values():
if os.sep in resource_file_name:
raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
resources_paths = {
resource_name: os.path.join(self._output_dir, resource_file_name)
for resource_name, resource_file_name in self._post_processing_resources(split).items()
}
post_processed = self._post_process(ds, resources_paths)
if post_processed is not None:
ds = post_processed
recorded_checksums = {}
record_checksums = False
for resource_name, resource_path in resources_paths.items():
size_checksum = get_size_checksum_dict(resource_path)
recorded_checksums[resource_name] = size_checksum
if verification_mode == VerificationMode.ALL_CHECKS and record_checksums:
if self.info.post_processed is None or self.info.post_processed.resources_checksums is None:
expected_checksums = None
else:
expected_checksums = self.info.post_processed.resources_checksums.get(split)
verify_checksums(expected_checksums, recorded_checksums, "post processing resources")
if self.info.post_processed is None:
self.info.post_processed = PostProcessedInfo()
if self.info.post_processed.resources_checksums is None:
self.info.post_processed.resources_checksums = {}
self.info.post_processed.resources_checksums[str(split)] = recorded_checksums
self.info.post_processing_size = sum(
checksums_dict["num_bytes"]
for split_checksums_dicts in self.info.post_processed.resources_checksums.values()
for checksums_dict in split_checksums_dicts.values()
)
if self.info.dataset_size is not None and self.info.download_size is not None:
self.info.size_in_bytes = (
self.info.dataset_size + self.info.download_size + self.info.post_processing_size
)
self._save_info()
ds._info.post_processed = self.info.post_processed
ds._info.post_processing_size = self.info.post_processing_size
ds._info.size_in_bytes = self.info.size_in_bytes
                if self.info.post_processed.features is not None:
                    if self.info.post_processed.features.type != ds.features.type:
                        raise ValueError(
                            f"Post-processed features info don't match the dataset:\nGot\n{self.info.post_processed.features}\nbut expected something like\n{ds.features}"
                        )
                    else:
                        ds.info.features = self.info.post_processed.features
return ds
def _as_dataset(self, split: Union[ReadInstruction, Split] = Split.TRAIN, in_memory: bool = False) -> Dataset:
"""Constructs a `Dataset`.
        This is the internal implementation to override; it is called when the user calls
        `as_dataset`. It should read the pre-processed dataset files and generate
        the `Dataset` object.
Args:
split (`datasets.Split`):
which subset of the data to read.
in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
Returns:
`Dataset`
"""
cache_dir = self._fs._strip_protocol(self._output_dir)
dataset_name = self.dataset_name
if self._check_legacy_cache():
dataset_name = self.name
dataset_kwargs = ArrowReader(cache_dir, self.info).read(
name=dataset_name,
instructions=split,
split_infos=self.info.splits.values(),
in_memory=in_memory,
)
fingerprint = self._get_dataset_fingerprint(split)
return Dataset(fingerprint=fingerprint, **dataset_kwargs)
def _get_dataset_fingerprint(self, split: Union[ReadInstruction, Split]) -> str:
"""The dataset fingerprint is the hash of the relative directory dataset_name/config_name/version/hash, as well as the split specs."""
hasher = Hasher()
hasher.update(Path(self._relative_data_dir()).as_posix())
hasher.update(str(split)) # for example: train, train+test, train[:10%], test[:33%](pct1_dropremainder)
fingerprint = hasher.hexdigest()
return fingerprint
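    # Sketch (hypothetical values, for illustration only): a dataset cached under the relative dir
    # "username___my_dataset/default/0.0.0/abc123" and loaded with split "train[:10%]" gets a fingerprint
    # computed by hashing that relative path together with the string "train[:10%]".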
def as_streaming_dataset(
self,
split: Optional[str] = None,
base_path: Optional[str] = None,
) -> Union[Dict[str, IterableDataset], IterableDataset]:
if is_remote_filesystem(self._fs):
raise NotImplementedError(
f"Loading a streaming dataset cached in a {type(self._fs).__name__} is not supported yet."
)
dl_manager = StreamingDownloadManager(
base_path=base_path or self.base_path,
download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
dataset_name=self.dataset_name,
data_dir=self.config.data_dir,
)
self._check_manual_download(dl_manager)
splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)}
# By default, return all splits
if split is None:
splits_generator = splits_generators
elif split in splits_generators:
splits_generator = splits_generators[split]
else:
raise ValueError(f"Bad split: {split}. Available splits: {list(splits_generators)}")
# Create a dataset for each of the given splits
datasets = map_nested(
self._as_streaming_dataset_single,
splits_generator,
map_tuple=True,
)
if isinstance(datasets, dict):
datasets = IterableDatasetDict(datasets)
return datasets
def _as_streaming_dataset_single(
self,
splits_generator,
) -> IterableDataset:
ex_iterable = self._get_examples_iterable_for_split(splits_generator)
# add auth to be able to access and decode audio/image files from private repositories.
token_per_repo_id = {self.repo_id: self.token} if self.repo_id else {}
return IterableDataset(
ex_iterable, info=self.info, split=splits_generator.name, token_per_repo_id=token_per_repo_id
)
def _post_process(self, dataset: Dataset, resources_paths: Mapping[str, str]) -> Optional[Dataset]:
"""Run dataset transforms or add indexes"""
return None
def _post_processing_resources(self, split: str) -> Dict[str, str]:
"""Mapping resource_name -> resource_file_name"""
return {}
def _download_post_processing_resources(
self, split: str, resource_name: str, dl_manager: DownloadManager
) -> Optional[str]:
"""Download the resource using the download manager and return the downloaded path."""
return None
@abc.abstractmethod
def _split_generators(self, dl_manager: Union[DownloadManager, StreamingDownloadManager]):
"""Specify feature dictionary generators and dataset splits.
This function returns a list of `SplitGenerator`s defining how to generate
data and what splits to use.
Example:
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={'file': 'train_data.zip'},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={'file': 'test_data.zip'},
),
]
The above code will first call `_generate_examples(file='train_data.zip')`
to write the train data, then `_generate_examples(file='test_data.zip')` to
write the test data.
Datasets are typically split into different subsets to be used at various
stages of training and evaluation.
Note that for datasets without a `VALIDATION` split, you can use a
fraction of the `TRAIN` data for evaluation as you iterate on your model
so as not to overfit to the `TEST` data.
For downloads and extractions, use the given `download_manager`.
Note that the `DownloadManager` caches downloads, so it is fine to have each
generator attempt to download the source data.
A good practice is to download all data in this function, and then
        distribute the relevant parts to each split with the `gen_kwargs` argument.
Args:
dl_manager (`Union[DownloadManager, StreamingDownloadManager]`):
Download manager to download the data
Returns:
`list<SplitGenerator>`.
"""
raise NotImplementedError()
@abc.abstractmethod
def _prepare_split(
self,
split_generator: SplitGenerator,
file_format: str = "arrow",
max_shard_size: Optional[Union[str, int]] = None,
num_proc: Optional[int] = None,
**kwargs,
):
"""Generate the examples and record them on disk.
Args:
split_generator (`SplitGenerator`):
Split generator to process
file_format (`str`, *optional*):
format of the data files in which the dataset will be written.
Supported formats: "arrow", "parquet". Default to "arrow" format.
max_shard_size (`Union[str, int]`, *optional*):
Maximum number of bytes written per shard, default is "500MB".
The size is based on uncompressed data size, so in practice your shard files may be smaller than
`max_shard_size` thanks to Parquet compression for example.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
Multiprocessing is disabled by default.
<Added version="2.7.0"/>
**kwargs: Additional kwargs forwarded from _download_and_prepare
"""
raise NotImplementedError()
def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
"""Generate the examples on the fly.
Args:
split_generator (`SplitGenerator`):
Split generator to process
"""
raise NotImplementedError()
class GeneratorBasedBuilder(DatasetBuilder):
"""Base class for datasets with data generation based on dict generators.
`GeneratorBasedBuilder` is a convenience class that abstracts away much
of the data writing and reading of `DatasetBuilder`. It expects subclasses to
implement generators of feature dictionaries across the dataset splits
(`_split_generators`). See the method docstrings for details.
"""
@abc.abstractmethod
def _generate_examples(self, **kwargs):
"""Default function generating examples for each `SplitGenerator`.
        This function preprocesses the examples from the raw data into the preprocessed
        dataset files.
This function is called once for each `SplitGenerator` defined in
`_split_generators`. The examples yielded here will be written on
disk.
Args:
**kwargs (additional keyword arguments):
Arguments forwarded from the SplitGenerator.gen_kwargs
Yields:
key: `str` or `int`, a unique deterministic example identification key.
                * Unique: An error will be raised if two examples are yielded with the
same key.
* Deterministic: When generating the dataset twice, the same example
should have the same key.
Good keys can be the image id, or line number if examples are extracted
from a text file.
The key will be hashed and sorted to shuffle examples deterministically,
            so that generating the dataset multiple times keeps the examples in the
            same order.
example: `dict<str feature_name, feature_value>`, a feature dictionary
ready to be encoded and written to disk. The example will be
encoded with `self.info.features.encode_example({...})`.
"""
raise NotImplementedError()
def _prepare_split(
self,
split_generator: SplitGenerator,
check_duplicate_keys: bool,
file_format="arrow",
num_proc: Optional[int] = None,
max_shard_size: Optional[Union[int, str]] = None,
):
max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
if self.info.splits is not None:
split_info = self.info.splits[split_generator.name]
else:
split_info = split_generator.split_info
SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
fpath = posixpath.join(self._output_dir, fname)
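        # The JJJJJ/SSSSS placeholders are filled with the job id and the per-job shard id while writing,
        # e.g. (hypothetical) "my_dataset-train-00001-00000-of-NNNNN.arrow"; once all jobs are done the shards
        # are renamed to the final "-SSSSS-of-NNNNN" pattern, or to a single file without a suffix.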
if num_proc and num_proc > 1:
num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
if num_input_shards <= 1:
logger.warning(
f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
)
num_proc = 1
elif num_input_shards < num_proc:
logger.warning(
f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
)
num_proc = num_input_shards
pbar = hf_tqdm(
unit=" examples",
total=split_info.num_examples,
desc=f"Generating {split_info.name} split",
)
_prepare_split_args = {
"fpath": fpath,
"file_format": file_format,
"max_shard_size": max_shard_size,
"split_info": split_info,
"check_duplicate_keys": check_duplicate_keys,
}
if num_proc is None or num_proc == 1:
result = None
gen_kwargs = split_generator.gen_kwargs
job_id = 0
with pbar:
for job_id, done, content in self._prepare_split_single(
gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
):
if done:
result = content
else:
pbar.update(content)
# wrapping everything into lists for consistency with the multiprocessed code path
assert result is not None, "Failed to retrieve results from prepare_split"
examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
[item] for item in result
]
else:
kwargs_per_job = [
{"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
for job_id, gen_kwargs in enumerate(
_split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
)
]
num_jobs = len(kwargs_per_job)
examples_per_job = [None] * num_jobs
bytes_per_job = [None] * num_jobs
features_per_job = [None] * num_jobs
shards_per_job = [None] * num_jobs
shard_lengths_per_job = [None] * num_jobs
with Pool(num_proc) as pool:
with pbar:
for job_id, done, content in iflatmap_unordered(
pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
):
if done:
# the content is the result of the job
(
examples_per_job[job_id],
bytes_per_job[job_id],
features_per_job[job_id],
shards_per_job[job_id],
shard_lengths_per_job[job_id],
) = content
else:
# the content is the number of examples progress update
pbar.update(content)
assert None not in examples_per_job, (
f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
)
total_shards = sum(shards_per_job)
total_num_examples = sum(examples_per_job)
total_num_bytes = sum(bytes_per_job)
features = features_per_job[0]
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards.")
if total_shards > 1:
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(shard_and_job: Tuple[int]):
shard_id, job_id = shard_and_job
global_shard_id = sum(shards_per_job[:job_id]) + shard_id
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
)
shards_and_jobs = [
(shard_id, job_id)
for job_id, num_shards in enumerate(shards_per_job)
for shard_id in range(num_shards)
]
thread_map(_rename_shard, shards_and_jobs, disable=True, max_workers=64)
split_generator.split_info.shard_lengths = [
shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
]
else:
# don't use any pattern
shard_id, job_id = 0, 0
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace(SUFFIX, ""),
)
if self.info.features is None:
self.info.features = features
def _prepare_split_single(
self,
gen_kwargs: dict,
fpath: str,
file_format: str,
max_shard_size: int,
split_info: SplitInfo,
check_duplicate_keys: bool,
job_id: int,
) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
generator = self._generate_examples(**gen_kwargs)
writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
embed_local_files = file_format == "parquet"
shard_lengths = []
total_num_examples, total_num_bytes = 0, 0
shard_id = 0
num_examples_progress_update = 0
try:
writer = writer_class(
features=self.info.features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
hash_salt=split_info.name,
check_duplicates=check_duplicate_keys,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
try:
_time = time.time()
for key, record in generator:
if max_shard_size is not None and writer._num_bytes > max_shard_size:
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
shard_id += 1
writer = writer_class(
features=writer._features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
hash_salt=split_info.name,
check_duplicates=check_duplicate_keys,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
example = self.info.features.encode_example(record) if self.info.features is not None else record
writer.write(example, key)
num_examples_progress_update += 1
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield job_id, False, num_examples_progress_update
num_examples_progress_update = 0
finally:
yield job_id, False, num_examples_progress_update
num_shards = shard_id + 1
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
except Exception as e:
# Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded
if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
e = e.__context__
raise DatasetGenerationError("An error occurred while generating the dataset") from e
yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
super()._download_and_prepare(
dl_manager,
verification_mode,
check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS
or verification_mode == VerificationMode.ALL_CHECKS,
**prepare_splits_kwargs,
)
def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
return ExamplesIterable(self._generate_examples, split_generator.gen_kwargs)
class ArrowBasedBuilder(DatasetBuilder):
"""Base class for datasets with data generation based on Arrow loading functions (CSV/JSON/Parquet)."""
@abc.abstractmethod
def _generate_tables(self, **kwargs):
"""Default function generating examples for each `SplitGenerator`.
        This function preprocesses the examples from the raw data into the preprocessed
        dataset files.
This function is called once for each `SplitGenerator` defined in
`_split_generators`. The examples yielded here will be written on
disk.
Args:
**kwargs (additional keyword arguments):
Arguments forwarded from the SplitGenerator.gen_kwargs
Yields:
key: `str` or `int`, a unique deterministic example identification key.
                * Unique: An error will be raised if two examples are yielded with the
same key.
* Deterministic: When generating the dataset twice, the same example
should have the same key.
Good keys can be the image id, or line number if examples are extracted
from a text file.
The key will be hashed and sorted to shuffle examples deterministically,
            so that generating the dataset multiple times keeps the examples in the
            same order.
example: `pyarrow.Table`, a feature table
ready to be encoded and written to disk.
"""
raise NotImplementedError()
def _prepare_split(
self,
split_generator: SplitGenerator,
file_format: str = "arrow",
num_proc: Optional[int] = None,
max_shard_size: Optional[Union[str, int]] = None,
):
max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
try:
split_info = self.info.splits[split_generator.name]
except Exception:
split_info = split_generator.split_info
SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
fpath = posixpath.join(self._output_dir, fname)
if num_proc and num_proc > 1:
num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
if num_input_shards <= 1:
logger.warning(
f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
)
num_proc = 1
elif num_input_shards < num_proc:
logger.warning(
f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
)
num_proc = num_input_shards
pbar = hf_tqdm(
unit=" examples",
total=split_info.num_examples,
desc=f"Generating {split_info.name} split",
)
_prepare_split_args = {
"fpath": fpath,
"file_format": file_format,
"max_shard_size": max_shard_size,
}
if num_proc is None or num_proc == 1:
result = None
gen_kwargs = split_generator.gen_kwargs
job_id = 0
with pbar:
for job_id, done, content in self._prepare_split_single(
gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
):
if done:
result = content
else:
pbar.update(content)
# wrapping everything into lists for consistency with the multiprocessed code path
assert result is not None, "Failed to retrieve results from prepare_split"
examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
[item] for item in result
]
else:
kwargs_per_job = [
{"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
for job_id, gen_kwargs in enumerate(
_split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
)
]
num_jobs = len(kwargs_per_job)
examples_per_job = [None] * num_jobs
bytes_per_job = [None] * num_jobs
features_per_job = [None] * num_jobs
shards_per_job = [None] * num_jobs
shard_lengths_per_job = [None] * num_jobs
with Pool(num_proc) as pool:
with pbar:
for job_id, done, content in iflatmap_unordered(
pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
):
if done:
# the content is the result of the job
(
examples_per_job[job_id],
bytes_per_job[job_id],
features_per_job[job_id],
shards_per_job[job_id],
shard_lengths_per_job[job_id],
) = content
else:
# the content is the number of examples progress update
pbar.update(content)
assert None not in examples_per_job, (
f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
)
total_shards = sum(shards_per_job)
total_num_examples = sum(examples_per_job)
total_num_bytes = sum(bytes_per_job)
features = features_per_job[0]
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards.")
if total_shards > 1:
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(shard_id_and_job: Tuple[int]):
shard_id, job_id = shard_id_and_job
global_shard_id = sum(shards_per_job[:job_id]) + shard_id
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
)
shard_ids_and_jobs = [
(shard_id, job_id)
for job_id, num_shards in enumerate(shards_per_job)
for shard_id in range(num_shards)
]
thread_map(_rename_shard, shard_ids_and_jobs, disable=True, max_workers=64)
split_generator.split_info.shard_lengths = [
shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
]
else:
# don't use any pattern
shard_id, job_id = 0, 0
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace(SUFFIX, ""),
)
if self.info.features is None:
self.info.features = features
def _prepare_split_single(
self, gen_kwargs: dict, fpath: str, file_format: str, max_shard_size: int, job_id: int
) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
gen_kwargs = {k: tracked_list(v) if isinstance(v, list) else v for k, v in gen_kwargs.items()}
generator = self._generate_tables(**gen_kwargs)
writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
embed_local_files = file_format == "parquet"
shard_lengths = []
total_num_examples, total_num_bytes = 0, 0
shard_id = 0
num_examples_progress_update = 0
try:
writer = writer_class(
features=self.info.features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
try:
_time = time.time()
for _, table in generator:
if max_shard_size is not None and writer._num_bytes > max_shard_size:
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
shard_id += 1
writer = writer_class(
features=writer._features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
try:
writer.write_table(table)
except CastError as cast_error:
raise DatasetGenerationCastError.from_cast_error(
cast_error=cast_error,
builder_name=self.info.builder_name,
gen_kwargs=gen_kwargs,
token=self.token,
)
num_examples_progress_update += len(table)
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield job_id, False, num_examples_progress_update
num_examples_progress_update = 0
finally:
yield job_id, False, num_examples_progress_update
num_shards = shard_id + 1
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
except Exception as e:
# Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded
if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
e = e.__context__
if isinstance(e, DatasetGenerationError):
raise
raise DatasetGenerationError("An error occurred while generating the dataset") from e
yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
return ArrowExamplesIterable(self._generate_tables, kwargs=split_generator.gen_kwargs)
| datasets/src/datasets/builder.py/0 | {
"file_path": "datasets/src/datasets/builder.py",
"repo_id": "datasets",
"token_count": 41220
} |
import io
import os
from typing import Iterable, List, Optional, Tuple, Union
from ..utils.file_utils import ( # noqa: F401 # backward compatibility
SINGLE_FILE_COMPRESSION_PROTOCOLS,
ArchiveIterable,
FilesIterable,
_get_extraction_protocol,
_get_path_extension,
_prepare_path_and_storage_options,
is_relative_path,
url_or_path_join,
xbasename,
xdirname,
xet_parse,
xexists,
xgetsize,
xglob,
xgzip_open,
xisdir,
xisfile,
xjoin,
xlistdir,
xnumpy_load,
xopen,
xpandas_read_csv,
xpandas_read_excel,
xPath,
xpyarrow_parquet_read_table,
xrelpath,
xsio_loadmat,
xsplit,
xsplitext,
xwalk,
xxml_dom_minidom_parse,
)
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .download_config import DownloadConfig
logger = get_logger(__name__)
class StreamingDownloadManager:
"""
Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives.
    Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download or extract
    data; instead they return the path or URL that can be opened using the `xopen` function, which extends the
    built-in `open` function to stream data from remote files.
"""
is_streaming = True
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
):
self._dataset_name = dataset_name
self._data_dir = data_dir
self._base_path = base_path or os.path.abspath(".")
self.download_config = download_config or DownloadConfig()
self.downloaded_size = None
self.record_checksums = False
@property
def manual_dir(self):
return self._data_dir
def download(self, url_or_urls):
"""Normalize URL(s) of files to stream data from.
This is the lazy version of `DownloadManager.download` for streaming.
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) of files to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
```
"""
url_or_urls = map_nested(self._download_single, url_or_urls, map_tuple=True)
return url_or_urls
def _download_single(self, urlpath: str) -> str:
urlpath = str(urlpath)
if is_relative_path(urlpath):
# append the relative path to the base_path
urlpath = url_or_path_join(self._base_path, urlpath)
return urlpath
def extract(self, url_or_urls):
"""Add extraction protocol for given url(s) for streaming.
This is the lazy version of `DownloadManager.extract` for streaming.
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) of files to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> extracted_files = dl_manager.extract(downloaded_files)
```
"""
urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True)
return urlpaths
def _extract(self, urlpath: str) -> str:
urlpath = str(urlpath)
protocol = _get_extraction_protocol(urlpath, download_config=self.download_config)
# get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
path = urlpath.split("::")[0]
extension = _get_path_extension(path)
if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")):
raise NotImplementedError(
f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. "
f"Please use `dl_manager.iter_archive` instead.\n\n"
f"Example usage:\n\n"
f"\turl = dl_manager.download(url)\n"
f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n"
f"\tfor filename, file in tar_archive_iterator:\n"
f"\t\t..."
)
if protocol is None:
# no extraction
return urlpath
elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS:
# there is one single file which is the uncompressed file
inner_file = os.path.basename(urlpath.split("::")[0])
inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file
return f"{protocol}://{inner_file}::{urlpath}"
else:
return f"{protocol}://::{urlpath}"
def download_and_extract(self, url_or_urls):
"""Prepare given `url_or_urls` for streaming (add extraction protocol).
This is the lazy version of `DownloadManager.download_and_extract` for streaming.
Is equivalent to:
```
urls = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls (`str` or `list` or `dict`):
                URL(s) to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
"""
return self.extract(self.download(url_or_urls))
def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
"""Iterate over files within an archive.
Args:
urlpath_or_buf (`str` or `io.BufferedReader`):
Archive path or archive binary file object.
Yields:
`tuple[str, io.BufferedReader]`:
2-tuple (path_within_archive, file_object).
File object is opened in binary mode.
Example:
```py
>>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> files = dl_manager.iter_archive(archive)
```
"""
if hasattr(urlpath_or_buf, "read"):
return ArchiveIterable.from_buf(urlpath_or_buf)
else:
return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config)
def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
"""Iterate over files.
Args:
urlpaths (`str` or `list` of `str`):
Root paths.
Yields:
str: File URL path.
Example:
```py
>>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
>>> files = dl_manager.iter_files(files)
```
"""
return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
def manage_extracted_files(self):
pass
def get_recorded_sizes_checksums(self):
pass
| datasets/src/datasets/download/streaming_download_manager.py/0 | {
"file_path": "datasets/src/datasets/download/streaming_download_manager.py",
"repo_id": "datasets",
"token_count": 3364
} |
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import tensorflow as tf
class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]):
def __init__(self, features=None, token_per_repo_id=None, **tf_tensor_kwargs):
super().__init__(features=features, token_per_repo_id=token_per_repo_id)
self.tf_tensor_kwargs = tf_tensor_kwargs
import tensorflow as tf # noqa: F401 - import tf at initialization
def _consolidate(self, column):
import tensorflow as tf
if isinstance(column, list) and column:
if all(
isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return tf.stack(column)
elif all(
isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
for x in column
):
                # only ragged-stack 1-D tensors, otherwise some dimensions become ragged even though they were consolidated
return tf.ragged.stack(column)
return column
def _tensorize(self, value):
import tensorflow as tf
if value is None:
return value
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": tf.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": tf.float32}
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
if config.DECORD_AVAILABLE and "decord" in sys.modules:
# We need to import torch first, otherwise later it can cause issues
# e.g. "RuntimeError: random_device could not be read"
# when running `torch.tensor(value).share_memory_()`
if config.TORCH_AVAILABLE:
import torch # noqa
from decord import VideoReader
from decord.bridge import to_tensorflow
if isinstance(value, VideoReader):
value._hf_bridge_out = to_tensorflow
return value
return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})
def _recursive_tensorize(self, data_struct):
import tensorflow as tf
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
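# Minimal usage sketch of the formatter above. The column names and values are
# made-up placeholders, and TensorFlow must be installed for the call to work;
# the variable-length "tokens" column comes back as a tf.RaggedTensor, "label" as a tf.Tensor.
def _example_tf_formatter_usage():
    formatter = TFFormatter()
    table = pa.table({"tokens": [[1, 2, 3], [4, 5]], "label": [0, 1]})
    # format_batch extracts numpy arrays from the Arrow table, decodes them and
    # converts each column to (possibly ragged) TensorFlow tensors.
    return formatter.format_batch(table)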
| datasets/src/datasets/formatting/tf_formatter.py/0 | {
"file_path": "datasets/src/datasets/formatting/tf_formatter.py",
"repo_id": "datasets",
"token_count": 2181
} |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Access datasets."""
import filecmp
import glob
import importlib
import inspect
import json
import os
import posixpath
import shutil
import signal
import time
import warnings
from collections import Counter
from contextlib import nullcontext
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
import fsspec
import requests
import yaml
from fsspec.core import url_to_fs
from huggingface_hub import DatasetCard, DatasetCardData, HfApi
from huggingface_hub.utils import (
EntryNotFoundError,
GatedRepoError,
LocalEntryNotFoundError,
OfflineModeIsEnabled,
RepositoryNotFoundError,
RevisionNotFoundError,
get_session,
)
from . import __version__, config
from .arrow_dataset import Dataset
from .builder import BuilderConfig, DatasetBuilder
from .data_files import (
DEFAULT_PATTERNS_ALL,
DataFilesDict,
DataFilesList,
DataFilesPatternsDict,
DataFilesPatternsList,
EmptyDatasetError,
get_data_patterns,
get_metadata_patterns,
sanitize_patterns,
)
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download.download_config import DownloadConfig
from .download.download_manager import DownloadMode
from .download.streaming_download_manager import StreamingDownloadManager, xbasename, xglob, xjoin
from .exceptions import DataFilesNotFoundError, DatasetNotFoundError
from .features import Features
from .fingerprint import Hasher
from .info import DatasetInfo, DatasetInfosDict
from .iterable_dataset import IterableDataset
from .naming import camelcase_to_snakecase, snakecase_to_camelcase
from .packaged_modules import (
_EXTENSION_TO_MODULE,
_MODULE_SUPPORTS_METADATA,
_MODULE_TO_EXTENSIONS,
_PACKAGED_DATASETS_MODULES,
_hash_python_lines,
)
from .splits import Split
from .utils import _dataset_viewer
from .utils.file_utils import (
_raise_if_offline_mode_is_enabled,
cached_path,
get_datasets_user_agent,
init_hf_modules,
is_relative_path,
relative_to_absolute_path,
url_or_path_join,
)
from .utils.hub import hf_dataset_url
from .utils.info_utils import VerificationMode, is_small_dataset
from .utils.logging import get_logger
from .utils.metadata import MetadataConfigs
from .utils.py_utils import get_imports, lock_importable_file
from .utils.typing import PathLike
from .utils.version import Version
logger = get_logger(__name__)
ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + [".zip"]
def _raise_timeout_error(signum, frame):
raise ValueError(
"Loading this dataset requires you to execute custom code contained in the dataset repository on your local "
"machine. Please set the option `trust_remote_code=True` to permit loading of this dataset."
)
def resolve_trust_remote_code(trust_remote_code: Optional[bool], repo_id: str) -> bool:
"""
Copied and adapted from Transformers
https://github.com/huggingface/transformers/blob/2098d343cc4b4b9d2aea84b3cf1eb5a1e610deff/src/transformers/dynamic_module_utils.py#L589
"""
trust_remote_code = trust_remote_code if trust_remote_code is not None else config.HF_DATASETS_TRUST_REMOTE_CODE
if trust_remote_code is None:
if config.TIME_OUT_REMOTE_CODE > 0:
try:
signal.signal(signal.SIGALRM, _raise_timeout_error)
signal.alarm(config.TIME_OUT_REMOTE_CODE)
while trust_remote_code is None:
answer = input(
f"The repository for {repo_id} contains custom code which must be executed to correctly "
f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\n"
f"You can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\n"
f"Do you wish to run the custom code? [y/N] "
)
if answer.lower() in ["yes", "y", "1"]:
trust_remote_code = True
elif answer.lower() in ["no", "n", "0", ""]:
trust_remote_code = False
signal.alarm(0)
except Exception:
# OS which does not support signal.SIGALRM
raise ValueError(
f"The repository for {repo_id} contains custom code which must be executed to correctly "
f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\n"
f"Please pass the argument `trust_remote_code=True` to allow custom code to be run."
)
else:
# For the CI which might put the timeout at 0
_raise_timeout_error(None, None)
return trust_remote_code
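# Minimal usage sketch for the helper above; "username/my_dataset" is a made-up repo id.
def _example_resolve_trust_remote_code():
    # An explicit boolean is returned as-is without prompting.
    assert resolve_trust_remote_code(True, "username/my_dataset") is True
    # Passing None falls back to config.HF_DATASETS_TRUST_REMOTE_CODE and may
    # prompt interactively (or raise) when no default is configured.
    return resolve_trust_remote_code(None, "username/my_dataset")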
def init_dynamic_modules(
name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
):
"""
Create a module with name `name` in which you can add dynamic modules
such as datasets. The module can be imported using its name.
    The module is created in the HF_MODULES_CACHE directory by default (~/.cache/huggingface/modules) but it can
be overridden by specifying a path to another directory in `hf_modules_cache`.
"""
hf_modules_cache = init_hf_modules(hf_modules_cache)
dynamic_modules_path = os.path.join(hf_modules_cache, name)
os.makedirs(dynamic_modules_path, exist_ok=True)
if not os.path.exists(os.path.join(dynamic_modules_path, "__init__.py")):
with open(os.path.join(dynamic_modules_path, "__init__.py"), "w"):
pass
return dynamic_modules_path
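# Minimal usage sketch for init_dynamic_modules, pointing the modules cache at a
# caller-provided temporary directory instead of ~/.cache/huggingface/modules.
def _example_init_dynamic_modules(tmp_dir: str) -> str:
    dynamic_modules_path = init_dynamic_modules(hf_modules_cache=tmp_dir)
    # The returned directory is importable: it contains an __init__.py file.
    assert os.path.isfile(os.path.join(dynamic_modules_path, "__init__.py"))
    return dynamic_modules_path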
def import_main_class(module_path) -> Optional[Type[DatasetBuilder]]:
"""Import a module at module_path and return its main class: a DatasetBuilder"""
module = importlib.import_module(module_path)
# Find the main class in our imported module
module_main_cls = None
for name, obj in module.__dict__.items():
if inspect.isclass(obj) and issubclass(obj, DatasetBuilder):
if inspect.isabstract(obj):
continue
module_main_cls = obj
obj_module = inspect.getmodule(obj)
if obj_module is not None and module == obj_module:
break
return module_main_cls
class _InitializeConfiguredDatasetBuilder:
"""
From https://stackoverflow.com/questions/4647566/pickle-a-dynamically-parameterized-sub-class
See also ConfiguredDatasetBuilder.__reduce__
When called with the param value as the only argument, returns an
un-initialized instance of the parameterized class. Subsequent __setstate__
will be called by pickle.
"""
def __call__(self, builder_cls, metadata_configs, default_config_name, name):
# make a simple object which has no complex __init__ (this one will do)
obj = _InitializeConfiguredDatasetBuilder()
obj.__class__ = configure_builder_class(
builder_cls, metadata_configs, default_config_name=default_config_name, dataset_name=name
)
return obj
def configure_builder_class(
builder_cls: Type[DatasetBuilder],
builder_configs: List[BuilderConfig],
default_config_name: Optional[str],
dataset_name: str,
) -> Type[DatasetBuilder]:
"""
Dynamically create a builder class with custom builder configs parsed from README.md file,
i.e. set BUILDER_CONFIGS class variable of a builder class to custom configs list.
"""
class ConfiguredDatasetBuilder(builder_cls):
BUILDER_CONFIGS = builder_configs
DEFAULT_CONFIG_NAME = default_config_name
__module__ = builder_cls.__module__ # so that the actual packaged builder can be imported
        def __reduce__(self):  # to make the dynamically created class picklable, see _InitializeConfiguredDatasetBuilder
parent_builder_cls = self.__class__.__mro__[1]
return (
_InitializeConfiguredDatasetBuilder(),
(
parent_builder_cls,
self.BUILDER_CONFIGS,
self.DEFAULT_CONFIG_NAME,
self.dataset_name,
),
self.__dict__.copy(),
)
ConfiguredDatasetBuilder.__name__ = (
f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
)
ConfiguredDatasetBuilder.__qualname__ = (
f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
)
return ConfiguredDatasetBuilder
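# Minimal sketch of configure_builder_class using the packaged "parquet" builder;
# the config and dataset names below are made-up placeholders.
def _example_configure_builder_class():
    module_path, _ = _PACKAGED_DATASETS_MODULES["parquet"]
    builder_cls = import_main_class(module_path)
    configs = [builder_cls.BUILDER_CONFIG_CLASS(name="my_config")]
    # Returns a subclass of the parquet builder whose BUILDER_CONFIGS is `configs`.
    return configure_builder_class(
        builder_cls,
        builder_configs=configs,
        default_config_name="my_config",
        dataset_name="my_dataset",
    )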
def get_dataset_builder_class(
dataset_module: "DatasetModule", dataset_name: Optional[str] = None
) -> Type[DatasetBuilder]:
with (
lock_importable_file(dataset_module.importable_file_path)
if dataset_module.importable_file_path
else nullcontext()
):
builder_cls = import_main_class(dataset_module.module_path)
if dataset_module.builder_configs_parameters.builder_configs:
dataset_name = dataset_name or dataset_module.builder_kwargs.get("dataset_name")
if dataset_name is None:
raise ValueError("dataset_name should be specified but got None")
builder_cls = configure_builder_class(
builder_cls,
builder_configs=dataset_module.builder_configs_parameters.builder_configs,
default_config_name=dataset_module.builder_configs_parameters.default_config_name,
dataset_name=dataset_name,
)
return builder_cls
def files_to_hash(file_paths: List[str]) -> str:
"""
Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
"""
# List all python files in directories if directories are supplied as part of external imports
to_use_files: List[Union[Path, str]] = []
for file_path in file_paths:
if os.path.isdir(file_path):
to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]")))
else:
to_use_files.append(file_path)
# Get the code from all these files
lines = []
for file_path in to_use_files:
with open(file_path, encoding="utf-8") as f:
lines.extend(f.readlines())
return _hash_python_lines(lines)
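# Minimal usage sketch for files_to_hash; the script path and the directory of
# local imports are made-up placeholders and must exist on disk for this to run.
def _example_files_to_hash() -> str:
    # Directories are expanded to the *.py files they contain before hashing.
    return files_to_hash(["./my_dataset.py", "./my_dataset_utils"])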
def increase_load_count(name: str):
"""Update the download count of a dataset."""
if not config.HF_HUB_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS:
try:
get_session().head(
"/".join((config.S3_DATASETS_BUCKET_PREFIX, name, name + ".py")),
headers={"User-Agent": get_datasets_user_agent()},
timeout=3,
)
except Exception:
pass
def _download_additional_modules(
    name: str, base_path: str, imports: List[Tuple[str, str, str, str]], download_config: Optional[DownloadConfig]
) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
"""
    Download the additional modules for a module <name>.py at URL (or local path) <base_path>/<name>.py
    The imports must have been parsed first using ``get_imports``.
    If some modules need to be installed with pip, an error is raised showing how to install them.
    This function returns the list of downloaded modules as tuples (import_name, module_file_path).
The downloaded modules can then be moved into an importable directory with ``_copy_script_and_other_resources_in_importable_dir``.
"""
local_imports = []
library_imports = []
download_config = download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading extra modules"
for import_type, import_name, import_path, sub_directory in imports:
if import_type == "library":
library_imports.append((import_name, import_path)) # Import from a library
continue
if import_name == name:
raise ValueError(
f"Error in the {name} script, importing relative {import_name} module "
f"but {import_name} is the name of the script. "
f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
f"comment pointing to the original relative import file path."
)
if import_type == "internal":
url_or_filename = url_or_path_join(base_path, import_path + ".py")
elif import_type == "external":
url_or_filename = import_path
else:
raise ValueError("Wrong import_type")
local_import_path = cached_path(
url_or_filename,
download_config=download_config,
)
if sub_directory is not None:
local_import_path = os.path.join(local_import_path, sub_directory)
local_imports.append((import_name, local_import_path))
return local_imports, library_imports
def _check_library_imports(name: str, library_imports: List[Tuple[str, str]]) -> None:
# Check library imports
needs_to_be_installed = {}
for library_import_name, library_import_path in library_imports:
try:
lib = importlib.import_module(library_import_name) # noqa F841
except ImportError:
if library_import_name not in needs_to_be_installed or library_import_path != library_import_name:
needs_to_be_installed[library_import_name] = library_import_path
if needs_to_be_installed:
_dependencies_str = "dependencies" if len(needs_to_be_installed) > 1 else "dependency"
_them_str = "them" if len(needs_to_be_installed) > 1 else "it"
if "sklearn" in needs_to_be_installed.keys():
needs_to_be_installed["sklearn"] = "scikit-learn"
if "Bio" in needs_to_be_installed.keys():
needs_to_be_installed["Bio"] = "biopython"
raise ImportError(
f"To be able to use {name}, you need to install the following {_dependencies_str}: "
f"{', '.join(needs_to_be_installed)}.\nPlease install {_them_str} using 'pip install "
f"{' '.join(needs_to_be_installed.values())}' for instance."
)
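# Minimal sketch of how a missing dependency surfaces; "some_missing_lib" is a
# made-up module name that is assumed not to be installed.
def _example_check_library_imports() -> str:
    try:
        _check_library_imports("my_dataset", [("some_missing_lib", "some_missing_lib")])
    except ImportError as e:
        # The error message suggests: pip install some_missing_lib
        return str(e)
    return "all imports available"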
def _copy_script_and_other_resources_in_importable_dir(
name: str,
importable_directory_path: str,
subdirectory_name: str,
original_local_path: str,
local_imports: List[Tuple[str, str]],
additional_files: List[Tuple[str, str]],
download_mode: Optional[Union[DownloadMode, str]],
) -> str:
"""Copy a script and its required imports to an importable directory
Args:
name (str): name of the resource to load
importable_directory_path (str): path to the loadable folder in the dynamic modules directory
subdirectory_name (str): name of the subdirectory in importable_directory_path in which to place the script
original_local_path (str): local path to the resource script
local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy)
additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy)
download_mode (Optional[Union[DownloadMode, str]]): download mode
Return:
importable_file: path to an importable module with importlib.import_module
"""
# Define a directory with a unique name in our dataset folder
# path is: ./datasets/dataset_name/hash_from_code/script.py
# we use a hash as subdirectory_name to be able to have multiple versions of a dataset processing file together
importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name)
importable_file = os.path.join(importable_subdirectory, name + ".py")
# Prevent parallel disk operations
with lock_importable_file(importable_file):
# Create main dataset folder if needed
if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path):
shutil.rmtree(importable_directory_path)
os.makedirs(importable_directory_path, exist_ok=True)
# add an __init__ file to the main dataset folder if needed
init_file_path = os.path.join(importable_directory_path, "__init__.py")
if not os.path.exists(init_file_path):
with open(init_file_path, "w"):
pass
# Create hash dataset folder if needed
os.makedirs(importable_subdirectory, exist_ok=True)
# add an __init__ file to the hash dataset folder if needed
init_file_path = os.path.join(importable_subdirectory, "__init__.py")
if not os.path.exists(init_file_path):
with open(init_file_path, "w"):
pass
# Copy dataset.py file in hash folder if needed
if not os.path.exists(importable_file):
shutil.copyfile(original_local_path, importable_file)
# Record metadata associating original dataset path with local unique folder
# Use os.path.splitext to split extension from importable_local_file
meta_path = os.path.splitext(importable_file)[0] + ".json"
if not os.path.exists(meta_path):
meta = {"original file path": original_local_path, "local file path": importable_file}
# the filename is *.py in our case, so better rename to filename.json instead of filename.py.json
with open(meta_path, "w", encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
# Copy all the additional imports
for import_name, import_path in local_imports:
if os.path.isfile(import_path):
full_path_local_import = os.path.join(importable_subdirectory, import_name + ".py")
if not os.path.exists(full_path_local_import):
shutil.copyfile(import_path, full_path_local_import)
elif os.path.isdir(import_path):
full_path_local_import = os.path.join(importable_subdirectory, import_name)
if not os.path.exists(full_path_local_import):
shutil.copytree(import_path, full_path_local_import)
else:
raise ImportError(f"Error with local import at {import_path}")
# Copy additional files like dataset_infos.json file if needed
for file_name, original_path in additional_files:
destination_additional_path = os.path.join(importable_subdirectory, file_name)
if not os.path.exists(destination_additional_path) or not filecmp.cmp(
original_path, destination_additional_path
):
shutil.copyfile(original_path, destination_additional_path)
return importable_file
def _get_importable_file_path(
dynamic_modules_path: str,
module_namespace: str,
subdirectory_name: str,
name: str,
) -> str:
importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
return os.path.join(importable_directory_path, subdirectory_name, name.split("/")[-1] + ".py")
def _create_importable_file(
local_path: str,
local_imports: List[Tuple[str, str]],
additional_files: List[Tuple[str, str]],
dynamic_modules_path: str,
module_namespace: str,
subdirectory_name: str,
name: str,
download_mode: DownloadMode,
) -> None:
importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
Path(importable_directory_path).mkdir(parents=True, exist_ok=True)
(Path(importable_directory_path).parent / "__init__.py").touch(exist_ok=True)
importable_local_file = _copy_script_and_other_resources_in_importable_dir(
name=name.split("/")[-1],
importable_directory_path=importable_directory_path,
subdirectory_name=subdirectory_name,
original_local_path=local_path,
local_imports=local_imports,
additional_files=additional_files,
download_mode=download_mode,
)
logger.debug(f"Created importable dataset file at {importable_local_file}")
def _load_importable_file(
dynamic_modules_path: str,
module_namespace: str,
subdirectory_name: str,
name: str,
) -> Tuple[str, str]:
module_path = ".".join(
[
os.path.basename(dynamic_modules_path),
module_namespace,
name.replace("/", "--"),
subdirectory_name,
name.split("/")[-1],
]
)
return module_path, subdirectory_name
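# Minimal sketch showing the on-disk path and the dotted module path that the two
# helpers above agree on; "username/my_dataset" and the hash argument are made-up placeholders.
def _example_importable_paths(dynamic_modules_path: str, subdirectory_name: str):
    file_path = _get_importable_file_path(
        dynamic_modules_path=dynamic_modules_path,
        module_namespace="datasets",
        subdirectory_name=subdirectory_name,
        name="username/my_dataset",
    )
    # e.g. <dynamic_modules_path>/datasets/username--my_dataset/<hash>/my_dataset.py
    module_path, _ = _load_importable_file(
        dynamic_modules_path=dynamic_modules_path,
        module_namespace="datasets",
        subdirectory_name=subdirectory_name,
        name="username/my_dataset",
    )
    # e.g. datasets_modules.datasets.username--my_dataset.<hash>.my_dataset
    return file_path, module_path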
def infer_module_for_data_files_list(
data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None
) -> Tuple[Optional[str], dict]:
"""Infer module (and builder kwargs) from list of data files.
It picks the module based on the most common file extension.
In case of a draw ".parquet" is the favorite, and then alphabetical order.
Args:
data_files_list (DataFilesList): List of data files.
        download_config (DownloadConfig, optional): Mainly used to pass `token` or `storage_options` to support different platforms and auth types.
Returns:
tuple[str, dict[str, Any]]: Tuple with
- inferred module name
- dict of builder kwargs
"""
extensions_counter = Counter(
("." + suffix.lower(), xbasename(filepath) in ("metadata.jsonl", "metadata.csv"))
for filepath in data_files_list[: config.DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE]
for suffix in xbasename(filepath).split(".")[1:]
)
if extensions_counter:
def sort_key(ext_count: Tuple[Tuple[str, bool], int]) -> Tuple[int, bool]:
"""Sort by count and set ".parquet" as the favorite in case of a draw, and ignore metadata files"""
(ext, is_metadata), count = ext_count
return (not is_metadata, count, ext == ".parquet", ext)
for (ext, _), _ in sorted(extensions_counter.items(), key=sort_key, reverse=True):
if ext in _EXTENSION_TO_MODULE:
return _EXTENSION_TO_MODULE[ext]
elif ext == ".zip":
return infer_module_for_data_files_list_in_archives(data_files_list, download_config=download_config)
return None, {}
def infer_module_for_data_files_list_in_archives(
data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None
) -> Tuple[Optional[str], dict]:
"""Infer module (and builder kwargs) from list of archive data files.
Args:
data_files_list (DataFilesList): List of data files.
        download_config (DownloadConfig, optional): Mainly used to pass `token` or `storage_options` to support different platforms and auth types.
Returns:
tuple[str, dict[str, Any]]: Tuple with
- inferred module name
- dict of builder kwargs
"""
archived_files = []
archive_files_counter = 0
for filepath in data_files_list:
if str(filepath).endswith(".zip"):
archive_files_counter += 1
if archive_files_counter > config.GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE:
break
extracted = xjoin(StreamingDownloadManager().extract(filepath), "**")
archived_files += [
f.split("::")[0]
for f in xglob(extracted, recursive=True, download_config=download_config)[
: config.ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE
]
]
extensions_counter = Counter(
"." + suffix.lower() for filepath in archived_files for suffix in xbasename(filepath).split(".")[1:]
)
if extensions_counter:
most_common = extensions_counter.most_common(1)[0][0]
if most_common in _EXTENSION_TO_MODULE:
return _EXTENSION_TO_MODULE[most_common]
return None, {}
def infer_module_for_data_files(
data_files: DataFilesDict, path: Optional[str] = None, download_config: Optional[DownloadConfig] = None
) -> Tuple[Optional[str], Dict[str, Any]]:
"""Infer module (and builder kwargs) from data files. Raise if module names for different splits don't match.
Args:
data_files ([`DataFilesDict`]): Dict of list of data files.
path (str, *optional*): Dataset name or path.
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters to authenticate on the Hugging Face Hub for private remote files.
Returns:
tuple[str, dict[str, Any]]: Tuple with
- inferred module name
- builder kwargs
"""
split_modules = {
split: infer_module_for_data_files_list(data_files_list, download_config=download_config)
for split, data_files_list in data_files.items()
}
module_name, default_builder_kwargs = next(iter(split_modules.values()))
if any((module_name, default_builder_kwargs) != split_module for split_module in split_modules.values()):
raise ValueError(f"Couldn't infer the same data file format for all splits. Got {split_modules}")
if not module_name:
raise DataFilesNotFoundError("No (supported) data files found" + (f" in {path}" if path else ""))
return module_name, default_builder_kwargs
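# Minimal sketch of module inference from file names alone. The plain dict and
# lists below are stand-ins for DataFilesDict/DataFilesList (the helpers only
# iterate and slice), and the file names are made-up placeholders.
def _example_infer_module_for_data_files():
    data_files = {
        "train": ["train-00000-of-00001.parquet"],
        "test": ["test-00000-of-00001.parquet"],
    }
    module_name, default_builder_kwargs = infer_module_for_data_files(data_files)
    return module_name  # expected: "parquet"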
def create_builder_configs_from_metadata_configs(
module_path: str,
metadata_configs: MetadataConfigs,
supports_metadata: bool,
base_path: Optional[str] = None,
    default_builder_kwargs: Optional[Dict[str, Any]] = None,
    download_config: Optional[DownloadConfig] = None,
) -> Tuple[List[BuilderConfig], Optional[str]]:
builder_cls = import_main_class(module_path)
builder_config_cls = builder_cls.BUILDER_CONFIG_CLASS
default_config_name = metadata_configs.get_default_config_name()
builder_configs = []
default_builder_kwargs = {} if default_builder_kwargs is None else default_builder_kwargs
base_path = base_path if base_path is not None else ""
for config_name, config_params in metadata_configs.items():
config_data_files = config_params.get("data_files")
config_data_dir = config_params.get("data_dir")
config_base_path = xjoin(base_path, config_data_dir) if config_data_dir else base_path
try:
config_patterns = (
sanitize_patterns(config_data_files)
if config_data_files is not None
else get_data_patterns(config_base_path, download_config=download_config)
)
config_data_files_dict = DataFilesPatternsDict.from_patterns(
config_patterns,
allowed_extensions=ALL_ALLOWED_EXTENSIONS,
)
except EmptyDatasetError as e:
raise EmptyDatasetError(
f"Dataset at '{base_path}' doesn't contain data files matching the patterns for config '{config_name}',"
f" check `data_files` and `data_fir` parameters in the `configs` YAML field in README.md. "
) from e
if config_data_files is None and supports_metadata and config_patterns != DEFAULT_PATTERNS_ALL:
try:
config_metadata_patterns = get_metadata_patterns(base_path, download_config=download_config)
except FileNotFoundError:
config_metadata_patterns = None
if config_metadata_patterns is not None:
config_metadata_data_files_list = DataFilesPatternsList.from_patterns(config_metadata_patterns)
config_data_files_dict = DataFilesPatternsDict(
{
split: data_files_list + config_metadata_data_files_list
for split, data_files_list in config_data_files_dict.items()
}
)
ignored_params = [
param for param in config_params if not hasattr(builder_config_cls, param) and param != "default"
]
if ignored_params:
logger.warning(
f"Some datasets params were ignored: {ignored_params}. "
"Make sure to use only valid params for the dataset builder and to have "
"a up-to-date version of the `datasets` library."
)
builder_configs.append(
builder_config_cls(
name=config_name,
data_files=config_data_files_dict,
data_dir=config_data_dir,
**{
param: value
for param, value in {**default_builder_kwargs, **config_params}.items()
if hasattr(builder_config_cls, param) and param not in ("default", "data_files", "data_dir")
},
)
)
return builder_configs, default_config_name
@dataclass
class BuilderConfigsParameters:
"""Dataclass containing objects related to creation of builder configurations from yaml's metadata content.
Attributes:
metadata_configs (`MetadataConfigs`, *optional*):
Configs parsed from yaml's metadata.
builder_configs (`list[BuilderConfig]`, *optional*):
List of BuilderConfig objects created from metadata_configs above.
default_config_name (`str`):
Name of default config taken from yaml's metadata.
"""
metadata_configs: Optional[MetadataConfigs] = None
builder_configs: Optional[List[BuilderConfig]] = None
default_config_name: Optional[str] = None
@dataclass
class DatasetModule:
module_path: str
hash: str
builder_kwargs: dict
builder_configs_parameters: BuilderConfigsParameters = field(default_factory=BuilderConfigsParameters)
dataset_infos: Optional[DatasetInfosDict] = None
importable_file_path: Optional[str] = None
class _DatasetModuleFactory:
def get_module(self) -> DatasetModule:
raise NotImplementedError
class LocalDatasetModuleFactoryWithScript(_DatasetModuleFactory):
"""Get the module of a local dataset. The dataset script is loaded from a local script."""
def __init__(
self,
path: str,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
trust_remote_code: Optional[bool] = None,
):
self.path = path
self.name = Path(path).stem
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.dynamic_modules_path = dynamic_modules_path
self.trust_remote_code = trust_remote_code
def get_module(self) -> DatasetModule:
if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
warnings.warn(
f"The repository for {self.name} contains custom code which must be executed to correctly "
f"load the dataset. You can inspect the repository content at {self.path}\n"
f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
f"Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.",
FutureWarning,
)
# get script and other files
dataset_infos_path = Path(self.path).parent / config.DATASETDICT_INFOS_FILENAME
dataset_readme_path = Path(self.path).parent / config.REPOCARD_FILENAME
imports = get_imports(self.path)
local_imports, library_imports = _download_additional_modules(
name=self.name,
base_path=str(Path(self.path).parent),
imports=imports,
download_config=self.download_config,
)
additional_files = []
if dataset_infos_path.is_file():
additional_files.append((config.DATASETDICT_INFOS_FILENAME, str(dataset_infos_path)))
if dataset_readme_path.is_file():
additional_files.append((config.REPOCARD_FILENAME, dataset_readme_path))
# copy the script and the files in an importable directory
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
hash = files_to_hash([self.path] + [loc[1] for loc in local_imports])
importable_file_path = _get_importable_file_path(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
if not os.path.exists(importable_file_path):
trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
if trust_remote_code:
_create_importable_file(
local_path=self.path,
local_imports=local_imports,
additional_files=additional_files,
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
download_mode=self.download_mode,
)
else:
raise ValueError(
f"Loading {self.name} requires you to execute the dataset script in that"
" repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
" set the option `trust_remote_code=True` to remove this error."
)
_check_library_imports(name=self.name, library_imports=library_imports)
module_path, hash = _load_importable_file(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
builder_kwargs = {"base_path": str(Path(self.path).parent)}
return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
class LocalDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
"""Get the module of a dataset loaded from the user's data files. The dataset builder module to use is inferred
from the data files extensions."""
def __init__(
self,
path: str,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, List, Dict]] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
):
if data_dir and os.path.isabs(data_dir):
raise ValueError(f"`data_dir` must be relative to a dataset directory's root: {path}")
self.path = Path(path).as_posix()
self.name = Path(path).stem
self.data_files = data_files
self.data_dir = data_dir
self.download_mode = download_mode
def get_module(self) -> DatasetModule:
readme_path = os.path.join(self.path, config.REPOCARD_FILENAME)
standalone_yaml_path = os.path.join(self.path, config.REPOYAML_FILENAME)
dataset_card_data = DatasetCard.load(readme_path).data if os.path.isfile(readme_path) else DatasetCardData()
if os.path.exists(standalone_yaml_path):
with open(standalone_yaml_path, "r", encoding="utf-8") as f:
standalone_yaml_data = yaml.safe_load(f.read())
if standalone_yaml_data:
_dataset_card_data_dict = dataset_card_data.to_dict()
_dataset_card_data_dict.update(standalone_yaml_data)
dataset_card_data = DatasetCardData(**_dataset_card_data_dict)
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
# we need a set of data files to find which dataset builder to use
# because we need to infer module name by files extensions
base_path = Path(self.path, self.data_dir or "").expanduser().resolve().as_posix()
if self.data_files is not None:
patterns = sanitize_patterns(self.data_files)
elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())):
patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
else:
patterns = get_data_patterns(base_path)
data_files = DataFilesDict.from_patterns(
patterns,
base_path=base_path,
allowed_extensions=ALL_ALLOWED_EXTENSIONS,
)
module_name, default_builder_kwargs = infer_module_for_data_files(
data_files=data_files,
path=self.path,
)
data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
# Collect metadata files if the module supports them
supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
if self.data_files is None and supports_metadata:
try:
metadata_patterns = get_metadata_patterns(base_path)
except FileNotFoundError:
metadata_patterns = None
if metadata_patterns is not None:
metadata_data_files_list = DataFilesList.from_patterns(metadata_patterns, base_path=base_path)
if metadata_data_files_list:
data_files = DataFilesDict(
{
split: data_files_list + metadata_data_files_list
for split, data_files_list in data_files.items()
}
)
module_path, _ = _PACKAGED_DATASETS_MODULES[module_name]
if metadata_configs:
builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
module_path,
metadata_configs,
base_path=base_path,
supports_metadata=supports_metadata,
default_builder_kwargs=default_builder_kwargs,
)
else:
builder_configs: List[BuilderConfig] = [
import_main_class(module_path).BUILDER_CONFIG_CLASS(
data_files=data_files,
**default_builder_kwargs,
)
]
default_config_name = None
builder_kwargs = {
"base_path": self.path,
"dataset_name": camelcase_to_snakecase(Path(self.path).name),
}
if self.data_dir:
builder_kwargs["data_files"] = data_files
# this file is deprecated and was created automatically in old versions of push_to_hub
if os.path.isfile(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME)):
with open(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
legacy_dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
)
if len(legacy_dataset_infos) == 1:
# old config e.g. named "username--dataset_name"
legacy_config_name = next(iter(legacy_dataset_infos))
legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
legacy_dataset_infos.update(dataset_infos)
dataset_infos = legacy_dataset_infos
if default_config_name is None and len(dataset_infos) == 1:
default_config_name = next(iter(dataset_infos))
hash = Hasher.hash({"dataset_infos": dataset_infos, "builder_configs": builder_configs})
return DatasetModule(
module_path,
hash,
builder_kwargs,
dataset_infos=dataset_infos,
builder_configs_parameters=BuilderConfigsParameters(
metadata_configs=metadata_configs,
builder_configs=builder_configs,
default_config_name=default_config_name,
),
)
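# Minimal usage sketch for the factory above; the directory is a made-up placeholder
# and is assumed to exist and to contain supported data files (and optionally a README.md).
def _example_local_factory_without_script():
    factory = LocalDatasetModuleFactoryWithoutScript("./path/to/my_dataset_dir")
    dataset_module = factory.get_module()
    # module_path points to a packaged builder such as "datasets.packaged_modules.csv.csv"
    return dataset_module.module_path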
class PackagedDatasetModuleFactory(_DatasetModuleFactory):
"""Get the dataset builder module from the ones that are packaged with the library: csv, json, etc."""
def __init__(
self,
name: str,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, List, Dict]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
):
self.name = name
self.data_files = data_files
self.data_dir = data_dir
self.download_config = download_config
self.download_mode = download_mode
increase_load_count(name)
def get_module(self) -> DatasetModule:
base_path = Path(self.data_dir or "").expanduser().resolve().as_posix()
patterns = (
sanitize_patterns(self.data_files)
if self.data_files is not None
else get_data_patterns(base_path, download_config=self.download_config)
)
data_files = DataFilesDict.from_patterns(
patterns,
download_config=self.download_config,
base_path=base_path,
)
supports_metadata = self.name in _MODULE_SUPPORTS_METADATA
if self.data_files is None and supports_metadata and patterns != DEFAULT_PATTERNS_ALL:
try:
metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config)
except FileNotFoundError:
metadata_patterns = None
if metadata_patterns is not None:
metadata_data_files_list = DataFilesList.from_patterns(
metadata_patterns, download_config=self.download_config, base_path=base_path
)
if metadata_data_files_list:
data_files = DataFilesDict(
{
split: data_files_list + metadata_data_files_list
for split, data_files_list in data_files.items()
}
)
module_path, hash = _PACKAGED_DATASETS_MODULES[self.name]
builder_kwargs = {
"data_files": data_files,
"dataset_name": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs)
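# Minimal usage sketch for the packaged factory above; "./train.csv" is a made-up
# path that is assumed to exist in the current working directory.
def _example_packaged_factory():
    factory = PackagedDatasetModuleFactory("csv", data_files={"train": "./train.csv"})
    dataset_module = factory.get_module()
    # The resolved data files are passed to the builder via builder_kwargs.
    return dataset_module.builder_kwargs["data_files"]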
class HubDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
"""
Get the module of a dataset loaded from data files of a dataset repository.
The dataset builder module to use is inferred from the data files extensions.
"""
def __init__(
self,
name: str,
commit_hash: str,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, List, Dict]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
use_exported_dataset_infos: bool = False,
):
self.name = name
self.commit_hash = commit_hash
self.data_files = data_files
self.data_dir = data_dir
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.use_exported_dataset_infos = use_exported_dataset_infos
increase_load_count(name)
def get_module(self) -> DatasetModule:
# Get the Dataset Card and fix the revision in case there are new commits in the meantime
api = HfApi(
endpoint=config.HF_ENDPOINT,
token=self.download_config.token,
library_name="datasets",
library_version=__version__,
user_agent=get_datasets_user_agent(self.download_config.user_agent),
)
try:
dataset_readme_path = api.hf_hub_download(
repo_id=self.name,
filename=config.REPOCARD_FILENAME,
repo_type="dataset",
revision=self.commit_hash,
proxies=self.download_config.proxies,
)
dataset_card_data = DatasetCard.load(dataset_readme_path).data
except EntryNotFoundError:
dataset_card_data = DatasetCardData()
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading standalone yaml"
try:
standalone_yaml_path = cached_path(
hf_dataset_url(self.name, config.REPOYAML_FILENAME, revision=self.commit_hash),
download_config=download_config,
)
with open(standalone_yaml_path, "r", encoding="utf-8") as f:
standalone_yaml_data = yaml.safe_load(f.read())
if standalone_yaml_data:
_dataset_card_data_dict = dataset_card_data.to_dict()
_dataset_card_data_dict.update(standalone_yaml_data)
dataset_card_data = DatasetCardData(**_dataset_card_data_dict)
except FileNotFoundError:
pass
base_path = f"hf://datasets/{self.name}@{self.commit_hash}/{self.data_dir or ''}".rstrip("/")
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
if config.USE_PARQUET_EXPORT and self.use_exported_dataset_infos:
try:
exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token
)
exported_dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
for config_name in exported_dataset_infos
}
)
except _dataset_viewer.DatasetViewerError:
exported_dataset_infos = None
else:
exported_dataset_infos = None
if exported_dataset_infos:
exported_dataset_infos.update(dataset_infos)
dataset_infos = exported_dataset_infos
# we need a set of data files to find which dataset builder to use
# because we need to infer module name by files extensions
if self.data_files is not None:
patterns = sanitize_patterns(self.data_files)
elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())):
patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
else:
patterns = get_data_patterns(base_path, download_config=self.download_config)
data_files = DataFilesDict.from_patterns(
patterns,
base_path=base_path,
allowed_extensions=ALL_ALLOWED_EXTENSIONS,
download_config=self.download_config,
)
module_name, default_builder_kwargs = infer_module_for_data_files(
data_files=data_files,
path=self.name,
download_config=self.download_config,
)
data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
# Collect metadata files if the module supports them
supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
if self.data_files is None and supports_metadata:
try:
metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config)
except FileNotFoundError:
metadata_patterns = None
if metadata_patterns is not None:
metadata_data_files_list = DataFilesList.from_patterns(
metadata_patterns, download_config=self.download_config, base_path=base_path
)
if metadata_data_files_list:
data_files = DataFilesDict(
{
split: data_files_list + metadata_data_files_list
for split, data_files_list in data_files.items()
}
)
module_path, _ = _PACKAGED_DATASETS_MODULES[module_name]
if metadata_configs:
builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
module_path,
metadata_configs,
base_path=base_path,
supports_metadata=supports_metadata,
default_builder_kwargs=default_builder_kwargs,
download_config=self.download_config,
)
else:
builder_configs: List[BuilderConfig] = [
import_main_class(module_path).BUILDER_CONFIG_CLASS(
data_files=data_files,
**default_builder_kwargs,
)
]
default_config_name = None
builder_kwargs = {
"base_path": hf_dataset_url(self.name, "", revision=self.commit_hash).rstrip("/"),
"repo_id": self.name,
"dataset_name": camelcase_to_snakecase(Path(self.name).name),
}
if self.data_dir:
builder_kwargs["data_files"] = data_files
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading metadata"
try:
# this file is deprecated and was created automatically in old versions of push_to_hub
dataset_infos_path = cached_path(
hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.commit_hash),
download_config=download_config,
)
with open(dataset_infos_path, encoding="utf-8") as f:
legacy_dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
)
if len(legacy_dataset_infos) == 1:
# old config e.g. named "username--dataset_name"
legacy_config_name = next(iter(legacy_dataset_infos))
legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
legacy_dataset_infos.update(dataset_infos)
dataset_infos = legacy_dataset_infos
except FileNotFoundError:
pass
if default_config_name is None and len(dataset_infos) == 1:
default_config_name = next(iter(dataset_infos))
return DatasetModule(
module_path,
self.commit_hash,
builder_kwargs,
dataset_infos=dataset_infos,
builder_configs_parameters=BuilderConfigsParameters(
metadata_configs=metadata_configs,
builder_configs=builder_configs,
default_config_name=default_config_name,
),
)
class HubDatasetModuleFactoryWithParquetExport(_DatasetModuleFactory):
"""
Get the module of a dataset loaded from parquet files of a dataset repository parquet export.
"""
def __init__(
self,
name: str,
commit_hash: str,
download_config: Optional[DownloadConfig] = None,
):
self.name = name
self.commit_hash = commit_hash
self.download_config = download_config or DownloadConfig()
increase_load_count(name)
def get_module(self) -> DatasetModule:
exported_parquet_files = _dataset_viewer.get_exported_parquet_files(
dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token
)
exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token
)
dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
for config_name in exported_dataset_infos
}
)
parquet_commit_hash = (
HfApi(
endpoint=config.HF_ENDPOINT,
token=self.download_config.token,
library_name="datasets",
library_version=__version__,
user_agent=get_datasets_user_agent(self.download_config.user_agent),
)
.dataset_info(
self.name,
revision="refs/convert/parquet",
token=self.download_config.token,
timeout=100.0,
)
.sha
) # fix the revision in case there are new commits in the meantime
metadata_configs = MetadataConfigs._from_exported_parquet_files_and_dataset_infos(
parquet_commit_hash=parquet_commit_hash,
exported_parquet_files=exported_parquet_files,
dataset_infos=dataset_infos,
)
module_path, _ = _PACKAGED_DATASETS_MODULES["parquet"]
builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
module_path,
metadata_configs,
supports_metadata=False,
download_config=self.download_config,
)
builder_kwargs = {
"repo_id": self.name,
"dataset_name": camelcase_to_snakecase(Path(self.name).name),
}
return DatasetModule(
module_path,
self.commit_hash,
builder_kwargs,
dataset_infos=dataset_infos,
builder_configs_parameters=BuilderConfigsParameters(
metadata_configs=metadata_configs,
builder_configs=builder_configs,
default_config_name=default_config_name,
),
)
class HubDatasetModuleFactoryWithScript(_DatasetModuleFactory):
"""
Get the module of a dataset from a dataset repository.
The dataset script comes from the script inside the dataset repository.
"""
def __init__(
self,
name: str,
commit_hash: str,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
trust_remote_code: Optional[bool] = None,
):
self.name = name
self.commit_hash = commit_hash
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.dynamic_modules_path = dynamic_modules_path
self.trust_remote_code = trust_remote_code
increase_load_count(name)
def download_loading_script(self) -> str:
file_path = hf_dataset_url(self.name, self.name.split("/")[-1] + ".py", revision=self.commit_hash)
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading builder script"
return cached_path(file_path, download_config=download_config)
    def download_dataset_infos_file(self) -> Optional[str]:
dataset_infos = hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.commit_hash)
# Download the dataset infos file if available
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading metadata"
try:
return cached_path(
dataset_infos,
download_config=download_config,
)
except (FileNotFoundError, ConnectionError):
return None
    def download_dataset_readme_file(self) -> Optional[str]:
        readme_url = hf_dataset_url(self.name, config.REPOCARD_FILENAME, revision=self.commit_hash)
        # Download the dataset readme file if available
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading readme"
try:
return cached_path(
readme_url,
download_config=download_config,
)
except (FileNotFoundError, ConnectionError):
return None
def get_module(self) -> DatasetModule:
if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
warnings.warn(
f"The repository for {self.name} contains custom code which must be executed to correctly "
f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{self.name}\n"
f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
f"Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.",
FutureWarning,
)
# get script and other files
local_path = self.download_loading_script()
dataset_infos_path = self.download_dataset_infos_file()
dataset_readme_path = self.download_dataset_readme_file()
imports = get_imports(local_path)
local_imports, library_imports = _download_additional_modules(
name=self.name,
base_path=hf_dataset_url(self.name, "", revision=self.commit_hash),
imports=imports,
download_config=self.download_config,
)
additional_files = []
if dataset_infos_path:
additional_files.append((config.DATASETDICT_INFOS_FILENAME, dataset_infos_path))
if dataset_readme_path:
additional_files.append((config.REPOCARD_FILENAME, dataset_readme_path))
# copy the script and the files in an importable directory
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
importable_file_path = _get_importable_file_path(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
if not os.path.exists(importable_file_path):
trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
if trust_remote_code:
_create_importable_file(
local_path=local_path,
local_imports=local_imports,
additional_files=additional_files,
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
download_mode=self.download_mode,
)
else:
raise ValueError(
f"Loading {self.name} requires you to execute the dataset script in that"
" repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
" set the option `trust_remote_code=True` to remove this error."
)
_check_library_imports(name=self.name, library_imports=library_imports)
module_path, hash = _load_importable_file(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
builder_kwargs = {
"base_path": hf_dataset_url(self.name, "", revision=self.commit_hash).rstrip("/"),
"repo_id": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
class CachedDatasetModuleFactory(_DatasetModuleFactory):
"""
Get the module of a dataset that has been loaded once already and cached.
The script that is loaded from the cache is the most recent one with a matching name.
"""
def __init__(
self,
name: str,
cache_dir: Optional[str] = None,
dynamic_modules_path: Optional[str] = None,
):
self.name = name
self.cache_dir = cache_dir
self.dynamic_modules_path = dynamic_modules_path
assert self.name.count("/") <= 1
def get_module(self) -> DatasetModule:
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
importable_directory_path = os.path.join(dynamic_modules_path, "datasets", self.name.replace("/", "--"))
hashes = (
[h for h in os.listdir(importable_directory_path) if len(h) == 64]
if os.path.isdir(importable_directory_path)
else None
)
if hashes:
# get most recent
def _get_modification_time(module_hash):
return (
(Path(importable_directory_path) / module_hash / (self.name.split("/")[-1] + ".py"))
.stat()
.st_mtime
)
hash = sorted(hashes, key=_get_modification_time)[-1]
warning_msg = (
f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
f"couldn't be found locally at {self.name}"
)
if not config.HF_HUB_OFFLINE:
warning_msg += ", or remotely on the Hugging Face Hub."
logger.warning(warning_msg)
importable_file_path = _get_importable_file_path(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
module_path, hash = _load_importable_file(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
builder_kwargs = {
"repo_id": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
cache_dir = os.path.expanduser(str(self.cache_dir or config.HF_DATASETS_CACHE))
namespace_and_dataset_name = self.name.split("/")
namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1])
cached_relative_path = "___".join(namespace_and_dataset_name)
cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path)
cached_directory_paths = [
cached_directory_path
for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*"))
if os.path.isdir(cached_directory_path)
]
if cached_directory_paths:
builder_kwargs = {
"repo_id": self.name,
"dataset_name": self.name.split("/")[-1],
}
warning_msg = f"Using the latest cached version of the dataset since {self.name} couldn't be found on the Hugging Face Hub"
if config.HF_HUB_OFFLINE:
warning_msg += " (offline mode is enabled)."
logger.warning(warning_msg)
return DatasetModule(
"datasets.packaged_modules.cache.cache",
"auto",
{**builder_kwargs, "version": "auto"},
)
raise FileNotFoundError(f"Dataset {self.name} is not cached in {self.cache_dir}")
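# Minimal usage sketch for the cached factory above; "username/my_dataset" is a
# made-up name, and a FileNotFoundError is raised if nothing matching was cached.
def _example_cached_factory():
    return CachedDatasetModuleFactory("username/my_dataset").get_module()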
def dataset_module_factory(
path: str,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[Union[Dict, List, str, DataFilesDict]] = None,
cache_dir: Optional[str] = None,
trust_remote_code: Optional[bool] = None,
_require_default_config_name=True,
_require_custom_configs=False,
**download_kwargs,
) -> DatasetModule:
"""
Download/extract/cache a dataset module.
Dataset codes are cached inside the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks).
Args:
path (str): Path or name of the dataset.
Depending on ``path``, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
For local datasets:
- if ``path`` is a local directory (containing data files only)
-> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
e.g. ``'./path/to/directory/with/my/csv/data'``.
- if ``path`` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory):
-> load the dataset builder from the dataset script
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
For datasets on the Hugging Face Hub (list all available datasets with ``huggingface_hub.list_datasets()``)
- if ``path`` is a dataset repository on the HF hub (containing data files only)
-> load a generic dataset builder (csv, text etc.) based on the content of the repository
e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing your data files.
- if ``path`` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
-> load the dataset builder from the dataset script in the dataset repository
e.g. ``glue``, ``squad``, ``'username/dataset_name'``, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
By default, the datasets are stored inside the `datasets_modules` module.
data_dir (:obj:`str`, optional): Directory with the data files. Used only if `data_files` is not specified,
            in which case it is equivalent to passing `os.path.join(data_dir, "**")` as `data_files`.
data_files (:obj:`Union[Dict, List, str]`, optional): Defining the data_files of the dataset configuration.
cache_dir (`str`, *optional*):
Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
<Added version="2.16.0"/>
trust_remote_code (`bool`, defaults to `False`):
Whether or not to allow for datasets defined on the Hub using a dataset script. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
<Added version="2.16.0"/>
<Changed version="2.20.0">
`trust_remote_code` defaults to `False` if not specified.
</Changed>
**download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override
the attributes in download_config if supplied.
Returns:
DatasetModule
"""
if download_config is None:
download_config = DownloadConfig(**download_kwargs)
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
download_config.extract_compressed_file = True
download_config.force_extract = True
download_config.force_download = download_mode == DownloadMode.FORCE_REDOWNLOAD
filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
if not filename.endswith(".py"):
filename = filename + ".py"
combined_path = os.path.join(path, filename)
# We have several ways to get a dataset builder:
#
# - if path is the name of a packaged dataset module
# -> use the packaged module (json, csv, etc.)
#
# - if os.path.join(path, name) is a local python file
# -> use the module from the python file
# - if path is a local directory (but no python file)
# -> use a packaged module (csv, text etc.) based on content of the directory
#
    # - if path has one "/" and is a dataset repository on the HF hub with a python file
    #    -> use the module from the python file in the dataset repository
    # - if path has one "/" and is a dataset repository on the HF hub without a python file
# -> use a packaged module (csv, text etc.) based on content of the repository
# Try packaged
if path in _PACKAGED_DATASETS_MODULES:
return PackagedDatasetModuleFactory(
path,
data_dir=data_dir,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
).get_module()
# Try locally
elif path.endswith(filename):
if os.path.isfile(path):
return LocalDatasetModuleFactoryWithScript(
path,
download_mode=download_mode,
dynamic_modules_path=dynamic_modules_path,
trust_remote_code=trust_remote_code,
).get_module()
else:
raise FileNotFoundError(f"Couldn't find a dataset script at {relative_to_absolute_path(path)}")
elif os.path.isfile(combined_path):
return LocalDatasetModuleFactoryWithScript(
combined_path,
download_mode=download_mode,
dynamic_modules_path=dynamic_modules_path,
trust_remote_code=trust_remote_code,
).get_module()
elif os.path.isdir(path):
return LocalDatasetModuleFactoryWithoutScript(
path, data_dir=data_dir, data_files=data_files, download_mode=download_mode
).get_module()
# Try remotely
elif is_relative_path(path) and path.count("/") <= 1:
try:
            # Get the Dataset Card + get the revision + check authentication all in one call
# We fix the commit_hash in case there are new commits in the meantime
api = HfApi(
endpoint=config.HF_ENDPOINT,
token=download_config.token,
library_name="datasets",
library_version=__version__,
user_agent=get_datasets_user_agent(download_config.user_agent),
)
try:
_raise_if_offline_mode_is_enabled()
dataset_readme_path = api.hf_hub_download(
repo_id=path,
filename=config.REPOCARD_FILENAME,
repo_type="dataset",
revision=revision,
proxies=download_config.proxies,
)
commit_hash = os.path.basename(os.path.dirname(dataset_readme_path))
except LocalEntryNotFoundError as e:
if isinstance(
e.__cause__,
(
OfflineModeIsEnabled,
requests.exceptions.Timeout,
requests.exceptions.ConnectionError,
),
):
raise ConnectionError(f"Couldn't reach '{path}' on the Hub ({e.__class__.__name__})") from e
else:
raise
except EntryNotFoundError:
commit_hash = api.dataset_info(
path,
revision=revision,
timeout=100.0,
).sha
except (
OfflineModeIsEnabled,
requests.exceptions.Timeout,
requests.exceptions.ConnectionError,
) as e:
raise ConnectionError(f"Couldn't reach '{path}' on the Hub ({e.__class__.__name__})") from e
except GatedRepoError as e:
message = f"Dataset '{path}' is a gated dataset on the Hub."
if e.response.status_code == 401:
message += " You must be authenticated to access it."
elif e.response.status_code == 403:
message += f" Visit the dataset page at https://huggingface.co/datasets/{path} to ask for access."
raise DatasetNotFoundError(message) from e
except RevisionNotFoundError as e:
raise DatasetNotFoundError(
f"Revision '{revision}' doesn't exist for dataset '{path}' on the Hub."
) from e
except RepositoryNotFoundError as e:
raise DatasetNotFoundError(f"Dataset '{path}' doesn't exist on the Hub or cannot be accessed.") from e
try:
dataset_script_path = api.hf_hub_download(
repo_id=path,
filename=filename,
repo_type="dataset",
revision=commit_hash,
proxies=download_config.proxies,
)
if _require_custom_configs or (revision and revision != "main"):
can_load_config_from_parquet_export = False
elif _require_default_config_name:
with open(dataset_script_path, "r", encoding="utf-8") as f:
can_load_config_from_parquet_export = "DEFAULT_CONFIG_NAME" not in f.read()
else:
can_load_config_from_parquet_export = True
if config.USE_PARQUET_EXPORT and can_load_config_from_parquet_export:
# If the parquet export is ready (parquet files + info available for the current sha), we can use it instead
# This fails when the dataset has multiple configs and a default config and
# the user didn't specify a configuration name (_require_default_config_name=True).
try:
out = HubDatasetModuleFactoryWithParquetExport(
path, download_config=download_config, commit_hash=commit_hash
).get_module()
logger.info("Loading the dataset from the Parquet export on Hugging Face.")
return out
except _dataset_viewer.DatasetViewerError:
pass
# Otherwise we must use the dataset script if the user trusts it
return HubDatasetModuleFactoryWithScript(
path,
commit_hash=commit_hash,
download_config=download_config,
download_mode=download_mode,
dynamic_modules_path=dynamic_modules_path,
trust_remote_code=trust_remote_code,
).get_module()
except EntryNotFoundError:
# Use the infos from the parquet export except in some cases:
if data_dir or data_files or (revision and revision != "main"):
use_exported_dataset_infos = False
else:
use_exported_dataset_infos = True
return HubDatasetModuleFactoryWithoutScript(
path,
commit_hash=commit_hash,
data_dir=data_dir,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
use_exported_dataset_infos=use_exported_dataset_infos,
).get_module()
except GatedRepoError as e:
message = f"Dataset '{path}' is a gated dataset on the Hub."
if e.response.status_code == 401:
message += " You must be authenticated to access it."
elif e.response.status_code == 403:
message += f" Visit the dataset page at https://huggingface.co/datasets/{path} to ask for access."
raise DatasetNotFoundError(message) from e
except RevisionNotFoundError as e:
raise DatasetNotFoundError(
f"Revision '{revision}' doesn't exist for dataset '{path}' on the Hub."
) from e
except Exception as e1:
        # All the attempts failed; before raising the error we should check if the module is already cached
try:
return CachedDatasetModuleFactory(
path, dynamic_modules_path=dynamic_modules_path, cache_dir=cache_dir
).get_module()
except Exception:
# If it's not in the cache, then it doesn't exist.
if isinstance(e1, OfflineModeIsEnabled):
raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None
if isinstance(e1, (DataFilesNotFoundError, DatasetNotFoundError, EmptyDatasetError)):
raise e1 from None
if isinstance(e1, FileNotFoundError):
if trust_remote_code:
raise FileNotFoundError(
f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory. "
f"Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}"
) from None
else:
raise FileNotFoundError(
f"Couldn't find any data file at {relative_to_absolute_path(path)}. "
f"Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}"
) from None
raise e1 from None
elif trust_remote_code:
raise FileNotFoundError(
f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory."
)
else:
raise FileNotFoundError(f"Couldn't find any data file at {relative_to_absolute_path(path)}.")
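# Example (editor's sketch, not part of the original source): exercising the resolution
# order documented above for a packaged builder. "csv" is a key of
# _PACKAGED_DATASETS_MODULES, so the factory resolves it locally without contacting the
# Hub; the file name is purely illustrative.
# ```
# module = dataset_module_factory("csv", data_files={"train": "my_data.csv"})
# print(module.module_path)     # import path of the packaged csv builder
# print(module.builder_kwargs)  # kwargs later forwarded to the DatasetBuilder
# ```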
def load_dataset_builder(
path: str,
name: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
cache_dir: Optional[str] = None,
features: Optional[Features] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
revision: Optional[Union[str, Version]] = None,
token: Optional[Union[bool, str]] = None,
storage_options: Optional[Dict] = None,
trust_remote_code: Optional[bool] = None,
_require_default_config_name=True,
**config_kwargs,
) -> DatasetBuilder:
"""Load a dataset builder which can be used to:
- Inspect general information that is required to build a dataset (cache directory, config, dataset info, features, data files, etc.)
- Download and prepare the dataset as Arrow files in the cache
- Get a streaming dataset without downloading or caching anything
You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
A dataset is a directory that contains some data files in generic formats (JSON, CSV, Parquet, etc.) and possibly
in a generic structure (Webdataset, ImageFolder, AudioFolder, VideoFolder, etc.)
Args:
path (`str`):
Path or name of the dataset.
- if `path` is a dataset repository on the HF hub (list all available datasets with [`huggingface_hub.list_datasets`])
-> load the dataset builder from supported files in the repository (csv, json, parquet, etc.)
e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing the data files.
- if `path` is a local directory
-> load the dataset builder from supported files in the directory (csv, json, parquet, etc.)
e.g. `'./path/to/directory/with/my/csv/data'`.
- if `path` is the name of a dataset builder and `data_files` or `data_dir` is specified
(available builders are "json", "csv", "parquet", "arrow", "text", "xml", "webdataset", "imagefolder", "audiofolder", "videofolder")
-> load the dataset builder from the files in `data_files` or `data_dir`
e.g. `'parquet'`.
It can also point to a local dataset script but this is not recommended.
name (`str`, *optional*):
Defining the name of the dataset configuration.
data_dir (`str`, *optional*):
Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
            the behavior is equivalent to passing `os.path.join(data_dir, "**")` as `data_files` to reference all the files in a directory.
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
cache_dir (`str`, *optional*):
Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
features ([`Features`], *optional*):
Set the features type to use for this dataset.
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
revision ([`Version`] or `str`, *optional*):
Version of the dataset script to load.
As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
storage_options (`dict`, *optional*, defaults to `None`):
**Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
<Added version="2.11.0"/>
trust_remote_code (`bool`, defaults to `False`):
Whether or not to allow for datasets defined on the Hub using a dataset script. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
<Added version="2.16.0"/>
<Changed version="2.20.0">
`trust_remote_code` defaults to `False` if not specified.
</Changed>
**config_kwargs (additional keyword arguments):
Keyword arguments to be passed to the [`BuilderConfig`]
and used in the [`DatasetBuilder`].
Returns:
[`DatasetBuilder`]
Example:
```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder('cornell-movie-review-data/rotten_tomatoes')
>>> ds_builder.info.features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
if token is not None:
download_config = download_config.copy() if download_config else DownloadConfig()
download_config.token = token
if storage_options is not None:
download_config = download_config.copy() if download_config else DownloadConfig()
download_config.storage_options.update(storage_options)
dataset_module = dataset_module_factory(
path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
data_dir=data_dir,
data_files=data_files,
cache_dir=cache_dir,
trust_remote_code=trust_remote_code,
_require_default_config_name=_require_default_config_name,
_require_custom_configs=bool(config_kwargs),
)
# Get dataset builder class from the processing script
builder_kwargs = dataset_module.builder_kwargs
data_dir = builder_kwargs.pop("data_dir", data_dir)
data_files = builder_kwargs.pop("data_files", data_files)
config_name = builder_kwargs.pop(
"config_name", name or dataset_module.builder_configs_parameters.default_config_name
)
dataset_name = builder_kwargs.pop("dataset_name", None)
info = dataset_module.dataset_infos.get(config_name) if dataset_module.dataset_infos else None
if (
path in _PACKAGED_DATASETS_MODULES
and data_files is None
and dataset_module.builder_configs_parameters.builder_configs[0].data_files is None
):
error_msg = f"Please specify the data files or data directory to load for the {path} dataset builder."
example_extensions = [
extension for extension in _EXTENSION_TO_MODULE if _EXTENSION_TO_MODULE[extension] == path
]
if example_extensions:
error_msg += f'\nFor example `data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}`'
raise ValueError(error_msg)
builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name)
# Instantiate the dataset builder
builder_instance: DatasetBuilder = builder_cls(
cache_dir=cache_dir,
dataset_name=dataset_name,
config_name=config_name,
data_dir=data_dir,
data_files=data_files,
hash=dataset_module.hash,
info=info,
features=features,
token=token,
storage_options=storage_options,
**builder_kwargs,
**config_kwargs,
)
builder_instance._use_legacy_cache_dir_if_possible(dataset_module)
return builder_instance
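# Example (editor's sketch, not part of the original source): the returned builder can be
# driven manually, mirroring what `load_dataset` does below; the repository id comes from
# the docstring example above.
# ```
# builder = load_dataset_builder("cornell-movie-review-data/rotten_tomatoes")
# builder.download_and_prepare()
# ds = builder.as_dataset(split="train")
# ```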
def load_dataset(
path: str,
name: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
split: Optional[Union[str, Split]] = None,
cache_dir: Optional[str] = None,
features: Optional[Features] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
verification_mode: Optional[Union[VerificationMode, str]] = None,
keep_in_memory: Optional[bool] = None,
save_infos: bool = False,
revision: Optional[Union[str, Version]] = None,
token: Optional[Union[bool, str]] = None,
streaming: bool = False,
num_proc: Optional[int] = None,
storage_options: Optional[Dict] = None,
    trust_remote_code: Optional[bool] = None,
**config_kwargs,
) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
"""Load a dataset from the Hugging Face Hub, or a local dataset.
You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
A dataset is a directory that contains some data files in generic formats (JSON, CSV, Parquet, etc.) and possibly
in a generic structure (Webdataset, ImageFolder, AudioFolder, VideoFolder, etc.)
This function does the following under the hood:
1. Load a dataset builder:
* Find the most common data format in the dataset and pick its associated builder (JSON, CSV, Parquet, Webdataset, ImageFolder, AudioFolder, etc.)
* Find which file goes into which split (e.g. train/test) based on file and directory names or on the YAML configuration
* It is also possible to specify `data_files` manually, and which dataset builder to use (e.g. "parquet").
2. Run the dataset builder:
In the general case:
* Download the data files from the dataset if they are not already available locally or cached.
* Process and cache the dataset in typed Arrow tables for caching.
            Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python generic types.
They can be directly accessed from disk, loaded in RAM or even streamed over the web.
In the streaming case:
* Don't download or cache anything. Instead, the dataset is lazily loaded and will be streamed on-the-fly when iterating on it.
3. Return a dataset built from the requested splits in `split` (default: all).
It can also use a custom dataset builder if the dataset contains a dataset script, but this feature is mostly for backward compatibility.
In this case the dataset script file must be named after the dataset repository or directory and end with ".py".
Args:
path (`str`):
Path or name of the dataset.
- if `path` is a dataset repository on the HF hub (list all available datasets with [`huggingface_hub.list_datasets`])
-> load the dataset from supported files in the repository (csv, json, parquet, etc.)
e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing the data files.
- if `path` is a local directory
-> load the dataset from supported files in the directory (csv, json, parquet, etc.)
e.g. `'./path/to/directory/with/my/csv/data'`.
- if `path` is the name of a dataset builder and `data_files` or `data_dir` is specified
(available builders are "json", "csv", "parquet", "arrow", "text", "xml", "webdataset", "imagefolder", "audiofolder", "videofolder")
-> load the dataset from the files in `data_files` or `data_dir`
e.g. `'parquet'`.
It can also point to a local dataset script but this is not recommended.
name (`str`, *optional*):
Defining the name of the dataset configuration.
data_dir (`str`, *optional*):
Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
            the behavior is equivalent to passing `os.path.join(data_dir, "**")` as `data_files` to reference all the files in a directory.
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
split (`Split` or `str`):
Which split of the data to load.
If `None`, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
If given, will return a single Dataset.
Splits can be combined and specified like in tensorflow-datasets.
cache_dir (`str`, *optional*):
Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
features (`Features`, *optional*):
Set the features type to use for this dataset.
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
<Added version="2.9.1"/>
keep_in_memory (`bool`, defaults to `None`):
Whether to copy the dataset in-memory. If `None`, the dataset
will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
nonzero. See more details in the [improve performance](../cache#improve-performance) section.
save_infos (`bool`, defaults to `False`):
Save the dataset information (checksums/size/splits/...).
revision ([`Version`] or `str`, *optional*):
Version of the dataset script to load.
As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If `True`, or not specified, will get token from `"~/.huggingface"`.
streaming (`bool`, defaults to `False`):
If set to `True`, don't download the data files. Instead, it streams the data progressively while
iterating on the dataset. An [`IterableDataset`] or [`IterableDatasetDict`] is returned instead in this case.
Note that streaming works for datasets that use data formats that support being iterated over like txt, csv, jsonl for example.
            JSON files may be downloaded completely. Also, streaming from remote zip or gzip files is supported, but other compressed formats
like rar and xz are not yet supported. The tgz format doesn't allow streaming.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
Multiprocessing is disabled by default.
<Added version="2.7.0"/>
storage_options (`dict`, *optional*, defaults to `None`):
**Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
<Added version="2.11.0"/>
trust_remote_code (`bool`, defaults to `False`):
Whether or not to allow for datasets defined on the Hub using a dataset script. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
<Added version="2.16.0"/>
<Changed version="2.20.0">
`trust_remote_code` defaults to `False` if not specified.
</Changed>
**config_kwargs (additional keyword arguments):
Keyword arguments to be passed to the `BuilderConfig`
and used in the [`DatasetBuilder`].
Returns:
[`Dataset`] or [`DatasetDict`]:
- if `split` is not `None`: the dataset requested,
- if `split` is `None`, a [`~datasets.DatasetDict`] with each split.
or [`IterableDataset`] or [`IterableDatasetDict`]: if `streaming=True`
        - if `split` is not `None`: the dataset requested,
- if `split` is `None`, a [`~datasets.streaming.IterableDatasetDict`] with each split.
Example:
Load a dataset from the Hugging Face Hub:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('cornell-movie-review-data/rotten_tomatoes', split='train')
# Load a subset or dataset configuration (here 'sst2')
>>> from datasets import load_dataset
>>> ds = load_dataset('nyu-mll/glue', 'sst2', split='train')
# Manual mapping of data files to splits
>>> data_files = {'train': 'train.csv', 'test': 'test.csv'}
>>> ds = load_dataset('namespace/your_dataset_name', data_files=data_files)
# Manual selection of a directory to load
>>> ds = load_dataset('namespace/your_dataset_name', data_dir='folder_name')
```
Load a local dataset:
```py
# Load a CSV file
>>> from datasets import load_dataset
>>> ds = load_dataset('csv', data_files='path/to/local/my_dataset.csv')
# Load a JSON file
>>> from datasets import load_dataset
>>> ds = load_dataset('json', data_files='path/to/local/my_dataset.json')
# Load from a local loading script (not recommended)
>>> from datasets import load_dataset
>>> ds = load_dataset('path/to/local/loading_script/loading_script.py', split='train')
```
Load an [`~datasets.IterableDataset`]:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('cornell-movie-review-data/rotten_tomatoes', split='train', streaming=True)
```
Load an image dataset with the `ImageFolder` dataset builder:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('imagefolder', data_dir='/path/to/images', split='train')
```
"""
if data_files is not None and not data_files:
raise ValueError(f"Empty 'data_files': '{data_files}'. It should be either non-empty or None (default).")
if Path(path, config.DATASET_STATE_JSON_FILENAME).exists():
raise ValueError(
"You are trying to load a dataset that was saved using `save_to_disk`. "
"Please use `load_from_disk` instead."
)
if streaming and num_proc is not None:
raise NotImplementedError(
"Loading a streaming dataset in parallel with `num_proc` is not implemented. "
"To parallelize streaming, you can wrap the dataset with a PyTorch DataLoader using `num_workers` > 1 instead."
)
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
verification_mode = VerificationMode(
(verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS
)
# Create a dataset builder
builder_instance = load_dataset_builder(
path=path,
name=name,
data_dir=data_dir,
data_files=data_files,
cache_dir=cache_dir,
features=features,
download_config=download_config,
download_mode=download_mode,
revision=revision,
token=token,
storage_options=storage_options,
trust_remote_code=trust_remote_code,
_require_default_config_name=name is None,
**config_kwargs,
)
# Return iterable dataset in case of streaming
if streaming:
return builder_instance.as_streaming_dataset(split=split)
# Download and prepare data
builder_instance.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
num_proc=num_proc,
storage_options=storage_options,
)
# Build dataset for splits
keep_in_memory = (
keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
)
ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory)
if save_infos:
builder_instance._save_infos()
return ds
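# Example (editor's sketch, not part of the original source): `split` also accepts the
# slicing/concatenation syntax mentioned in the docstring, e.g. part of train plus all
# of test.
# ```
# ds = load_dataset("cornell-movie-review-data/rotten_tomatoes", split="train[:100]+test")
# ```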
def load_from_disk(
dataset_path: PathLike, keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None
) -> Union[Dataset, DatasetDict]:
"""
Loads a dataset that was previously saved using [`~Dataset.save_to_disk`] from a dataset directory, or
from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
Args:
dataset_path (`path-like`):
Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
of the [`Dataset`] or [`DatasetDict`] directory where the dataset/dataset-dict will be
loaded from.
keep_in_memory (`bool`, defaults to `None`):
Whether to copy the dataset in-memory. If `None`, the dataset
will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
nonzero. See more details in the [improve performance](../cache#improve-performance) section.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Returns:
[`Dataset`] or [`DatasetDict`]:
- If `dataset_path` is a path of a dataset directory: the dataset requested.
- If `dataset_path` is a path of a dataset dict directory, a [`DatasetDict`] with each split.
Example:
```py
>>> from datasets import load_from_disk
>>> ds = load_from_disk('path/to/dataset/directory')
```
"""
fs: fsspec.AbstractFileSystem
fs, *_ = url_to_fs(dataset_path, **(storage_options or {}))
if not fs.exists(dataset_path):
raise FileNotFoundError(f"Directory {dataset_path} not found")
if fs.isfile(posixpath.join(dataset_path, config.DATASET_INFO_FILENAME)) and fs.isfile(
posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME)
):
return Dataset.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
elif fs.isfile(posixpath.join(dataset_path, config.DATASETDICT_JSON_FILENAME)):
return DatasetDict.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
else:
raise FileNotFoundError(
f"Directory {dataset_path} is neither a `Dataset` directory nor a `DatasetDict` directory."
)
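# Example (editor's sketch, not part of the original source): round trip through
# `save_to_disk`/`load_from_disk`; the paths are illustrative.
# ```
# ds = load_dataset("csv", data_files="path/to/local/my_dataset.csv", split="train")
# ds.save_to_disk("path/to/dataset/directory")
# reloaded = load_from_disk("path/to/dataset/directory")
# ```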
| datasets/src/datasets/load.py/0 | {
"file_path": "datasets/src/datasets/load.py",
"repo_id": "datasets",
"token_count": 44301
} |
from typing import List
import datasets
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class ImageFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Image
BASE_COLUMN_NAME = "image"
BUILDER_CONFIG_CLASS = ImageFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
# Obtained with:
# ```
# import PIL.Image
# IMAGE_EXTENSIONS = []
# PIL.Image.init()
# for ext, format in PIL.Image.EXTENSION.items():
# if format in PIL.Image.OPEN:
# IMAGE_EXTENSIONS.append(ext[1:])
# ```
# We intentionally do not run this code on launch because:
# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
IMAGE_EXTENSIONS = [
".blp",
".bmp",
".dib",
".bufr",
".cur",
".pcx",
".dcx",
".dds",
".ps",
".eps",
".fit",
".fits",
".fli",
".flc",
".ftc",
".ftu",
".gbr",
".gif",
".grib",
# ".h5", # may contain zero or several images
# ".hdf", # may contain zero or several images
".png",
".apng",
".jp2",
".j2k",
".jpc",
".jpf",
".jpx",
".j2c",
".icns",
".ico",
".im",
".iim",
".tif",
".tiff",
".jfif",
".jpe",
".jpg",
".jpeg",
".mpg",
".mpeg",
".msp",
".pcd",
".pxr",
".pbm",
".pgm",
".ppm",
".pnm",
".psd",
".bw",
".rgb",
".rgba",
".sgi",
".ras",
".tga",
".icb",
".vda",
".vst",
".webp",
".wmf",
".emf",
".xbm",
".xpm",
]
ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
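# Example (editor's sketch, not part of the original source): this builder backs
# `load_dataset("imagefolder", ...)`; class labels are typically inferred from the
# directory names unless `drop_labels` is set. The data directory is illustrative.
# ```
# from datasets import load_dataset
# ds = load_dataset("imagefolder", data_dir="/path/to/images", split="train")
# ```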
| datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py",
"repo_id": "datasets",
"token_count": 893
} |
#
# Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved.
# This file comes from the WebDataset library.
# See the LICENSE file for licensing terms (BSD-style).
#
"""
Binary tensor encodings for PyTorch and NumPy.
This defines efficient binary encodings for tensors. The format is 8 byte
aligned and can be used directly for computations when transmitted, say,
via RDMA. The format is supported by WebDataset with the `.ten` filename
extension. It is also used by Tensorcom, Tensorcom RDMA, and can be used
for fast tensor storage with LMDB and in disk files (which can be memory
mapped).
Data is encoded as a series of chunks:
- magic number (int64)
- length in bytes (int64)
- bytes (multiple of 64 bytes long)
Arrays are a header chunk followed by a data chunk.
Header chunks have the following structure:
- dtype (int64)
- 8 byte array name
- ndim (int64)
- dim[0]
- dim[1]
- ...
"""
import struct
import sys
import numpy as np
def bytelen(a):
"""Determine the length of a in bytes."""
if hasattr(a, "nbytes"):
return a.nbytes
elif isinstance(a, (bytearray, bytes)):
return len(a)
else:
raise ValueError(a, "cannot determine nbytes")
def bytedata(a):
    """Return the raw data corresponding to a."""
if isinstance(a, (bytearray, bytes, memoryview)):
return a
elif hasattr(a, "data"):
return a.data
else:
raise ValueError(a, "cannot return bytedata")
# tables for converting between long/short NumPy dtypes
long_to_short = """
float16 f2
float32 f4
float64 f8
int8 i1
int16 i2
int32 i4
int64 i8
uint8 u1
uint16 u2
uint32 u4
uint64 u8
""".strip()
long_to_short = [x.split() for x in long_to_short.split("\n")]
long_to_short = {x[0]: x[1] for x in long_to_short}
short_to_long = {v: k for k, v in long_to_short.items()}
def check_acceptable_input_type(data, allow64):
"""Check that the data has an acceptable type for tensor encoding.
:param data: array
:param allow64: allow 64 bit types
"""
for a in data:
if a.dtype.name not in long_to_short:
            raise ValueError("unsupported datatype")
        if not allow64 and a.dtype.name in ["float64", "int64", "uint64"]:
raise ValueError("64 bit datatypes not allowed unless explicitly enabled")
def str64(s):
"""Convert a string to an int64."""
s = s + "\0" * (8 - len(s))
s = s.encode("ascii")
return struct.unpack("@q", s)[0]
def unstr64(i):
"""Convert an int64 to a string."""
b = struct.pack("@q", i)
return b.decode("ascii").strip("\0")
def check_infos(data, infos, required_infos=None):
"""Verify the info strings."""
if required_infos is False or required_infos is None:
return data
if required_infos is True:
return data, infos
if not isinstance(required_infos, (tuple, list)):
raise ValueError("required_infos must be tuple or list")
    for required, actual in zip(required_infos, infos):
        if required != actual:
            raise ValueError(f"actual info {actual} doesn't match required info {required}")
return data
def encode_header(a, info=""):
"""Encode an array header as a byte array."""
if a.ndim >= 10:
raise ValueError("too many dimensions")
if a.nbytes != np.prod(a.shape) * a.itemsize:
raise ValueError("mismatch between size and shape")
if a.dtype.name not in long_to_short:
raise ValueError("unsupported array type")
header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape)
return bytedata(np.array(header, dtype="i8"))
def decode_header(h):
"""Decode a byte array into an array header."""
h = np.frombuffer(h, dtype="i8")
if unstr64(h[0]) not in short_to_long:
raise ValueError("unsupported array type")
dtype = np.dtype(short_to_long[unstr64(h[0])])
info = unstr64(h[1])
rank = int(h[2])
shape = tuple(h[3 : 3 + rank])
return shape, dtype, info
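# Example (editor's sketch, not part of the original source): header round trip for a
# small float32 array.
# ```
# a = np.zeros((2, 3), dtype="float32")
# shape, dtype, info = decode_header(encode_header(a, "x"))
# assert shape == (2, 3) and dtype == np.dtype("float32") and info == "x"
# ```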
def encode_list(l, infos=None): # noqa: E741
"""Given a list of arrays, encode them into a list of byte arrays."""
if infos is None:
infos = [""]
else:
if len(l) != len(infos):
            raise ValueError(f"length of list {l} must match length of infos {infos}")
result = []
for i, a in enumerate(l):
header = encode_header(a, infos[i % len(infos)])
result += [header, bytedata(a)]
return result
def decode_list(l, infos=False): # noqa: E741
"""Given a list of byte arrays, decode them into arrays."""
result = []
infos0 = []
for header, data in zip(l[::2], l[1::2]):
shape, dtype, info = decode_header(header)
a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape)
result += [a]
infos0 += [info]
return check_infos(result, infos0, infos)
magic_str = "~TenBin~"
magic = str64(magic_str)
magic_bytes = unstr64(magic).encode("ascii")
def roundup(n, k=64):
    """Round up to the next multiple of k (defaults to 64)."""
return k * ((n + k - 1) // k)
def encode_chunks(l): # noqa: E741
    """Encode a list of chunks into a single byte array, with lengths and magics."""
size = sum(16 + roundup(b.nbytes) for b in l)
result = bytearray(size)
offset = 0
for b in l:
result[offset : offset + 8] = magic_bytes
offset += 8
result[offset : offset + 8] = struct.pack("@q", b.nbytes)
offset += 8
result[offset : offset + bytelen(b)] = b
offset += roundup(bytelen(b))
return result
def decode_chunks(buf):
"""Decode a byte array into a list of chunks."""
result = []
offset = 0
total = bytelen(buf)
while offset < total:
if magic_bytes != buf[offset : offset + 8]:
raise ValueError("magic bytes mismatch")
offset += 8
nbytes = struct.unpack("@q", buf[offset : offset + 8])[0]
offset += 8
b = buf[offset : offset + nbytes]
offset += roundup(nbytes)
result.append(b)
return result
def encode_buffer(l, infos=None): # noqa: E741
"""Encode a list of arrays into a single byte array."""
if not isinstance(l, list):
raise ValueError("requires list")
return encode_chunks(encode_list(l, infos=infos))
def decode_buffer(buf, infos=False):
"""Decode a byte array into a list of arrays."""
return decode_list(decode_chunks(buf), infos=infos)
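# Example (editor's sketch, not part of the original source): encode two arrays into a
# single padded byte buffer and decode them back.
# ```
# xs = [np.arange(6, dtype="f4").reshape(2, 3), np.arange(4, dtype="i4")]
# buf = encode_buffer(xs)
# ys = decode_buffer(buf)
# assert all((x == y).all() for x, y in zip(xs, ys))
# ```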
def write_chunk(stream, buf):
"""Write a byte chunk to the stream with magics, length, and padding."""
nbytes = bytelen(buf)
stream.write(magic_bytes)
stream.write(struct.pack("@q", nbytes))
stream.write(bytedata(buf))
padding = roundup(nbytes) - nbytes
if padding > 0:
stream.write(b"\0" * padding)
def read_chunk(stream):
"""Read a byte chunk from a stream with magics, length, and padding."""
magic = stream.read(8)
if magic == b"":
return None
if magic != magic_bytes:
raise ValueError("magic number does not match")
nbytes = stream.read(8)
nbytes = struct.unpack("@q", nbytes)[0]
if nbytes < 0:
raise ValueError("negative nbytes")
data = stream.read(nbytes)
padding = roundup(nbytes) - nbytes
if padding > 0:
stream.read(padding)
return data
def write(stream, l, infos=None): # noqa: E741
"""Write a list of arrays to a stream, with magics, length, and padding."""
for chunk in encode_list(l, infos=infos):
write_chunk(stream, chunk)
def read(stream, n=sys.maxsize, infos=False):
"""Read a list of arrays from a stream, with magics, length, and padding."""
chunks = []
for _ in range(n):
header = read_chunk(stream)
if header is None:
break
data = read_chunk(stream)
if data is None:
raise ValueError("premature EOF")
chunks += [header, data]
return decode_list(chunks, infos=infos)
def save(fname, *args, infos=None, nocheck=False):
"""Save a list of arrays to a file, with magics, length, and padding."""
if not nocheck and not fname.endswith(".ten"):
raise ValueError("file name should end in .ten")
with open(fname, "wb") as stream:
write(stream, args, infos=infos)
def load(fname, infos=False, nocheck=False):
"""Read a list of arrays from a file, with magics, length, and padding."""
if not nocheck and not fname.endswith(".ten"):
raise ValueError("file name should end in .ten")
with open(fname, "rb") as stream:
return read(stream, infos=infos)
| datasets/src/datasets/packaged_modules/webdataset/_tenbin.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/webdataset/_tenbin.py",
"repo_id": "datasets",
"token_count": 3409
} |
"""Contains utilities to flag a feature as "experimental" in datasets."""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
"""Decorator to flag a feature as experimental.
    An experimental feature triggers a warning when used, as it might be subject to breaking changes in the future.
Args:
fn (`Callable`):
The function to flag as experimental.
Returns:
`Callable`: The decorated function.
Example:
```python
>>> from datasets.utils import experimental
>>> @experimental
... def my_function():
... print("Hello world!")
>>> my_function()
UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future.
Hello world!
```
"""
@wraps(fn)
def _inner_fn(*args, **kwargs):
warnings.warn(
(f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
UserWarning,
)
return fn(*args, **kwargs)
return _inner_fn
| datasets/src/datasets/utils/experimental.py/0 | {
"file_path": "datasets/src/datasets/utils/experimental.py",
"repo_id": "datasets",
"token_count": 386
} |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
"""Return the number of possible shards according to the input gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, so raise an error in this case
# until we decide how to define sharding without ambiguity for users
lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
if len(set(lists_lengths.values())) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
)
)
max_length = max(lists_lengths.values(), default=0)
return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
"""
Get the range of shard indices per job.
    If num_shards < max_num_jobs, then num_shards jobs are each given a range of one shard.
    The order of the shard indices is preserved: e.g. all the first shards are given to the first job.
Moreover all the jobs are given approximately the same number of shards.
Example:
```python
>>> _distribute_shards(2, max_num_jobs=4)
[range(0, 1), range(1, 2)]
>>> _distribute_shards(10, max_num_jobs=3)
[range(0, 4), range(4, 7), range(7, 10)]
```
"""
shards_indices_per_group = []
for group_idx in range(max_num_jobs):
num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
shard_indices = range(start, start + num_shards_to_add)
shards_indices_per_group.append(shard_indices)
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
"""Split the gen_kwargs into `max_num_job` gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, so raise an error in this case
num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
if num_shards == 1:
return [dict(gen_kwargs)]
else:
shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(value, list)
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(shard_indices_per_group))
]
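# Example (editor's sketch, not part of the original source): ten shard files split
# across three jobs; values that are not lists are passed through unchanged.
# ```
# gen_kwargs = {"files": [f"data-{i}.txt" for i in range(10)], "split": "train"}
# jobs = _split_gen_kwargs(gen_kwargs, max_num_jobs=3)
# assert [len(job["files"]) for job in jobs] == [4, 3, 3]
# assert all(job["split"] == "train" for job in jobs)
# ```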
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key], list)
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
"""Return a shuffled copy of the input gen_kwargs"""
# We must shuffle all the lists, and lists of the same size must have the same shuffling.
# This way entangled lists of (shard, shard_metadata) are still in the right order.
# First, let's generate the shuffled indices per list size
list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
indices_per_size = {}
for size in list_sizes:
indices_per_size[size] = list(range(size))
rng.shuffle(indices_per_size[size])
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
shuffled_kwargs = dict(gen_kwargs)
for key, value in shuffled_kwargs.items():
if isinstance(value, list):
shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
return shuffled_kwargs
| datasets/src/datasets/utils/sharding.py/0 | {
"file_path": "datasets/src/datasets/utils/sharding.py",
"repo_id": "datasets",
"token_count": 1742
} |
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why does a cache dir per test function not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True)
def disable_implicit_token(monkeypatch):
monkeypatch.setattr("huggingface_hub.constants.HF_HUB_DISABLE_IMPLICIT_TOKEN", True)
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except AttributeError:
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
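# Example (editor's sketch, not part of the original conftest): any test collected with
# this conftest picks up the autouse fixtures above, so the datasets cache already points
# at a temporary directory; the test name below is illustrative.
# ```
# import datasets.config
#
# def test_cache_is_redirected_to_tmp_dir():
#     assert "cache" in datasets.config.HF_DATASETS_CACHE
# ```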
| datasets/tests/conftest.py/0 | {
"file_path": "datasets/tests/conftest.py",
"repo_id": "datasets",
"token_count": 909
} |
from pathlib import Path
import pytest
from datasets import load_dataset
from datasets.packaged_modules.cache.cache import Cache
SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_single_config_in_metadata"
SAMPLE_DATASET_TWO_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_two_configs_in_metadata"
SAMPLE_DATASET_CAPITAL_LETTERS_IN_NAME = "hf-internal-testing/DatasetWithCapitalLetters"
def test_cache(text_dir: Path, tmp_path: Path):
cache_dir = tmp_path / "test_cache"
ds = load_dataset(str(text_dir), cache_dir=str(cache_dir))
hash = Path(ds["train"].cache_files[0]["filename"]).parts[-2]
cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, hash=hash)
reloaded = cache.as_dataset()
assert list(ds) == list(reloaded)
assert list(ds["train"]) == list(reloaded["train"])
def test_cache_streaming(text_dir: Path, tmp_path: Path):
cache_dir = tmp_path / "test_cache_streaming"
ds = load_dataset(str(text_dir), cache_dir=str(cache_dir))
hash = Path(ds["train"].cache_files[0]["filename"]).parts[-2]
cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, hash=hash)
reloaded = cache.as_streaming_dataset()
assert list(ds) == list(reloaded)
assert list(ds["train"]) == list(reloaded["train"])
def test_cache_auto_hash(text_dir: Path, tmp_path: Path):
cache_dir = tmp_path / "test_cache_auto_hash"
ds = load_dataset(str(text_dir), cache_dir=str(cache_dir))
cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto")
reloaded = cache.as_dataset()
assert list(ds) == list(reloaded)
assert list(ds["train"]) == list(reloaded["train"])
def test_cache_auto_hash_with_custom_config(text_dir: Path, tmp_path: Path):
cache_dir = tmp_path / "test_cache_auto_hash_with_custom_config"
ds = load_dataset(str(text_dir), sample_by="paragraph", cache_dir=str(cache_dir))
another_ds = load_dataset(str(text_dir), cache_dir=str(cache_dir))
cache = Cache(
cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto", sample_by="paragraph"
)
another_cache = Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto")
assert cache.config_id.endswith("paragraph")
assert not another_cache.config_id.endswith("paragraph")
reloaded = cache.as_dataset()
another_reloaded = another_cache.as_dataset()
assert list(ds) == list(reloaded)
assert list(ds["train"]) == list(reloaded["train"])
assert list(another_ds) == list(another_reloaded)
assert list(another_ds["train"]) == list(another_reloaded["train"])
def test_cache_missing(text_dir: Path, tmp_path: Path):
cache_dir = tmp_path / "test_cache_missing"
load_dataset(str(text_dir), cache_dir=str(cache_dir))
Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, version="auto", hash="auto").download_and_prepare()
with pytest.raises(ValueError):
Cache(cache_dir=str(cache_dir), dataset_name="missing", version="auto", hash="auto").download_and_prepare()
with pytest.raises(ValueError):
Cache(cache_dir=str(cache_dir), dataset_name=text_dir.name, hash="missing").download_and_prepare()
with pytest.raises(ValueError):
Cache(
cache_dir=str(cache_dir), dataset_name=text_dir.name, config_name="missing", version="auto", hash="auto"
).download_and_prepare()
@pytest.mark.integration
def test_cache_multi_configs(tmp_path: Path):
cache_dir = tmp_path / "test_cache_multi_configs"
repo_id = SAMPLE_DATASET_TWO_CONFIG_IN_METADATA
dataset_name = repo_id.split("/")[-1]
config_name = "v1"
ds = load_dataset(repo_id, config_name, cache_dir=str(cache_dir))
cache = Cache(
cache_dir=str(cache_dir),
dataset_name=dataset_name,
repo_id=repo_id,
config_name=config_name,
version="auto",
hash="auto",
)
reloaded = cache.as_dataset()
assert list(ds) == list(reloaded)
assert len(ds["train"]) == len(reloaded["train"])
with pytest.raises(ValueError) as excinfo:
Cache(
cache_dir=str(cache_dir),
dataset_name=dataset_name,
repo_id=repo_id,
config_name="missing",
version="auto",
hash="auto",
)
assert config_name in str(excinfo.value)
@pytest.mark.integration
def test_cache_single_config(tmp_path: Path):
cache_dir = tmp_path / "test_cache_single_config"
repo_id = SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA
dataset_name = repo_id.split("/")[-1]
config_name = "custom"
ds = load_dataset(repo_id, cache_dir=str(cache_dir))
cache = Cache(cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, version="auto", hash="auto")
reloaded = cache.as_dataset()
assert list(ds) == list(reloaded)
assert len(ds["train"]) == len(reloaded["train"])
cache = Cache(
cache_dir=str(cache_dir),
dataset_name=dataset_name,
config_name=config_name,
repo_id=repo_id,
version="auto",
hash="auto",
)
reloaded = cache.as_dataset()
assert list(ds) == list(reloaded)
assert len(ds["train"]) == len(reloaded["train"])
with pytest.raises(ValueError) as excinfo:
Cache(
cache_dir=str(cache_dir),
dataset_name=dataset_name,
repo_id=repo_id,
config_name="missing",
version="auto",
hash="auto",
)
assert config_name in str(excinfo.value)
@pytest.mark.integration
def test_cache_capital_letters(tmp_path: Path):
cache_dir = tmp_path / "test_cache_capital_letters"
repo_id = SAMPLE_DATASET_CAPITAL_LETTERS_IN_NAME
dataset_name = repo_id.split("/")[-1]
ds = load_dataset(repo_id, cache_dir=str(cache_dir))
cache = Cache(cache_dir=str(cache_dir), dataset_name=dataset_name, repo_id=repo_id, version="auto", hash="auto")
reloaded = cache.as_dataset()
assert list(ds) == list(reloaded)
assert len(ds["train"]) == len(reloaded["train"])
cache = Cache(
cache_dir=str(cache_dir),
dataset_name=dataset_name,
repo_id=repo_id,
version="auto",
hash="auto",
)
reloaded = cache.as_dataset()
assert list(ds) == list(reloaded)
assert len(ds["train"]) == len(reloaded["train"])
| datasets/tests/packaged_modules/test_cache.py/0 | {
"file_path": "datasets/tests/packaged_modules/test_cache.py",
"repo_id": "datasets",
"token_count": 2721
} |
import os
import tempfile
from unittest import TestCase
import numpy as np
import pandas as pd
import pytest
from datasets import load_from_disk
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.features import ClassLabel, Features, Sequence, Value
from datasets.iterable_dataset import IterableDataset
from datasets.splits import NamedSplit
from .utils import (
assert_arrow_memory_doesnt_increase,
assert_arrow_memory_increases,
require_numpy1_on_windows,
require_polars,
require_tf,
require_torch,
)
class DatasetDictTest(TestCase):
def _create_dummy_dataset(self, multiple_columns=False):
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
dset = Dataset.from_dict(data)
else:
dset = Dataset.from_dict(
{"filename": ["my_name-train" + "_" + f"{x:03d}" for x in np.arange(30).tolist()]}
)
return dset
def _create_dummy_dataset_dict(self, multiple_columns=False) -> DatasetDict:
return DatasetDict(
{
"train": self._create_dummy_dataset(multiple_columns=multiple_columns),
"test": self._create_dummy_dataset(multiple_columns=multiple_columns),
}
)
def _create_dummy_iterable_dataset(self, multiple_columns=False) -> IterableDataset:
def gen():
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
for v1, v2 in zip(data["col_1"], data["col_2"]):
yield {"col_1": v1, "col_2": v2}
else:
for x in range(30):
yield {"filename": "my_name-train" + "_" + f"{x:03d}"}
return IterableDataset.from_generator(gen)
def _create_dummy_iterable_dataset_dict(self, multiple_columns=False) -> IterableDatasetDict:
return IterableDatasetDict(
{
"train": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns),
"test": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns),
}
)
def test_flatten(self):
dset_split = Dataset.from_dict(
{"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10},
features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}),
)
dset = DatasetDict({"train": dset_split, "test": dset_split})
dset = dset.flatten()
self.assertDictEqual(dset.column_names, {"train": ["a.b.c", "foo"], "test": ["a.b.c", "foo"]})
self.assertListEqual(sorted(dset["train"].features.keys()), ["a.b.c", "foo"])
self.assertDictEqual(
dset["train"].features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")})
)
del dset
def test_set_format_numpy(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="numpy", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], np.int64)
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.reset_format()
with dset.formatted_as(type="numpy", columns=["col_1"]):
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], np.int64)
self.assertEqual(dset_split[0]["col_1"].item(), 3)
for dset_split in dset.values():
self.assertEqual(dset_split.format["type"], None)
self.assertEqual(dset_split.format["format_kwargs"], {})
self.assertEqual(dset_split.format["columns"], dset_split.column_names)
self.assertEqual(dset_split.format["output_all_columns"], False)
dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="numpy", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], np.str_)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
@require_numpy1_on_windows
@require_torch
def test_set_format_torch(self):
import torch
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="torch", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="torch", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="torch")
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
del dset
@require_tf
def test_set_format_tf(self):
import tensorflow as tf
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="tensorflow", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], tf.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].numpy().item(), 3)
dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="tensorflow", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertEqual(dset_split[0]["col_2"].numpy().decode("utf-8"), "a")
del dset
def test_set_format_pandas(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="pandas", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 1)
self.assertIsInstance(dset_split[0], pd.DataFrame)
self.assertListEqual(list(dset_split[0].shape), [1, 1])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="pandas", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 2)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
@require_polars
def test_set_format_polars(self):
import polars as pl
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="polars", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 1)
self.assertIsInstance(dset_split[0], pl.DataFrame)
self.assertEqual(dset_split[0].shape, (1, 1))
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="polars", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 2)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
def test_set_transform(self):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_transform(transform=transform, columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(dset_split.format["type"], "custom")
self.assertEqual(len(dset_split[0].keys()), 1)
self.assertEqual(dset_split[0]["col_1"], "3")
self.assertEqual(dset_split[:2]["col_1"], ["3", "2"])
self.assertEqual(dset_split["col_1"][:2], ["3", "2"])
prev_format = dset[list(dset.keys())[0]].format
for dset_split in dset.values():
dset_split.set_format(**dset_split.format)
self.assertEqual(prev_format, dset_split.format)
dset.set_transform(transform=transform, columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].keys()), 2)
self.assertEqual(dset_split[0]["col_2"], "A")
del dset
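    # with_format and with_transform are the non-mutating counterparts of set_format / set_transform:
    # they return new DatasetDict objects whose per-split formats should match the in-place versions.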
def test_with_format(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_format("numpy", columns=["col_1"])
dset.set_format("numpy", columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
def test_with_transform(self):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_transform(transform, columns=["col_1"])
dset.set_transform(transform, columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
def test_cast(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
features = dset["train"].features
features["col_1"] = Value("float64")
dset = dset.cast(features)
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertEqual(dset_split.features["col_1"], Value("float64"))
self.assertIsInstance(dset_split[0]["col_1"], float)
del dset
def test_remove_columns(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names="col_1")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_2"])
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 0)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
for dset_split in dset.values():
dset_split._format_columns = ["col_1", "col_2"]
dset = dset.remove_columns(column_names=["col_1"])
for dset_split in dset.values():
self.assertListEqual(dset_split._format_columns, ["col_2"])
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_2"])
del dset
def test_rename_column(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.rename_column(original_column_name="col_1", new_column_name="new_name")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertListEqual(list(dset_split.column_names), ["new_name", "col_2"])
del dset
def test_select_columns(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names=[])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 0)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names="col_1")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_1"])
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
for dset_split in dset.values():
dset_split._format_columns = ["col_1", "col_2"]
dset = dset.select_columns(column_names=["col_1"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_1"])
self.assertListEqual(dset_split._format_columns, ["col_1"])
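    # For DatasetDict.map and .filter, cache_file_names maps each split name to the arrow file used
    # to cache that split's processed results.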
def test_map(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
mapped_dsets_1: DatasetDict = dsets.map(lambda ex: {"foo": ["bar"] * len(ex["filename"])}, batched=True)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_1.keys()))
self.assertListEqual(mapped_dsets_1["train"].column_names, ["filename", "foo"])
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
mapped_dsets_2: DatasetDict = mapped_dsets_1.map(
lambda ex: {"bar": ["foo"] * len(ex["filename"])}, batched=True, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_2.keys()))
self.assertListEqual(sorted(mapped_dsets_2["train"].column_names), sorted(["filename", "foo", "bar"]))
del dsets, mapped_dsets_1, mapped_dsets_2
def test_iterable_map(self):
dsets = self._create_dummy_iterable_dataset_dict()
fn_kwargs = {"n": 3}
mapped_dsets: IterableDatasetDict = dsets.map(
lambda x, n: {"foo": [n] * len(x["filename"])},
batched=True,
fn_kwargs=fn_kwargs,
)
mapped_example = next(iter(mapped_dsets["train"]))
self.assertListEqual(sorted(mapped_example.keys()), sorted(["filename", "foo"]))
self.assertLessEqual(mapped_example["foo"], 3)
del dsets, mapped_dsets
def test_filter(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
filtered_dsets_1: DatasetDict = dsets.filter(lambda ex: int(ex["filename"].split("_")[-1]) < 10)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_1.keys()))
self.assertEqual(len(filtered_dsets_1["train"]), 10)
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
filtered_dsets_2: DatasetDict = filtered_dsets_1.filter(
lambda ex: int(ex["filename"].split("_")[-1]) < 5, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_2.keys()))
self.assertEqual(len(filtered_dsets_2["train"]), 5)
filtered_dsets_3: DatasetDict = dsets.filter(
lambda examples: [int(ex.split("_")[-1]) < 10 for ex in examples["filename"]], batched=True
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_3.keys()))
self.assertEqual(len(filtered_dsets_3["train"]), 10)
del dsets, filtered_dsets_1, filtered_dsets_2, filtered_dsets_3
def test_iterable_filter(self):
dsets = self._create_dummy_iterable_dataset_dict()
example = next(iter(dsets["train"]))
fn_kwargs = {"n": 3}
filtered_dsets: IterableDatasetDict = dsets.filter(
lambda ex, n: n < int(ex["filename"].split("_")[-1]), fn_kwargs=fn_kwargs
)
filtered_example = next(iter(filtered_dsets["train"]))
self.assertListEqual(list(example.keys()), list(filtered_example.keys()))
        self.assertEqual(int(filtered_example["filename"].split("_")[-1]), 4)  # n=3 keeps ids strictly greater than 3, so the first surviving id is 4
del dsets, filtered_dsets
def test_sort(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
sorted_dsets_1: DatasetDict = dsets.sort("filename")
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_1.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_1["train"]["filename"]],
sorted(f"{x:03d}" for x in range(30)),
)
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
sorted_dsets_2: DatasetDict = sorted_dsets_1.sort(
"filename", indices_cache_file_names=indices_cache_file_names, reverse=True
)
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_2.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_2["train"]["filename"]],
sorted((f"{x:03d}" for x in range(30)), reverse=True),
)
del dsets, sorted_dsets_1, sorted_dsets_2
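    # shuffle accepts per-split seeds (a dict), a single seed (positional or seed=...), or no seed at
    # all; identical seeds on splits with identical content are expected to yield identical permutations.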
def test_shuffle(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
seeds = {
"train": 1234,
"test": 1234,
}
dsets_shuffled = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled["test"]["filename"])
self.assertEqual(len(dsets_shuffled["train"]), 30)
self.assertEqual(dsets_shuffled["train"][0]["filename"], "my_name-train_028")
self.assertEqual(dsets_shuffled["train"][2]["filename"], "my_name-train_010")
self.assertDictEqual(dsets["train"].features, Features({"filename": Value("string")}))
self.assertDictEqual(dsets_shuffled["train"].features, Features({"filename": Value("string")}))
# Reproducibility
indices_cache_file_names_2 = {
"train": os.path.join(tmp_dir, "train_2.arrow"),
"test": os.path.join(tmp_dir, "test_2.arrow"),
}
dsets_shuffled_2 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_2, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled_2["train"]["filename"])
seeds = {
"train": 1234,
"test": 1,
}
indices_cache_file_names_3 = {
"train": os.path.join(tmp_dir, "train_3.arrow"),
"test": os.path.join(tmp_dir, "test_3.arrow"),
}
dsets_shuffled_3 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_3, load_from_cache_file=False
)
self.assertNotEqual(dsets_shuffled_3["train"]["filename"], dsets_shuffled_3["test"]["filename"])
# other input types
dsets_shuffled_int = dsets.shuffle(42)
dsets_shuffled_alias = dsets.shuffle(seed=42)
dsets_shuffled_none = dsets.shuffle()
self.assertEqual(len(dsets_shuffled_int["train"]), 30)
self.assertEqual(len(dsets_shuffled_alias["train"]), 30)
self.assertEqual(len(dsets_shuffled_none["train"]), 30)
del dsets, dsets_shuffled, dsets_shuffled_2, dsets_shuffled_3
del dsets_shuffled_int, dsets_shuffled_alias, dsets_shuffled_none
def test_flatten_indices(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
dsets_shuffled = dsets.shuffle(
seed=42, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False
)
self.assertIsNotNone(dsets_shuffled["train"]._indices)
self.assertIsNotNone(dsets_shuffled["test"]._indices)
dsets_flat = dsets_shuffled.flatten_indices()
self.assertIsNone(dsets_flat["train"]._indices)
self.assertIsNone(dsets_flat["test"]._indices)
del dsets, dsets_shuffled, dsets_flat
def test_check_values_type(self):
dsets = self._create_dummy_dataset_dict()
dsets["bad_split"] = None
self.assertRaises(TypeError, dsets.map, lambda x: x)
self.assertRaises(TypeError, dsets.filter, lambda x: True)
self.assertRaises(TypeError, dsets.shuffle)
self.assertRaises(TypeError, dsets.sort, "filename")
del dsets
def test_serialization(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
del reloaded_dsets
del dsets["test"]
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
del dsets, reloaded_dsets
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir, num_shards={"train": 3, "test": 2})
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["train"].cache_files), 3)
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"].cache_files), 2)
del reloaded_dsets
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir, num_proc=2)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["train"].cache_files), 2)
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"].cache_files), 2)
del reloaded_dsets
def test_load_from_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
del dsets
dsets = load_from_disk(tmp_dir)
self.assertListEqual(sorted(dsets), ["test", "train"])
self.assertEqual(len(dsets["train"]), 30)
self.assertListEqual(dsets["train"].column_names, ["filename"])
self.assertEqual(len(dsets["test"]), 30)
self.assertListEqual(dsets["test"].column_names, ["filename"])
del dsets
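    # align_labels_with_mapping remaps the integer labels of a ClassLabel column so that they follow
    # the provided label2id; here label2id uses uppercase names while the features use lowercase ones,
    # so the matching is expected to be case-insensitive.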
def test_align_labels_with_mapping(self):
train_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]),
}
)
test_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "contradiction", "neutral"]),
}
)
train_data = {"input_text": ["a", "a", "b", "b", "c", "c"], "input_labels": [0, 0, 1, 1, 2, 2]}
test_data = {"input_text": ["a", "a", "c", "c", "b", "b"], "input_labels": [0, 0, 1, 1, 2, 2]}
label2id = {"CONTRADICTION": 0, "ENTAILMENT": 2, "NEUTRAL": 1}
id2label = {v: k for k, v in label2id.items()}
train_expected_labels = [2, 2, 1, 1, 0, 0]
test_expected_labels = [2, 2, 0, 0, 1, 1]
train_expected_label_names = [id2label[idx] for idx in train_expected_labels]
test_expected_label_names = [id2label[idx] for idx in test_expected_labels]
dsets = DatasetDict(
{
"train": Dataset.from_dict(train_data, features=train_features),
"test": Dataset.from_dict(test_data, features=test_features),
}
)
dsets = dsets.align_labels_with_mapping(label2id, "input_labels")
self.assertListEqual(train_expected_labels, dsets["train"]["input_labels"])
self.assertListEqual(test_expected_labels, dsets["test"]["input_labels"])
train_aligned_label_names = [
dsets["train"].features["input_labels"].int2str(idx) for idx in dsets["train"]["input_labels"]
]
test_aligned_label_names = [
dsets["test"].features["input_labels"].int2str(idx) for idx in dsets["test"]["input_labels"]
]
self.assertListEqual(train_expected_label_names, train_aligned_label_names)
self.assertListEqual(test_expected_label_names, test_aligned_label_names)
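# test_dummy_datasetdict_serialize_fs exercises save_to_disk / load_from_disk against remote-style
# storage: mockfs is assumed to be a shared fixture exposing an in-memory fsspec filesystem under the
# "mock://" protocol, with its credentials forwarded through storage_options.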
def test_dummy_datasetdict_serialize_fs(mockfs):
dataset_dict = DatasetDict(
{
"train": Dataset.from_dict({"a": range(30)}),
"test": Dataset.from_dict({"a": range(10)}),
}
)
dataset_path = "mock://my_dataset"
dataset_dict.save_to_disk(dataset_path, storage_options=mockfs.storage_options)
assert mockfs.isdir(dataset_path)
assert mockfs.glob(dataset_path + "/*")
reloaded = DatasetDict.load_from_disk(dataset_path, storage_options=mockfs.storage_options)
assert list(reloaded) == list(dataset_dict)
for k in dataset_dict:
assert reloaded[k].features == dataset_dict[k].features
assert reloaded[k].to_dict() == dataset_dict[k].to_dict()
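# The _check_*_datasetdict helpers below assume the shared csv/json/parquet/text fixtures contain
# 4 rows each; the tabular ones expose columns col_1, col_2, col_3 while the text one has a single
# "text" column.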
def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_csv({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_csv_features(features, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
    # The CSV format does not preserve the string dtype of col_1: by default it is inferred as "int64" instead of "string"
default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_csv({"train": csv_path}, features=features, cache_dir=cache_dir)
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_csv_split(split, csv_path, tmp_path):
if split:
path = {split: csv_path}
else:
split = "train"
path = {"train": csv_path, "test": csv_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_csv(path, cache_dir=cache_dir)
_check_csv_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_json({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_json({"train": jsonl_path}, features=features, cache_dir=cache_dir)
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
if split:
path = {split: jsonl_path}
else:
split = "train"
path = {"train": jsonl_path, "test": jsonl_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_json(path, cache_dir=cache_dir)
_check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_parquet({"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_parquet({"train": parquet_path}, features=features, cache_dir=cache_dir)
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_parquet_split(split, parquet_path, tmp_path):
if split:
path = {split: parquet_path}
else:
split = "train"
path = {"train": parquet_path, "test": parquet_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_parquet(path, cache_dir=cache_dir)
_check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_text({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_text({"train": text_path}, features=features, cache_dir=cache_dir)
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
if split:
path = {split: text_path}
else:
split = "train"
path = {"train": text_path, "test": text_path}
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = DatasetDict.from_text(path, cache_dir=cache_dir)
_check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
| datasets/tests/test_dataset_dict.py/0 | {
"file_path": "datasets/tests/test_dataset_dict.py",
"repo_id": "datasets",
"token_count": 17837
} |
import pickle
from copy import deepcopy
from itertools import chain, cycle, islice
from unittest.mock import patch
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pytest
from datasets import Dataset, load_dataset
from datasets.combine import concatenate_datasets, interleave_datasets
from datasets.distributed import split_dataset_by_node
from datasets.features import (
ClassLabel,
Features,
Image,
Value,
)
from datasets.formatting import Formatter, get_format_type_from_alias
from datasets.info import DatasetInfo
from datasets.iterable_dataset import (
ArrowExamplesIterable,
BufferShuffledExamplesIterable,
CyclingMultiSourcesExamplesIterable,
ExamplesIterable,
FilteredExamplesIterable,
FormattedExamplesIterable,
FormattingConfig,
HorizontallyConcatenatedMultiSourcesExamplesIterable,
IterableDataset,
MappedExamplesIterable,
RandomlyCyclingMultiSourcesExamplesIterable,
RebatchedArrowExamplesIterable,
RepeatExamplesIterable,
SelectColumnsIterable,
ShuffledDataSourcesArrowExamplesIterable,
ShuffledDataSourcesExamplesIterable,
ShufflingConfig,
SkipExamplesIterable,
StepExamplesIterable,
TakeExamplesIterable,
VerticallyConcatenatedMultiSourcesExamplesIterable,
_BaseExamplesIterable,
_batch_to_examples,
_convert_to_arrow,
_examples_to_batch,
)
from .utils import (
assert_arrow_memory_doesnt_increase,
is_rng_equal,
require_dill_gt_0_3_2,
require_jax,
require_not_windows,
require_numpy1_on_windows,
require_polars,
require_pyspark,
require_tf,
require_torch,
require_torchdata_stateful_dataloader,
)
DEFAULT_N_EXAMPLES = 20
DEFAULT_BATCH_SIZE = 4
DEFAULT_FILEPATH = "file.txt"
SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script
def generate_examples_fn(**kwargs):
kwargs = kwargs.copy()
n = kwargs.pop("n", DEFAULT_N_EXAMPLES)
filepaths = kwargs.pop("filepaths", None)
for filepath in filepaths or [DEFAULT_FILEPATH]:
if filepaths is not None:
kwargs["filepath"] = filepath
for i in range(n):
yield f"{filepath}_{i}", {"id": i, **kwargs}
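# generate_examples_fn yields (key, example) pairs, e.g. (rough sketch):
#     list(generate_examples_fn(n=2)) == [("file.txt_0", {"id": 0}), ("file.txt_1", {"id": 1})]
# Passing filepaths=[...] produces one run of n examples per file and adds a "filepath" field.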
def generate_tables_fn(**kwargs):
kwargs = kwargs.copy()
n = kwargs.pop("n", DEFAULT_N_EXAMPLES)
batch_size = kwargs.pop("batch_size", DEFAULT_BATCH_SIZE)
filepaths = kwargs.pop("filepaths", None)
for filepath in filepaths or [DEFAULT_FILEPATH]:
buffer = []
batch_idx = 0
if filepaths is not None:
kwargs["filepath"] = filepath
for i in range(n):
buffer.append({"id": i, **kwargs})
if len(buffer) == batch_size:
yield f"{filepath}_{batch_idx}", pa.Table.from_pylist(buffer)
buffer = []
batch_idx += 1
yield batch_idx, pa.Table.from_pylist(buffer)
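# generate_tables_fn is the arrow counterpart: it yields (key, pa.Table) batches of batch_size rows
# (default 4) per filepath, plus a trailing, possibly empty, table for whatever is left in the buffer
# (note that this last yield uses the bare batch_idx as its key).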
@pytest.fixture
def dataset():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train")
@pytest.fixture
def dataset_with_several_columns():
ex_iterable = ExamplesIterable(
generate_examples_fn,
{"filepath": ["data0.txt", "data1.txt", "data2.txt"], "metadata": {"sources": ["https://foo.bar"]}},
)
return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train")
@pytest.fixture
def arrow_file(tmp_path_factory, dataset: IterableDataset):
filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
Dataset.from_generator(dataset.__iter__).map(cache_file_name=filename)
return filename
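# Helper: snapshot the iterable's state_dict after every example, then reload each snapshot and check
# that iteration resumes exactly where it left off.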
def assert_load_state_dict_resumes_iteration(ex_iterable: _BaseExamplesIterable):
ex_iterable._init_state_dict()
state_dicts = [ex_iterable.state_dict()]
examples = []
for _, example in ex_iterable:
state_dicts.append(ex_iterable.state_dict())
examples.append(example)
for i, state_dict in enumerate(state_dicts):
ex_iterable.load_state_dict(state_dict)
examples_after_resuming = [example for _, example in ex_iterable]
assert examples_after_resuming == examples[i:], f"resuming from idx {i} with {state_dict=}"
def assert_load_state_dict_resumes_arrow_iteration(ex_iterable: _BaseExamplesIterable):
assert ex_iterable.iter_arrow is not None
ex_iterable._init_state_dict()
state_dicts = [ex_iterable.state_dict()]
examples = []
indices = [0]
for _, pa_table in ex_iterable.iter_arrow():
state_dicts.append(ex_iterable.state_dict())
examples.extend(pa_table.to_pylist())
indices.append(indices[-1] + len(pa_table))
for i, state_dict in zip(indices, state_dicts):
ex_iterable.load_state_dict(state_dict)
examples_after_resuming = [
example for _, pa_table in ex_iterable.iter_arrow() for example in pa_table.to_pylist()
]
assert examples_after_resuming == examples[i:], f"resuming from idx {i} with {state_dict=}"
################################
#
# Utilities tests
#
################################
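# _convert_to_arrow regroups an iterable of (key, example) pairs into (key, pa.Table) batches;
# with drop_last_batch=True an incomplete trailing batch is discarded.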
@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_convert_to_arrow(batch_size, drop_last_batch):
examples = [{"foo": i} for i in range(10)]
full_table = pa.Table.from_pylist(examples)
num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(
_convert_to_arrow(
list(enumerate(examples)),
batch_size=batch_size,
drop_last_batch=drop_last_batch,
)
)
assert len(subtables) == num_batches
if drop_last_batch:
assert all(len(subtable) == batch_size for _, subtable in subtables)
else:
assert all(len(subtable) == batch_size for _, subtable in subtables[:-1])
assert len(subtables[-1][1]) <= batch_size
if num_rows > 0:
reloaded = pa.concat_tables([subtable for _, subtable in subtables])
assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
################################
#
# _BaseExampleIterable tests
#
################################
def test_examples_iterable():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
expected = list(generate_examples_fn())
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert ex_iterable.iter_arrow is None
assert_load_state_dict_resumes_iteration(ex_iterable)
def test_examples_iterable_with_kwargs():
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"})
expected = list(generate_examples_fn(filepaths=["0.txt", "1.txt"], split="train"))
assert list(ex_iterable) == expected
assert all("split" in ex for _, ex in ex_iterable)
assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"]
assert_load_state_dict_resumes_iteration(ex_iterable)
def test_examples_iterable_shuffle_data_sources():
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]})
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40))
expected = list(generate_examples_fn(filepaths=["1.txt", "0.txt"])) # shuffle the filepaths
assert list(ex_iterable) == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
def test_examples_iterable_shuffle_shards_and_metadata():
def gen(filepaths, all_metadata):
for i, (filepath, metadata) in enumerate(zip(filepaths, all_metadata)):
yield i, {"filepath": filepath, "metadata": metadata}
ex_iterable = ExamplesIterable(
gen,
{
"filepaths": [f"{i}.txt" for i in range(100)],
"all_metadata": [{"id": str(i)} for i in range(100)],
},
)
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(42))
out = list(ex_iterable)
filepaths_ids = [x["filepath"].split(".")[0] for _, x in out]
metadata_ids = [x["metadata"]["id"] for _, x in out]
assert filepaths_ids == metadata_ids, "entangled lists of shards/metadata should be shuffled the same way"
assert_load_state_dict_resumes_iteration(ex_iterable)
def test_arrow_examples_iterable():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {})
expected = sum([pa_table.to_pylist() for _, pa_table in generate_tables_fn()], [])
assert next(iter(ex_iterable))[1] == expected[0]
assert [example for _, example in ex_iterable] == expected
expected = list(generate_tables_fn())
assert list(ex_iterable.iter_arrow()) == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
def test_arrow_examples_iterable_with_kwargs():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"})
expected = sum(
[pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train")], []
)
assert [example for _, example in ex_iterable] == expected
assert all("split" in ex for _, ex in ex_iterable)
assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"]
expected = list(generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train"))
assert list(ex_iterable.iter_arrow()) == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
def test_arrow_examples_iterable_shuffle_data_sources():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"]})
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40))
expected = sum(
[pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["1.txt", "0.txt"])], []
) # shuffle the filepaths
assert [example for _, example in ex_iterable] == expected
expected = list(generate_tables_fn(filepaths=["1.txt", "0.txt"]))
assert list(ex_iterable.iter_arrow()) == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
@pytest.mark.parametrize(
"tables",
[
[pa.table({"foo": range(10)})],
[pa.table({"foo": range(5 * i, 5 * (i + 1))}) for i in range(2)],
[pa.table({"foo": range(5 * i, 5 * (i + 1))}) for i in range(7)],
[pa.table({"foo": [i]}) for i in range(10)],
],
)
@pytest.mark.parametrize("batch_size", [1, 2, 3, 7, 9, 10, 11, 13, 20])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_rebatched_arrow_examples_iterable(tables, batch_size, drop_last_batch):
full_table = pa.concat_tables(tables)
num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
def gen(tables):
for i, table in enumerate(tables):
yield str(i), table
ex_iterable = ArrowExamplesIterable(gen, {"tables": tables})
ex_iterable = RebatchedArrowExamplesIterable(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch)
subtables = list(ex_iterable.iter_arrow())
assert len(subtables) == num_batches
if drop_last_batch:
assert all(len(subtable) == batch_size for _, subtable in subtables)
else:
assert all(len(subtable) == batch_size for _, subtable in subtables[:-1])
assert len(subtables[-1][1]) <= batch_size
if num_rows > 0:
reloaded = pa.concat_tables([subtable for _, subtable in subtables])
assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
assert_load_state_dict_resumes_iteration(ex_iterable)
assert_load_state_dict_resumes_arrow_iteration(ex_iterable)
@pytest.mark.parametrize("seed", [42, 1337, 101010, 123456])
def test_buffer_shuffled_examples_iterable(seed):
n, buffer_size = 100, 30
generator = np.random.default_rng(seed)
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = BufferShuffledExamplesIterable(base_ex_iterable, buffer_size=buffer_size, generator=generator)
rng = deepcopy(generator)
expected_indices_used_for_shuffling = list(
islice(BufferShuffledExamplesIterable._iter_random_indices(rng, buffer_size=buffer_size), n - buffer_size)
)
# indices to pick in the shuffle buffer should all be in the right range
assert all(0 <= index_to_pick < buffer_size for index_to_pick in expected_indices_used_for_shuffling)
# it should be random indices
assert expected_indices_used_for_shuffling != list(range(buffer_size))
# The final order of examples is the result of a shuffle buffer.
all_examples = list(generate_examples_fn(n=n))
# We create a buffer and we pick random examples from it.
buffer, rest = all_examples[:buffer_size], all_examples[buffer_size:]
expected = []
for i, index_to_pick in enumerate(expected_indices_used_for_shuffling):
expected.append(buffer[index_to_pick])
# The picked examples are directly replaced by the next examples from the iterable.
buffer[index_to_pick] = rest.pop(0)
# Once we have reached the end of the iterable, we shuffle the buffer and return the remaining examples.
rng.shuffle(buffer)
expected += buffer
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert sorted(ex_iterable) == sorted(all_examples)
def test_cycling_multi_sources_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"})
ex_iterable = CyclingMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = list(chain(*zip(generate_examples_fn(text="foo"), generate_examples_fn(text="bar"))))
    # The cycling stops as soon as one iterable runs out of examples (here ex_iterable1), so the last sample from ex_iterable2 is unnecessary
expected = expected[:-1]
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert all((x["id"], x["text"]) == (i // 2, "bar" if i % 2 else "foo") for i, (_, x) in enumerate(ex_iterable))
assert_load_state_dict_resumes_iteration(ex_iterable)
@pytest.mark.parametrize("probabilities", [None, (0.5, 0.5), (0.9, 0.1)])
def test_randomly_cycling_multi_sources_examples_iterable(probabilities):
seed = 42
generator = np.random.default_rng(seed)
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"})
ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(
[ex_iterable1, ex_iterable2], generator=generator, probabilities=probabilities
)
    # The source is picked at random for each example; iteration stops when one of the iterators is exhausted.
rng = deepcopy(generator)
iterators = (generate_examples_fn(text="foo"), generate_examples_fn(text="bar"))
indices_iterator = cycle(rng.choice(len(iterators), size=1000, p=probabilities))
expected = []
lengths = [len(list(ex_iterable1)), len(list(ex_iterable2))]
for i in indices_iterator:
if lengths[0] == 0 or lengths[1] == 0:
break
for key, example in iterators[i]:
expected.append((key, example))
lengths[i] -= 1
break
else:
break
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
@pytest.mark.parametrize("probabilities", [None, (0.5, 0.5), (0.9, 0.1)])
@pytest.mark.parametrize("stopping_strategy", ["first_exhausted", "all_exhausted"])
@pytest.mark.parametrize("step", [-1, 0, 5, 20, 30, 300])
def test_randomly_cycling_multi_sources_examples_iterable_state(probabilities, stopping_strategy, step):
seed = 42
generator = np.random.default_rng(seed)
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"})
ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(
[ex_iterable1, ex_iterable2],
generator=generator,
probabilities=probabilities,
stopping_strategy=stopping_strategy,
)
step = min(step, len(list(ex_iterable)) - 1)
ex_iterable._init_state_dict()
state_dict = ex_iterable.state_dict()
examples = []
for i, x in enumerate(ex_iterable):
examples.append(x)
if i == step:
state_dict = ex_iterable.state_dict()
ex_iterable.load_state_dict(state_dict)
assert examples[step + 1 :] == list(ex_iterable)
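# The MappedExamplesIterable tests below recompute the expected output by hand: in batched mode the
# examples are packed into a single batch dict with _examples_to_batch, passed to the mapping
# function, and unpacked again with _batch_to_examples.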
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id
(3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0
(3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [{**x, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id
(3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0
(3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_drop_last_batch(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, drop_last_batch=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
is_empty = False
if batched is False:
# `drop_last_batch` has no effect here
expected = [{**x, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
if len(examples) < batch_size: # ignore last batch
break
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size]
if all_examples:
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
else:
is_empty = True
if not is_empty:
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
else:
with pytest.raises(StopIteration):
next(iter(ex_iterable))
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x, index: {"id+idx": x["id"] + index}, False, None), # add the index to the id
(
25,
lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]},
True,
10,
), # add the index to the id
(5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, None), # same with bs=None
(5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, -1), # same with bs<=0
],
)
def test_mapped_examples_iterable_with_indices(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [{**x, **func(x, idx)} for idx, x in enumerate(all_examples)]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
indices = list(range(batch_offset, batch_offset + len(examples)))
transformed_batch = func(batch, indices)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size, remove_columns",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None, ["extra_column"]), # just add 1 to the id
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10, ["extra_column"]), # same with bs=10
(
50,
lambda x: {"foo": ["bar"] * np.random.default_rng(x["id"][0]).integers(0, 10)},
True,
8,
["extra_column", "id"],
), # make a duplicate of each example
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None, ["extra_column"]), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1, ["extra_column"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_remove_columns(n, func, batched, batch_size, remove_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, remove_columns=remove_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns]
if batched is False:
expected = [{**{k: v for k, v in x.items() if k not in columns_to_remove}, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = {k: v for k, v in _examples_to_batch(all_examples).items() if k not in columns_to_remove}
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
# issue #7345 and PR #7353
@pytest.mark.parametrize("batched", [False, True])
@pytest.mark.parametrize("batch_size", [None, 2])
@pytest.mark.parametrize("input_columns", [None, ["i"]])
@pytest.mark.parametrize("remove_columns", [None, ["i"]])
@pytest.mark.parametrize("new_output", [False, True])
def test_iterable_dataset_vs_dataset_map(batched, batch_size, input_columns, remove_columns, new_output):
if input_columns is not None and not new_output:
return
ds1 = Dataset.from_list([{"i": i} for i in range(4)])
if batched:
def f1(i):
return {"i": [j + 1 for j in i]}
else:
def f1(i):
return {"i": i + 1}
if input_columns is None:
def f2(x):
return f1(x["i"])
else:
f2 = f1
if new_output:
f = f2
else:
def f(x):
x["i"] = f2(x)["i"]
return x
r = [
list(
ds2.map(
f,
batch_size=batch_size,
batched=batched,
remove_columns=remove_columns,
input_columns=input_columns,
)
)
for ds2 in [ds1, ds1.to_iterable_dataset()]
]
r[1] = [x for x in r[1] if len(x) > 0]
assert len(r[0]) == len(r[1])
assert all(x == y for x, y in zip(*r))
@pytest.mark.parametrize(
"n, func, batched, batch_size, fn_kwargs",
[
(3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, None),
(3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, {"y": 3}),
(25, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, 10, {"y": 3}),
(5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, None, {"y": 3}), # same with bs=None
(5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, -1, {"y": 3}), # same with bs<=0
],
)
def test_mapped_examples_iterable_fn_kwargs(n, func, batched, batch_size, fn_kwargs):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if fn_kwargs is None:
fn_kwargs = {}
if batched is False:
expected = [{**x, **func(x, **fn_kwargs)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch, **fn_kwargs)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
(3, lambda id_: {"id+1": id_ + 1}, False, None, ["id"]), # just add 1 to the id
(25, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, 10, ["id"]), # same with bs=10
(5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, None, ["id"]), # same with bs=None
(5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, -1, ["id"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_input_columns(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [{**x, **func(*[x[col] for col in columns_to_input])} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(*[batch[col] for col in columns_to_input])
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0
(3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1)
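    # arrow-formatted map consumes pa.Table batches here, hence the rebatching step (one row per table when the map is not batched)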
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
assert_load_state_dict_resumes_arrow_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0
(3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_arrow_format_from_arrow_examples_iterable(n, func, batched, batch_size):
base_ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"n": n})
base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1)
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
assert_load_state_dict_resumes_arrow_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0
(3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_drop_last_batch_and_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1)
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
drop_last_batch=True,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
is_empty = False
if batched is False:
# `drop_last_batch` has no effect here
expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples]
else:
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
if len(examples) < batch_size: # ignore last batch
break
batch = pa.Table.from_pylist(examples)
out = func(batch)
all_transformed_examples.extend(
out.to_pylist()
) # we don't merge with input since they're arrow tables and not dictionaries
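        # keep only the examples that fill complete batches, since drop_last_batch discards the trailing incomplete batch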
all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size]
if all_examples:
expected = all_transformed_examples
else:
is_empty = True
if not is_empty:
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
else:
with pytest.raises(StopIteration):
next(iter(ex_iterable))
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(
3,
lambda t, index: t.append_column("id+idx", pc.add(t["id"], index)),
False,
None,
), # add the index to the id
(
25,
lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)),
True,
10,
), # add the index to the id
(5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, None), # same with bs=None
(5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, -1), # same with bs<=0
],
)
def test_mapped_examples_iterable_with_indices_and_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1)
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
with_indices=True,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [func(pa.Table.from_pylist([x]), i).to_pylist()[0] for i, x in enumerate(all_examples)]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch, list(range(batch_offset, batch_offset + len(batch)))).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
assert_load_state_dict_resumes_arrow_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size, remove_columns",
[
(
3,
lambda t: t.append_column("id+1", pc.add(t["id"], 1)),
False,
None,
["extra_column"],
), # just add 1 to the id
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10, ["extra_column"]), # same with bs=10
(
50,
lambda t: pa.table({"foo": ["bar"] * np.random.default_rng(t["id"][0].as_py()).integers(0, 10)}),
True,
8,
["extra_column", "id"],
        ), # return a random number of rows and drop the original columns
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None, ["extra_column"]), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1, ["extra_column"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_remove_columns_arrow_format(n, func, batched, batch_size, remove_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"})
base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1)
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
remove_columns=remove_columns,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns]
if batched is False:
        expected = [
            {k: v for k, v in func(pa.Table.from_pylist([x])).to_pylist()[0].items() if k not in columns_to_remove}
            for x in all_examples
        ]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(
[{k: v for k, v in x.items() if k not in columns_to_remove} for x in func(batch).to_pylist()]
)
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
assert_load_state_dict_resumes_arrow_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size, fn_kwargs",
[
(3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, None),
(3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, {"y": 3}),
(25, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, 10, {"y": 3}),
(5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, None, {"y": 3}), # same with bs=None
(5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, -1, {"y": 3}), # same with bs<=0
],
)
def test_mapped_examples_iterable_fn_kwargs_and_arrow_format(n, func, batched, batch_size, fn_kwargs):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1)
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
fn_kwargs=fn_kwargs,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if fn_kwargs is None:
fn_kwargs = {}
if batched is False:
expected = [func(pa.Table.from_pylist([x]), **fn_kwargs).to_pylist()[0] for x in all_examples]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch, **fn_kwargs).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
assert_load_state_dict_resumes_arrow_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
(3, lambda id_: pa.table({"id+1": pc.add(id_, 1)}), False, None, ["id"]), # just add 1 to the id
(25, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, 10, ["id"]), # same with bs=10
(5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, None, ["id"]), # same with bs=None
(5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, -1, ["id"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_input_columns_and_arrow_format(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
base_ex_iterable = RebatchedArrowExamplesIterable(base_ex_iterable, batch_size=batch_size if batched else 1)
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
input_columns=input_columns,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [
func(*[pa.Table.from_pylist([x])[col] for col in columns_to_input]).to_pylist()[0] for x in all_examples
]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(*[batch[col] for col in columns_to_input]).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
assert_load_state_dict_resumes_arrow_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: x["id"] % 2 == 0, False, None), # keep even number
(3, lambda x: [x["id"][0] % 2 == 0], True, 1), # same with bs=1
(25, lambda x: [i % 2 == 0 for i in x["id"]], True, 10), # same with bs=10
(5, lambda x: [i % 2 == 0 for i in x["id"]], True, None), # same with bs=None
(5, lambda x: [i % 2 == 0 for i in x["id"]], True, -1), # same with bs<=0
(3, lambda x: False, False, None), # return 0 examples
(3, lambda x: [False] * len(x["id"]), True, 10), # same with bs=10
],
)
def test_filtered_examples_iterable(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [x for x in all_examples if func(x)]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
mask = func(batch)
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
if expected:
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
        (3, lambda x, index: index % 2 == 0, False, None), # keep even numbers
(25, lambda x, indices: [idx % 2 == 0 for idx in indices], True, 10), # same with bs=10
(5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, None), # same with bs=None
(5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, -1), # same with bs<=0
],
)
def test_filtered_examples_iterable_with_indices(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [x for idx, x in enumerate(all_examples) if func(x, idx)]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
indices = list(range(batch_offset, batch_offset + len(examples)))
mask = func(batch, indices)
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
        (3, lambda id_: id_ % 2 == 0, False, None, ["id"]), # keep even numbers
(25, lambda ids_: [i % 2 == 0 for i in ids_], True, 10, ["id"]), # same with bs=10
(3, lambda ids_: [i % 2 == 0 for i in ids_], True, None, ["id"]), # same with bs=None
        (3, lambda ids_: [i % 2 == 0 for i in ids_], True, -1, ["id"]), # same with bs<=0
],
)
def test_filtered_examples_iterable_input_columns(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [x for x in all_examples if func(*[x[col] for col in columns_to_input])]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
mask = func(*[batch[col] for col in columns_to_input])
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
assert_load_state_dict_resumes_iteration(ex_iterable)
def test_skip_examples_iterable():
total, count = 10, 2
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total})
skip_ex_iterable = SkipExamplesIterable(base_ex_iterable, n=count)
expected = list(generate_examples_fn(n=total))[count:]
assert list(skip_ex_iterable) == expected
assert skip_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is skip_ex_iterable, (
"skip examples makes the shards order fixed"
)
assert_load_state_dict_resumes_iteration(skip_ex_iterable)
def test_take_examples_iterable():
total, count = 10, 2
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total})
take_ex_iterable = TakeExamplesIterable(base_ex_iterable, n=count)
expected = list(generate_examples_fn(n=total))[:count]
assert list(take_ex_iterable) == expected
assert take_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is take_ex_iterable, (
"skip examples makes the shards order fixed"
)
assert_load_state_dict_resumes_iteration(take_ex_iterable)
@pytest.mark.parametrize(
"n, num_times",
[
(3, None),
(3, 3),
(3, 0),
],
)
def test_repeat_examples_iterable(n, num_times):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = RepeatExamplesIterable(base_ex_iterable, num_times=num_times)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if num_times is not None:
expected = all_examples * max(num_times, 0)
assert [x for _, x in ex_iterable] == expected
else:
max_iters = 135
iterator = iter(ex_iterable)
for i in range(max_iters):
            assert next(iterator)[1] == all_examples[i % len(all_examples)], f"iteration {i} failed"
def test_vertically_concatenated_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2]
assert [x for _, x in concatenated_ex_iterable] == expected
assert_load_state_dict_resumes_iteration(concatenated_ex_iterable)
def test_vertically_concatenated_examples_iterable_with_different_columns():
    # having different columns is supported,
    # though iterable datasets fill the missing data with nulls
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2]
assert [x for _, x in concatenated_ex_iterable] == expected
assert_load_state_dict_resumes_iteration(concatenated_ex_iterable)
def test_vertically_concatenated_examples_iterable_shuffle_data_sources():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
rng = np.random.default_rng(42)
shuffled_ex_iterable = concatenated_ex_iterable.shuffle_data_sources(rng)
# make sure the list of examples iterables is shuffled, and each examples iterable is shuffled
expected = [x for _, x in ex_iterable2.shuffle_data_sources(rng)] + [
x for _, x in ex_iterable1.shuffle_data_sources(rng)
]
assert [x for _, x in shuffled_ex_iterable] == expected
assert_load_state_dict_resumes_iteration(shuffled_ex_iterable)
def test_horizontally_concatenated_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
with pytest.raises(ValueError): # column "id" is duplicated -> raise an error
list(concatenated_ex_iterable)
ex_iterable2 = MappedExamplesIterable(ex_iterable2, lambda x: x, remove_columns=["id"])
concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [{**x, **y} for (_, x), (_, y) in zip(ex_iterable1, ex_iterable2)]
assert [x for _, x in concatenated_ex_iterable] == expected
assert concatenated_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is concatenated_ex_iterable, (
"horizontally concatenated examples makes the shards order fixed"
)
assert_load_state_dict_resumes_iteration(concatenated_ex_iterable)
@pytest.mark.parametrize(
"ex_iterable",
[
ExamplesIterable(generate_examples_fn, {}),
ShuffledDataSourcesExamplesIterable(generate_examples_fn, {}, np.random.default_rng(42)),
SelectColumnsIterable(ExamplesIterable(generate_examples_fn, {}), ["id"]),
StepExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 2, 0),
CyclingMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
VerticallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
HorizontallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
RandomlyCyclingMultiSourcesExamplesIterable(
[ExamplesIterable(generate_examples_fn, {})], np.random.default_rng(42)
),
MappedExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: x),
MappedExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: x),
FilteredExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: True),
FilteredExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: True),
BufferShuffledExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10, np.random.default_rng(42)),
SkipExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10),
TakeExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10),
FormattedExamplesIterable(
ExamplesIterable(generate_examples_fn, {}), None, Features({"id": Value("int32")}), token_per_repo_id={}
),
],
)
def test_no_iter_arrow(ex_iterable: _BaseExamplesIterable):
assert ex_iterable.iter_arrow is None
if not isinstance(ex_iterable, BufferShuffledExamplesIterable):
assert_load_state_dict_resumes_iteration(ex_iterable)
@pytest.mark.parametrize(
"ex_iterable",
[
ArrowExamplesIterable(generate_tables_fn, {}),
ShuffledDataSourcesArrowExamplesIterable(generate_tables_fn, {}, np.random.default_rng(42)),
SelectColumnsIterable(ArrowExamplesIterable(generate_tables_fn, {}), ["id"]),
# StepExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 2, 0), # not implemented
# CyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented
VerticallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]),
# HorizontallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented
# RandomlyCyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})], np.random.default_rng(42)), # not implemented
MappedExamplesIterable(
RebatchedArrowExamplesIterable(ExamplesIterable(generate_examples_fn, {}), batch_size=1),
lambda t: t,
formatting=FormattingConfig(format_type="arrow"),
),
MappedExamplesIterable(
RebatchedArrowExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), batch_size=1),
lambda t: t,
formatting=FormattingConfig(format_type="arrow"),
),
FilteredExamplesIterable(
RebatchedArrowExamplesIterable(ExamplesIterable(generate_examples_fn, {}), batch_size=1),
lambda t: True,
formatting=FormattingConfig(format_type="arrow"),
),
FilteredExamplesIterable(
RebatchedArrowExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), batch_size=1),
lambda t: True,
formatting=FormattingConfig(format_type="arrow"),
),
# BufferShuffledExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10, np.random.default_rng(42)), # not implemented
# SkipExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented
# TakeExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented
FormattedExamplesIterable(
ArrowExamplesIterable(generate_tables_fn, {}), None, Features({"id": Value("int32")}), token_per_repo_id={}
),
],
)
def test_iter_arrow(ex_iterable: _BaseExamplesIterable):
assert ex_iterable.iter_arrow is not None
key, pa_table = next(ex_iterable.iter_arrow())
assert isinstance(pa_table, pa.Table)
assert_load_state_dict_resumes_arrow_iteration(ex_iterable)
############################
#
# IterableDataset tests
#
############################
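# NOTE: the `dataset` and `dataset_with_several_columns` fixtures used below are assumed to be defined
# earlier in this file, yielding DEFAULT_N_EXAMPLES examples produced by generate_examples_fn.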
def test_iterable_dataset():
dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {}))
expected = [x for _, x in generate_examples_fn()]
assert next(iter(dataset)) == expected[0]
assert list(dataset) == expected
def test_iterable_dataset_from_generator():
data = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
def gen():
yield from data
dataset = IterableDataset.from_generator(gen)
assert isinstance(dataset, IterableDataset)
assert list(dataset) == data
def test_iterable_dataset_from_generator_with_shards():
def gen(shard_names):
for shard_name in shard_names:
for i in range(10):
yield {"shard_name": shard_name, "i": i}
shard_names = [f"data{shard_idx}.txt" for shard_idx in range(4)]
dataset = IterableDataset.from_generator(gen, gen_kwargs={"shard_names": shard_names})
assert isinstance(dataset, IterableDataset)
assert dataset.num_shards == len(shard_names)
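    # from_generator appears to create one shard per element of the list passed in gen_kwargs, hence num_shards == len(shard_names)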
@require_numpy1_on_windows
def test_iterable_dataset_from_file(dataset: IterableDataset, arrow_file: str):
with assert_arrow_memory_doesnt_increase():
dataset_from_file = IterableDataset.from_file(arrow_file)
expected_features = dataset._resolve_features().features
assert dataset_from_file.features.type == expected_features.type
assert dataset_from_file.features == expected_features
assert isinstance(dataset_from_file, IterableDataset)
assert list(dataset_from_file) == list(dataset)
@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_streaming():
import pyspark
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
data = [
("0", 0, 0.0),
("1", 1, 1.0),
("2", 2, 2.0),
("3", 3, 3.0),
]
df = spark.createDataFrame(data, "col_1: string, col_2: int, col_3: float")
dataset = IterableDataset.from_spark(df)
assert isinstance(dataset, IterableDataset)
results = []
for ex in dataset:
results.append(ex)
assert results == [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_streaming_features():
import PIL.Image
import pyspark
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
data = [(0, np.arange(4 * 4 * 3).reshape(4, 4, 3).tolist())]
df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>")
features = Features({"idx": Value("int64"), "image": Image()})
dataset = IterableDataset.from_spark(
df,
features=features,
)
assert isinstance(dataset, IterableDataset)
results = []
for ex in dataset:
results.append(ex)
assert len(results) == 1
    assert isinstance(results[0]["image"], PIL.Image.Image)
@require_torch
def test_iterable_dataset_torch_integration():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
import torch.utils.data
assert isinstance(dataset, torch.utils.data.IterableDataset)
assert isinstance(dataset, IterableDataset)
@require_torch
def test_iterable_dataset_torch_picklable():
import pickle
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable, formatting=FormattingConfig(format_type="torch"))
reloaded_dataset = pickle.loads(pickle.dumps(dataset))
import torch.utils.data
assert isinstance(reloaded_dataset, IterableDataset)
assert isinstance(reloaded_dataset, torch.utils.data.IterableDataset)
assert reloaded_dataset._formatting.format_type == "torch"
assert len(list(dataset)) == len(list(reloaded_dataset))
@require_torch
def test_iterable_dataset_with_format_torch():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
from torch.utils.data import DataLoader
dataloader = DataLoader(dataset)
assert len(list(dataloader)) == len(list(ex_iterable))
@require_torch
def test_iterable_dataset_torch_dataloader_parallel():
from torch.utils.data import DataLoader
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
dataloader = DataLoader(dataset, num_workers=2, batch_size=None)
result = list(dataloader)
expected = [example for _, example in ex_iterable]
assert len(result) == len(expected)
assert {str(x) for x in result} == {str(x) for x in expected}
@require_torch
@pytest.mark.filterwarnings("ignore:This DataLoader will create:UserWarning")
@pytest.mark.parametrize("num_shards, num_workers", [(2, 1), (2, 2), (3, 2), (2, 3)])
def test_sharded_iterable_dataset_torch_dataloader_parallel(num_shards, num_workers):
from torch.utils.data import DataLoader
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}.txt" for i in range(num_shards)]})
dataset = IterableDataset(ex_iterable)
dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers)
result = list(dataloader)
expected = [example for _, example in ex_iterable]
assert len(result) == len(expected)
assert {str(x) for x in result} == {str(x) for x in expected}
@require_torch
@pytest.mark.integration
@pytest.mark.parametrize("num_workers", [1, 2])
def test_iterable_dataset_from_hub_torch_dataloader_parallel(num_workers, tmp_path):
from torch.utils.data import DataLoader
dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=str(tmp_path), streaming=True, split="train")
dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers)
result = list(dataloader)
assert len(result) == 2
@pytest.mark.parametrize("batch_size", [4, 5])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_iterable_dataset_iter_batch(batch_size, drop_last_batch):
n = 25
dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {"n": n}))
all_examples = [ex for _, ex in generate_examples_fn(n=n)]
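    # dataset.iter yields batches as column dicts, so the expected batches are built with _examples_to_batch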
expected = []
for i in range(0, len(all_examples), batch_size):
if len(all_examples[i : i + batch_size]) < batch_size and drop_last_batch:
continue
expected.append(_examples_to_batch(all_examples[i : i + batch_size]))
assert next(iter(dataset.iter(batch_size, drop_last_batch=drop_last_batch))) == expected[0]
assert list(dataset.iter(batch_size, drop_last_batch=drop_last_batch)) == expected
def test_iterable_dataset_info():
info = DatasetInfo(description="desc", citation="@article{}", size_in_bytes=42)
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable, info=info)
assert dataset.info == info
assert dataset.description == info.description
assert dataset.citation == info.citation
assert dataset.size_in_bytes == info.size_in_bytes
def test_iterable_dataset_set_epoch(dataset: IterableDataset):
assert dataset._epoch == 0
dataset.set_epoch(42)
assert dataset._epoch == 42
@pytest.mark.parametrize("seed", [None, 42, 1337])
@pytest.mark.parametrize("epoch", [None, 0, 1, 10])
def test_iterable_dataset_set_epoch_of_shuffled_dataset(dataset: IterableDataset, seed, epoch):
buffer_size = 10
shuffled_dataset = dataset.shuffle(seed, buffer_size=buffer_size)
base_generator = shuffled_dataset._shuffling.generator
if epoch is not None:
shuffled_dataset.set_epoch(epoch)
effective_generator = shuffled_dataset._effective_generator()
assert effective_generator is not None
if epoch is None or epoch == 0:
assert is_rng_equal(base_generator, shuffled_dataset._effective_generator())
else:
assert not is_rng_equal(base_generator, shuffled_dataset._effective_generator())
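        # with a non-zero epoch, the effective generator is re-seeded with a value drawn from a copy of the base generator minus the epoch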
effective_seed = deepcopy(base_generator).integers(0, 1 << 63) - epoch
assert is_rng_equal(np.random.default_rng(effective_seed), shuffled_dataset._effective_generator())
def test_iterable_dataset_map(
dataset: IterableDataset,
):
func = lambda x: {"id+1": x["id"] + 1} # noqa: E731
mapped_dataset = dataset.map(func)
assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable)
assert mapped_dataset._ex_iterable.function is func
assert mapped_dataset._ex_iterable.batched is False
assert next(iter(mapped_dataset)) == {**next(iter(dataset)), **func(next(iter(generate_examples_fn()))[1])}
def test_iterable_dataset_map_batched(
dataset: IterableDataset,
):
func = lambda x: {"id+1": [i + 1 for i in x["id"]]} # noqa: E731
batch_size = 3
dataset = dataset.map(func, batched=True, batch_size=batch_size)
assert isinstance(dataset._ex_iterable, MappedExamplesIterable)
assert dataset._ex_iterable.function is func
assert dataset._ex_iterable.batch_size == batch_size
assert next(iter(dataset)) == {"id": 0, "id+1": 1}
def test_iterable_dataset_map_complex_features(
dataset: IterableDataset,
):
# https://github.com/huggingface/datasets/issues/3505
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"})
features = Features(
{
"id": Value("int64"),
"label": Value("string"),
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
dataset = dataset.cast_column("label", ClassLabel(names=["negative", "positive"]))
dataset = dataset.map(lambda x: {"id+1": x["id"] + 1, **x})
assert isinstance(dataset._ex_iterable, MappedExamplesIterable)
features["label"] = ClassLabel(names=["negative", "positive"])
assert [{k: v for k, v in ex.items() if k != "id+1"} for ex in dataset] == [
features.encode_example(ex) for _, ex in ex_iterable
]
def test_iterable_dataset_map_with_features(dataset: IterableDataset) -> None:
# https://github.com/huggingface/datasets/issues/3888
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"})
features_before_map = Features(
{
"id": Value("int64"),
"label": Value("string"),
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features_before_map))
assert dataset.info.features is not None
assert dataset.info.features == features_before_map
features_after_map = Features(
{
"id": Value("int64"),
"label": Value("string"),
"target": Value("string"),
}
)
dataset = dataset.map(lambda x: {"target": x["label"]}, features=features_after_map)
assert dataset.info.features is not None
assert dataset.info.features == features_after_map
def test_iterable_dataset_map_with_fn_kwargs(dataset: IterableDataset) -> None:
fn_kwargs = {"y": 1}
mapped_dataset = dataset.map(lambda x, y: {"id+y": x["id"] + y}, fn_kwargs=fn_kwargs)
assert mapped_dataset._ex_iterable.batched is False
assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1}
batch_size = 3
mapped_dataset = dataset.map(
lambda x, y: {"id+y": [i + y for i in x["id"]]}, batched=True, batch_size=batch_size, fn_kwargs=fn_kwargs
)
assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable)
assert mapped_dataset._ex_iterable.batch_size == batch_size
assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1}
def test_iterable_dataset_filter(dataset: IterableDataset) -> None:
fn_kwargs = {"y": 1}
filtered_dataset = dataset.filter(lambda x, y: x["id"] == y, fn_kwargs=fn_kwargs)
assert filtered_dataset._ex_iterable.batched is False
assert next(iter(filtered_dataset)) == {"id": 1}
@pytest.mark.parametrize("seed", [42, 1337, 101010, 123456])
@pytest.mark.parametrize("epoch", [None, 0, 1])
def test_iterable_dataset_shuffle(dataset: IterableDataset, seed, epoch):
buffer_size = 3
dataset = deepcopy(dataset)
dataset._ex_iterable.kwargs["filepaths"] = ["0.txt", "1.txt"]
dataset = dataset.shuffle(seed, buffer_size=buffer_size)
assert isinstance(dataset._shuffling, ShufflingConfig)
assert isinstance(dataset._shuffling.generator, np.random.Generator)
assert is_rng_equal(dataset._shuffling.generator, np.random.default_rng(seed))
    # The effective seed is derived from the seed and the epoch (a value drawn from rng(seed) minus the epoch)
if epoch is None or epoch == 0:
effective_seed = seed
else:
dataset.set_epoch(epoch)
effective_seed = np.random.default_rng(seed).integers(0, 1 << 63) - epoch
# Shuffling adds a shuffle buffer
expected_first_example_index = next(
iter(BufferShuffledExamplesIterable._iter_random_indices(np.random.default_rng(effective_seed), buffer_size))
)
assert isinstance(dataset._ex_iterable, BufferShuffledExamplesIterable)
# It also shuffles the underlying examples iterable
expected_ex_iterable = ExamplesIterable(
generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]}
).shuffle_data_sources(np.random.default_rng(effective_seed))
assert isinstance(dataset._ex_iterable.ex_iterable, ExamplesIterable)
assert next(iter(dataset)) == list(islice(expected_ex_iterable, expected_first_example_index + 1))[-1][1]
@pytest.mark.parametrize(
"features",
[
None,
Features(
{
"id": Value("int64"),
"label": Value("int64"),
}
),
Features(
{
"id": Value("int64"),
"label": ClassLabel(names=["negative", "positive"]),
}
),
],
)
def test_iterable_dataset_features(features):
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
if features:
expected = [features.encode_example(x) for _, x in ex_iterable]
else:
expected = [x for _, x in ex_iterable]
assert list(dataset) == expected
def test_iterable_dataset_features_cast_to_python():
ex_iterable = ExamplesIterable(
generate_examples_fn, {"timestamp": pd.Timestamp(2020, 1, 1), "array": np.ones(5), "n": 1}
)
features = Features(
{
"id": Value("int64"),
"timestamp": Value("timestamp[us]"),
"array": [Value("int64")],
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
assert list(dataset) == [{"timestamp": pd.Timestamp(2020, 1, 1).to_pydatetime(), "array": [1] * 5, "id": 0}]
@require_torch
@require_tf
@require_jax
@pytest.mark.parametrize(
"format_type", [None, "torch", "python", "tf", "tensorflow", "np", "numpy", "jax", "arrow", "pd", "pandas"]
)
def test_iterable_dataset_with_format(dataset: IterableDataset, format_type):
formatted_dataset = dataset.with_format(format_type)
assert formatted_dataset._formatting.format_type == get_format_type_from_alias(format_type)
@require_torch
def test_iterable_dataset_is_torch_iterable_dataset(dataset: IterableDataset):
from torch.utils.data import DataLoader, _DatasetKind
dataloader = DataLoader(dataset)
assert dataloader._dataset_kind == _DatasetKind.Iterable
out = list(dataloader)
assert len(out) == DEFAULT_N_EXAMPLES
@require_torch
def test_iterable_dataset_persists_epoch_in_torch_workers(dataset: IterableDataset):
from torch.utils.data import DataLoader
dataset = dataset.shuffle(seed=42)
dataloader = DataLoader(dataset, num_workers=1, persistent_workers=True)
epoch0 = list(dataloader)
assert list(dataloader) == epoch0
dataset.set_epoch(1)
assert list(dataloader) != epoch0
# Make sure pickle works even with torch objects in shared memory
dataset_copy: IterableDataset = pickle.loads(pickle.dumps(dataset))
dataloader = DataLoader(dataset_copy, num_workers=1, persistent_workers=True)
epoch1 = list(dataloader)
assert list(dataloader) == epoch1
dataset.set_epoch(2) # this should not affect the copy
assert list(dataloader) == epoch1
dataset_copy.set_epoch(2)
assert list(dataloader) != epoch1
@pytest.mark.parametrize("n", [0, 2, int(1e10)])
def test_iterable_dataset_skip(dataset: IterableDataset, n):
skip_dataset = dataset.skip(n)
assert isinstance(skip_dataset._ex_iterable, SkipExamplesIterable)
assert skip_dataset._ex_iterable.n == n
assert list(skip_dataset) == list(dataset)[n:]
@pytest.mark.parametrize("n", [0, 2, int(1e10)])
def test_iterable_dataset_take(dataset: IterableDataset, n):
take_dataset = dataset.take(n)
assert isinstance(take_dataset._ex_iterable, TakeExamplesIterable)
assert take_dataset._ex_iterable.n == n
assert list(take_dataset) == list(dataset)[:n]
@pytest.mark.parametrize("n", [0, 2])
def test_iterable_dataset_repeat(dataset: IterableDataset, n):
repeat_dataset = dataset.repeat(n)
assert isinstance(repeat_dataset._ex_iterable, RepeatExamplesIterable)
assert repeat_dataset._ex_iterable.num_times == n
assert list(repeat_dataset) == list(dataset) * n
def test_iterable_dataset_shard():
num_examples = 20
num_shards = 5
dataset = Dataset.from_dict({"a": range(num_examples)}).to_iterable_dataset(num_shards=num_shards)
assert sum(dataset.shard(num_shards, i).num_shards for i in range(num_shards)) == dataset.num_shards
assert list(concatenate_datasets([dataset.shard(num_shards, i) for i in range(num_shards)])) == list(dataset)
num_shards = 2
assert sum(dataset.shard(num_shards, i).num_shards for i in range(num_shards)) == dataset.num_shards
assert list(concatenate_datasets([dataset.shard(num_shards, i) for i in range(num_shards)])) == list(dataset)
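    # with contiguous=False the shards interleave examples, so concatenating them changes the order but keeps the same set of rows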
assert (
sum(dataset.shard(num_shards, i, contiguous=False).num_shards for i in range(num_shards)) == dataset.num_shards
)
assert list(
concatenate_datasets([dataset.shard(num_shards, i, contiguous=False) for i in range(num_shards)])
) != list(dataset)
assert sorted(
concatenate_datasets([dataset.shard(num_shards, i, contiguous=False) for i in range(num_shards)]),
key=lambda x: x["a"],
) == list(dataset)
@pytest.mark.parametrize("method", ["skip", "take"])
@pytest.mark.parametrize("after_shuffle", [False, True])
@pytest.mark.parametrize("count", [2, 5, 11])
def test_iterable_dataset_skip_or_take_after_shuffle(method, after_shuffle, count):
seed = 42
n, num_shards = 3, 10
ex_iterable = ExamplesIterable(
generate_examples_fn, {"n": n, "filepaths": [f"{i}.txt" for i in range(num_shards)]}
)
dataset = IterableDataset(ex_iterable)
shuffled_dataset = dataset
if after_shuffle:
shuffled_dataset = shuffled_dataset.shuffle(seed, buffer_size=DEFAULT_N_EXAMPLES)
shuffled_dataset = shuffled_dataset.skip(count) if method == "skip" else shuffled_dataset.take(count)
        # skip/take on a shuffled dataset should not keep the same examples and should shuffle the shards
key = lambda x: f"{x['filepath']}_{x['id']}" # noqa: E731
assert (len(list(dataset)) - count if method == "skip" else count) == len(list(shuffled_dataset))
assert sorted(list(dataset)[count:] if method == "skip" else list(dataset)[:count], key=key) != sorted(
shuffled_dataset, key=key
)
else:
shuffled_dataset = shuffled_dataset.skip(count) if method == "skip" else shuffled_dataset.take(count)
shuffled_dataset = shuffled_dataset.shuffle(seed, buffer_size=DEFAULT_N_EXAMPLES)
        # shuffling a skip/take dataset should keep the same examples and not shuffle the shards
key = lambda x: f"{x['filepath']}_{x['id']}" # noqa: E731
assert (len(list(dataset)) - count if method == "skip" else count) == len(list(shuffled_dataset))
assert sorted(list(dataset)[count:] if method == "skip" else list(dataset)[:count], key=key) == sorted(
shuffled_dataset, key=key
)
@pytest.mark.parametrize("method", ["skip", "take"])
@pytest.mark.parametrize("after_split_by_node", [False, True])
@pytest.mark.parametrize("count", [2, 5, 11])
def test_iterable_dataset_skip_or_take_after_split_by_node(method, after_split_by_node, count):
n, num_shards = 3, 10
rank, world_size = 1, 2
ex_iterable = ExamplesIterable(
generate_examples_fn, {"n": n, "filepaths": [f"{i}.txt" for i in range(num_shards)]}
)
dataset = IterableDataset(ex_iterable)
distributed_dataset = dataset
true_distributed_dataset = split_dataset_by_node(dataset, rank=rank, world_size=world_size)
if after_split_by_node:
distributed_dataset = split_dataset_by_node(distributed_dataset, rank=rank, world_size=world_size)
distributed_dataset = distributed_dataset.skip(count) if method == "skip" else distributed_dataset.take(count)
        assert (
            list(true_distributed_dataset)[count:]
            if method == "skip"
            else list(true_distributed_dataset)[:count]
        ) == list(distributed_dataset)
else:
distributed_dataset = distributed_dataset.skip(count) if method == "skip" else distributed_dataset.take(count)
distributed_dataset = split_dataset_by_node(distributed_dataset, rank=rank, world_size=world_size)
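        # skip/take applied before the split is distributed across nodes, so each node is expected to skip/take roughly count // world_size examples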
assert len(
list(true_distributed_dataset)[count // world_size :]
if method == "skip"
else list(true_distributed_dataset)[: count // world_size]
) == len(list(distributed_dataset))
def test_iterable_dataset_add_column(dataset_with_several_columns: IterableDataset):
new_column = list(range(3 * DEFAULT_N_EXAMPLES))
new_dataset = dataset_with_several_columns.add_column("new_column", new_column)
assert list(new_dataset) == [
{**example, "new_column": idx} for idx, example in enumerate(dataset_with_several_columns)
]
new_dataset = new_dataset._resolve_features()
assert "new_column" in new_dataset.column_names
def test_iterable_dataset_rename_column(dataset_with_several_columns: IterableDataset):
new_dataset = dataset_with_several_columns.rename_column("id", "new_id")
assert list(new_dataset) == [
{("new_id" if k == "id" else k): v for k, v in example.items()} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
# rename the column if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().rename_column("id", "new_id")
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert "id" not in new_dataset.column_names
assert "new_id" in new_dataset.column_names
def test_iterable_dataset_rename_columns(dataset_with_several_columns: IterableDataset):
column_mapping = {"id": "new_id", "filepath": "filename"}
new_dataset = dataset_with_several_columns.rename_columns(column_mapping)
assert list(new_dataset) == [
{column_mapping.get(k, k): v for k, v in example.items()} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
# rename the columns if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().rename_columns(column_mapping)
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c not in new_dataset.column_names for c in ["id", "filepath"])
assert all(c in new_dataset.column_names for c in ["new_id", "filename"])
def test_iterable_dataset_remove_columns(dataset_with_several_columns: IterableDataset):
new_dataset = dataset_with_several_columns.remove_columns("id")
assert list(new_dataset) == [
{k: v for k, v in example.items() if k != "id"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
new_dataset = dataset_with_several_columns.remove_columns(["id", "filepath"])
assert list(new_dataset) == [
{k: v for k, v in example.items() if k != "id" and k != "filepath"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
# remove the columns if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().remove_columns(["id", "filepath"])
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c not in new_dataset.features for c in ["id", "filepath"])
assert all(c not in new_dataset.column_names for c in ["id", "filepath"])
def test_iterable_dataset_select_columns(dataset_with_several_columns: IterableDataset):
new_dataset = dataset_with_several_columns.select_columns("id")
assert list(new_dataset) == [
{k: v for k, v in example.items() if k == "id"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
new_dataset = dataset_with_several_columns.select_columns(["id", "filepath"])
assert list(new_dataset) == [
{k: v for k, v in example.items() if k in ("id", "filepath")} for example in dataset_with_several_columns
]
assert new_dataset.features is None
# select the columns if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().select_columns(["id", "filepath"])
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c in new_dataset.features for c in ["id", "filepath"])
assert all(c in new_dataset.column_names for c in ["id", "filepath"])
def test_iterable_dataset_cast_column():
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10})
features = Features({"id": Value("int64"), "label": Value("int64")})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
casted_dataset = dataset.cast_column("label", Value("bool"))
casted_features = features.copy()
casted_features["label"] = Value("bool")
assert list(casted_dataset) == [casted_features.encode_example(ex) for _, ex in ex_iterable]
def test_iterable_dataset_cast():
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10})
features = Features({"id": Value("int64"), "label": Value("int64")})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
new_features = Features({"id": Value("int64"), "label": Value("bool")})
casted_dataset = dataset.cast(new_features)
assert list(casted_dataset) == [new_features.encode_example(ex) for _, ex in ex_iterable]
def test_iterable_dataset_resolve_features():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
assert dataset.features is None
assert dataset.column_names is None
dataset = dataset._resolve_features()
assert dataset.features == Features(
{
"id": Value("int64"),
}
)
assert dataset.column_names == ["id"]
def test_iterable_dataset_resolve_features_keep_order():
def gen():
yield from zip(range(3), [{"a": 1}, {"c": 1}, {"b": 1}])
ex_iterable = ExamplesIterable(gen, {})
dataset = IterableDataset(ex_iterable)._resolve_features()
# columns appear in order of appearance in the dataset
assert list(dataset.features) == ["a", "c", "b"]
assert dataset.column_names == ["a", "c", "b"]
def test_iterable_dataset_with_features_fill_with_none():
def gen():
yield from zip(range(2), [{"a": 1}, {"b": 1}])
ex_iterable = ExamplesIterable(gen, {})
info = DatasetInfo(features=Features({"a": Value("int32"), "b": Value("int32")}))
dataset = IterableDataset(ex_iterable, info=info)
assert list(dataset) == [{"a": 1, "b": None}, {"b": 1, "a": None}]
def test_concatenate_datasets():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
dataset2 = IterableDataset(ex_iterable2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert list(concatenated_dataset) == list(dataset1) + list(dataset2)
def test_concatenate_datasets_resolves_features():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
dataset2 = IterableDataset(ex_iterable2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert concatenated_dataset.features is not None
assert sorted(concatenated_dataset.features) == ["id", "label"]
def test_concatenate_datasets_with_different_columns():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {})
dataset2 = IterableDataset(ex_iterable2)
# missing column "label" -> it should be replaced with nulls
extended_dataset2_list = [{"label": None, **x} for x in dataset2]
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert list(concatenated_dataset) == list(dataset1) + extended_dataset2_list
# change order
concatenated_dataset = concatenate_datasets([dataset2, dataset1])
assert list(concatenated_dataset) == extended_dataset2_list + list(dataset1)
def test_concatenate_datasets_axis_1():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
dataset2 = IterableDataset(ex_iterable2)
with pytest.raises(ValueError): # column "id" is duplicated -> raise an error
concatenate_datasets([dataset1, dataset2], axis=1)
concatenated_dataset = concatenate_datasets([dataset1, dataset2.remove_columns("id")], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, dataset2)]
def test_concatenate_datasets_axis_1_resolves_features():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
dataset2 = IterableDataset(ex_iterable2).remove_columns("id")
concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1)
assert concatenated_dataset.features is not None
assert sorted(concatenated_dataset.features) == ["id", "label1", "label2"]
def test_concatenate_datasets_axis_1_with_different_lengths():
n1 = 10
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10, "n": n1})
dataset1 = IterableDataset(ex_iterable1)
n2 = 5
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5, "n": n2})
dataset2 = IterableDataset(ex_iterable2).remove_columns("id")
# missing rows -> they should be replaced with nulls
extended_dataset2_list = list(dataset2) + [{"label2": None}] * (n1 - n2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, extended_dataset2_list)]
# change order
concatenated_dataset = concatenate_datasets([dataset2, dataset1], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(extended_dataset2_list, dataset1)]
@pytest.mark.parametrize(
"probas, seed, expected_length, stopping_strategy",
[
(None, None, 3 * (DEFAULT_N_EXAMPLES - 1) + 1, "first_exhausted"),
([1, 0, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"),
([0, 1, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"),
([0.2, 0.5, 0.3], 42, None, "first_exhausted"),
([0.1, 0.1, 0.8], 1337, None, "first_exhausted"),
([0.5, 0.2, 0.3], 101010, None, "first_exhausted"),
(None, None, 3 * DEFAULT_N_EXAMPLES, "all_exhausted"),
([0.2, 0.5, 0.3], 42, None, "all_exhausted"),
([0.1, 0.1, 0.8], 1337, None, "all_exhausted"),
([0.5, 0.2, 0.3], 101010, None, "all_exhausted"),
],
)
def test_interleave_datasets(dataset: IterableDataset, probas, seed, expected_length, stopping_strategy):
d1 = dataset
d2 = dataset.map(lambda x: {"id+1": x["id"] + 1, **x})
d3 = dataset.with_format("python")
datasets = [d1, d2, d3]
merged_dataset = interleave_datasets(
datasets, probabilities=probas, seed=seed, stopping_strategy=stopping_strategy
)
def fill_default(example):
return {"id": None, "id+1": None, **example}
# Check the examples iterable
assert isinstance(
merged_dataset._ex_iterable, (CyclingMultiSourcesExamplesIterable, RandomlyCyclingMultiSourcesExamplesIterable)
)
# Check that it is deterministic
if seed is not None:
merged_dataset2 = interleave_datasets(
[d1, d2, d3], probabilities=probas, seed=seed, stopping_strategy=stopping_strategy
)
assert list(merged_dataset) == list(merged_dataset2)
# Check features
assert merged_dataset.features == Features({"id": Value("int64"), "id+1": Value("int64")})
# Check first example
if seed is not None:
rng = np.random.default_rng(seed)
i = next(iter(cycle(rng.choice(len(datasets), size=1000, p=probas))))
assert next(iter(merged_dataset)) == fill_default(next(iter(datasets[i])))
else:
assert any(next(iter(merged_dataset)) == fill_default(next(iter(dataset))) for dataset in datasets)
# Compute the expected length in case it's random
if expected_length is None:
expected_length = 0
counts = np.array([len(list(d)) for d in datasets])
bool_strategy_func = np.all if stopping_strategy == "all_exhausted" else np.any
rng = np.random.default_rng(seed)
for i in cycle(rng.choice(len(datasets), size=1000, p=probas)):
counts[i] -= 1
expected_length += 1
if bool_strategy_func(counts <= 0):
break
# Check length
assert len(list(merged_dataset)) == expected_length
def test_interleave_datasets_with_features(
dataset: IterableDataset,
):
features = Features(
{
"id": Value("int64"),
"label": ClassLabel(names=["negative", "positive"]),
}
)
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0})
dataset_with_features = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
merged_dataset = interleave_datasets([dataset, dataset_with_features])
assert merged_dataset.features == features
def test_interleave_datasets_with_oversampling():
# Test hardcoded results
d1 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [0, 1, 2]])), {}))
d2 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [10, 11, 12, 13]])), {}))
d3 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [20, 21, 22, 23, 24]])), {}))
expected_values = [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
# Check oversampling strategy without probabilities
assert [x["a"] for x in interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")] == expected_values
# Check oversampling strategy with probabilities
expected_values = [20, 0, 21, 10, 1, 22, 23, 24, 2, 0, 1, 20, 11, 21, 2, 0, 12, 1, 22, 13]
values = [
x["a"]
for x in interleave_datasets(
[d1, d2, d3], probabilities=[0.5, 0.2, 0.3], seed=42, stopping_strategy="all_exhausted"
)
]
assert values == expected_values
@require_torch
def test_with_format_torch(dataset_with_several_columns: IterableDataset):
import torch
dset = dataset_with_several_columns.with_format(type="torch")
example = next(iter(dset))
batch = next(iter(dset.iter(batch_size=3)))
assert len(example) == 3
assert isinstance(example["id"], torch.Tensor)
assert list(example["id"].shape) == []
assert example["id"].item() == 0
assert isinstance(batch["id"], torch.Tensor)
assert isinstance(example["filepath"], list)
assert isinstance(example["filepath"][0], str)
assert example["filepath"][0] == "data0.txt"
assert isinstance(batch["filepath"], list)
assert isinstance(example["metadata"], dict)
assert isinstance(example["metadata"]["sources"], list)
assert isinstance(example["metadata"]["sources"][0], str)
assert isinstance(batch["metadata"], list)
@require_tf
def test_with_format_tf(dataset_with_several_columns: IterableDataset):
import tensorflow as tf
dset = dataset_with_several_columns.with_format(type="tensorflow")
example = next(iter(dset))
batch = next(iter(dset.iter(batch_size=3)))
assert isinstance(example["id"], tf.Tensor)
assert list(example["id"].shape) == []
assert example["id"].numpy().item() == 0
assert isinstance(batch["id"], tf.Tensor)
assert isinstance(example["filepath"], tf.Tensor)
assert example["filepath"][0] == b"data0.txt"
assert isinstance(batch["filepath"], tf.Tensor)
assert isinstance(example["metadata"], dict)
assert isinstance(example["metadata"]["sources"], tf.Tensor)
assert isinstance(batch["metadata"], list)
def test_map_array_are_not_converted_back_to_lists(dataset: IterableDataset):
def func(example):
return {"array": np.array([1, 2, 3])}
dset_test = dataset.map(func)
example = next(iter(dset_test))
# not aligned with Dataset.map because we don't convert back to lists after map()
assert isinstance(example["array"], np.ndarray)
def test_formatted_map(dataset: IterableDataset):
dataset = dataset.with_format("np")
assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray)
dataset = dataset.with_format(None)
assert isinstance(next(dataset.iter(batch_size=3))["id"], list)
def add_one_numpy(example):
assert isinstance(example["id"], np.ndarray)
return {"id": example["id"] + 1}
dataset = dataset.with_format("np")
dataset = dataset.map(add_one_numpy, batched=True)
assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray)
dataset = dataset.with_format(None)
assert isinstance(next(dataset.iter(batch_size=3))["id"], list)
def test_format_from_arrow():
python_arrow_extractor = Formatter.python_arrow_extractor
numpy_arrow_extractor = Formatter.numpy_arrow_extractor
with (
patch.object(Formatter, "python_arrow_extractor") as mock_python_arrow_extractor,
patch.object(Formatter, "numpy_arrow_extractor") as mock_numpy_arrow_extractor,
):
mock_python_arrow_extractor.side_effect = python_arrow_extractor
mock_numpy_arrow_extractor.side_effect = numpy_arrow_extractor
def g():
yield 0, pa.table({"a": range(10)})
ds = IterableDataset(ArrowExamplesIterable(g, {}))
ds = ds.with_format("np")
ds = ds.map(lambda x: x, batched=True)
next(iter(ds))
# we do arrow -> numpy -> python
mock_numpy_arrow_extractor.assert_called()
# we don't do any arrow -> python
mock_python_arrow_extractor.assert_not_called()
def test_format_arrow(dataset: IterableDataset):
ds = dataset.with_format("arrow")
assert isinstance(next(iter(ds)), pa.Table)
assert isinstance(next(iter(ds.iter(batch_size=4))), pa.Table)
assert len(next(iter(ds))) == 1
assert len(next(iter(ds.iter(batch_size=4)))) == 4
ds = ds.map(lambda t: t.append_column("new_col", pa.array([0] * len(t))))
ds = ds.map(lambda t: t.append_column("new_col_batched", pa.array([1] * len(t))), batched=True)
ds = ds.with_format(None)
assert next(iter(ds)) == {**next(iter(dataset)), "new_col": 0, "new_col_batched": 1}
def test_format_pandas(dataset: IterableDataset):
ds = dataset.with_format("pandas")
assert isinstance(next(iter(ds)), pd.DataFrame)
assert isinstance(next(iter(ds.iter(batch_size=4))), pd.DataFrame)
assert len(next(iter(ds))) == 1
assert len(next(iter(ds.iter(batch_size=4)))) == 4
ds = ds.map(lambda df: df.assign(new_col=[0] * len(df)))
ds = ds.map(lambda df: df.assign(new_col_batched=[1] * len(df)), batched=True)
ds = ds.with_format(None)
assert next(iter(ds)) == {**next(iter(dataset)), "new_col": 0, "new_col_batched": 1}
@require_polars
def test_format_polars(dataset: IterableDataset):
import polars as pl
ds = dataset.with_format("polars")
assert isinstance(next(iter(ds)), pl.DataFrame)
assert isinstance(next(iter(ds.iter(batch_size=4))), pl.DataFrame)
assert len(next(iter(ds))) == 1
assert len(next(iter(ds.iter(batch_size=4)))) == 4
ds = ds.map(lambda df: df.with_columns(pl.Series([0] * len(df)).alias("new_col")))
ds = ds.map(lambda df: df.with_columns(pl.Series([1] * len(df)).alias("new_col_batched")), batched=True)
ds = ds.with_format(None)
assert next(iter(ds)) == {**next(iter(dataset)), "new_col": 0, "new_col_batched": 1}
@pytest.mark.parametrize("num_shards1, num_shards2, num_workers", [(2, 1, 1), (2, 2, 2), (1, 3, 1), (4, 3, 3)])
def test_interleave_dataset_with_sharding(num_shards1, num_shards2, num_workers):
from torch.utils.data import DataLoader
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-1.txt" for i in range(num_shards1)]})
dataset1 = IterableDataset(ex_iterable1).with_format("torch")
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-2.txt" for i in range(num_shards2)]})
dataset2 = IterableDataset(ex_iterable2).with_format("torch")
dataset_merged = interleave_datasets([dataset1, dataset2], stopping_strategy="first_exhausted")
assert dataset_merged.num_shards == min(num_shards1, num_shards2)
dataloader = DataLoader(dataset_merged, batch_size=None, num_workers=num_workers)
result = list(dataloader)
expected_length = 2 * min(
len([example for _, example in ex_iterable1]), len([example for _, example in ex_iterable2])
)
# some samples may be missing because the stopping strategy is applied per process
assert expected_length - num_workers <= len(result) <= expected_length
assert len(result) == len({str(x) for x in result})
def filter_func(batch):
return batch["id"] == 4
def map_func(batch):
batch["id"] *= 2
return batch
def test_pickle_after_many_transforms(dataset_with_several_columns):
dataset = dataset_with_several_columns
dataset = dataset.remove_columns(["filepath"])
dataset = dataset.take(5)
dataset = dataset.map(map_func)
dataset = dataset.shuffle()
dataset = dataset.skip(1)
dataset = dataset.filter(filter_func)
dataset = dataset.add_column("additional_col", ["something"])
dataset = dataset.rename_column("metadata", "metadata1")
dataset = dataset.rename_columns({"id": "id1", "metadata1": "metadata2"})
dataset = dataset.select_columns(["id1", "additional_col"])
unpickled_dataset = pickle.loads(pickle.dumps(dataset))
assert list(unpickled_dataset) == list(dataset)
@require_torchdata_stateful_dataloader
def test_resume_dataloader(dataset: IterableDataset):
from torchdata.stateful_dataloader import StatefulDataLoader
dl = StatefulDataLoader(dataset)
remaining = []
for i, x in enumerate(dl):
if i == 2:
state_dict = dl.state_dict()
elif i > 2:
remaining.append(x)
dl = StatefulDataLoader(dataset)
dl.load_state_dict(state_dict)
assert remaining == list(dl)
def test_iterable_dataset_batch():
# Create a simple IterableDataset
data = [{"id": i, "text": f"Text {i}"} for i in range(10)]
ds = IterableDataset.from_generator(lambda: (x for x in data))
# Test with batch_size=3, drop_last_batch=False
batched_ds = ds.batch(batch_size=3, drop_last_batch=False)
batches = list(batched_ds)
assert len(batches) == 4 # 3 full batches and 1 partial batch
for i, batch in enumerate(batches[:3]): # Check full batches
assert len(batch["id"]) == 3
assert len(batch["text"]) == 3
assert batch["id"] == [3 * i, 3 * i + 1, 3 * i + 2]
assert batch["text"] == [f"Text {3 * i}", f"Text {3 * i + 1}", f"Text {3 * i + 2}"]
# Check last partial batch
assert len(batches[3]["id"]) == 1
assert len(batches[3]["text"]) == 1
assert batches[3]["id"] == [9]
assert batches[3]["text"] == ["Text 9"]
# Test with batch_size=3, drop_last_batch=True
batched_ds = ds.batch(batch_size=3, drop_last_batch=True)
batches = list(batched_ds)
assert len(batches) == 3 # Only full batches
for i, batch in enumerate(batches):
assert len(batch["id"]) == 3
assert len(batch["text"]) == 3
assert batch["id"] == [3 * i, 3 * i + 1, 3 * i + 2]
assert batch["text"] == [f"Text {3 * i}", f"Text {3 * i + 1}", f"Text {3 * i + 2}"]
# Test with batch_size=4 (doesn't evenly divide dataset size)
batched_ds = ds.batch(batch_size=4, drop_last_batch=False)
batches = list(batched_ds)
assert len(batches) == 3 # 2 full batches and 1 partial batch
for i, batch in enumerate(batches[:2]): # Check full batches
assert len(batch["id"]) == 4
assert len(batch["text"]) == 4
assert batch["id"] == [4 * i, 4 * i + 1, 4 * i + 2, 4 * i + 3]
assert batch["text"] == [f"Text {4 * i}", f"Text {4 * i + 1}", f"Text {4 * i + 2}", f"Text {4 * i + 3}"]
# Check last partial batch
assert len(batches[2]["id"]) == 2
assert len(batches[2]["text"]) == 2
assert batches[2]["id"] == [8, 9]
assert batches[2]["text"] == ["Text 8", "Text 9"]
| datasets/tests/test_iterable_dataset.py/0 | {
"file_path": "datasets/tests/test_iterable_dataset.py",
"repo_id": "datasets",
"token_count": 45174
} |
import glob
import subprocess
import sys
from typing import List
sys.path.append(".")
from benchmark_text_to_image import ALL_T2I_CKPTS # noqa: E402
PATTERN = "benchmark_*.py"
class SubprocessCallException(Exception):
pass
# Taken from `test_examples_utils.py`
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and optionally returns its `stdout`. Also properly captures and
re-raises any error that occurred while running `command`.
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
def main():
python_files = glob.glob(PATTERN)
for file in python_files:
print(f"****** Running file: {file} ******")
# Run with canonical settings.
if file != "benchmark_text_to_image.py" and file != "benchmark_ip_adapters.py":
command = f"python {file}"
run_command(command.split())
command += " --run_compile"
run_command(command.split())
# Run variants.
for file in python_files:
# See: https://github.com/pytorch/pytorch/issues/129637
if file == "benchmark_ip_adapters.py":
continue
if file == "benchmark_text_to_image.py":
for ckpt in ALL_T2I_CKPTS:
command = f"python {file} --ckpt {ckpt}"
if "turbo" in ckpt:
command += " --num_inference_steps 1"
run_command(command.split())
command += " --run_compile"
run_command(command.split())
elif file == "benchmark_sd_img.py":
for ckpt in ["stabilityai/stable-diffusion-xl-refiner-1.0", "stabilityai/sdxl-turbo"]:
command = f"python {file} --ckpt {ckpt}"
if ckpt == "stabilityai/sdxl-turbo":
command += " --num_inference_steps 2"
run_command(command.split())
command += " --run_compile"
run_command(command.split())
elif file in ["benchmark_sd_inpainting.py", "benchmark_ip_adapters.py"]:
sdxl_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
command = f"python {file} --ckpt {sdxl_ckpt}"
run_command(command.split())
command += " --run_compile"
run_command(command.split())
elif file in ["benchmark_controlnet.py", "benchmark_t2i_adapter.py"]:
sdxl_ckpt = (
"diffusers/controlnet-canny-sdxl-1.0"
if "controlnet" in file
else "TencentARC/t2i-adapter-canny-sdxl-1.0"
)
command = f"python {file} --ckpt {sdxl_ckpt}"
run_command(command.split())
command += " --run_compile"
run_command(command.split())
if __name__ == "__main__":
main()
| diffusers/benchmarks/run_all.py/0 | {
"file_path": "diffusers/benchmarks/run_all.py",
"repo_id": "diffusers",
"token_count": 1527
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Outpainting
Outpainting extends an image beyond its original boundaries, allowing you to add, replace, or modify visual elements in an image while preserving the original image. Like [inpainting](../using-diffusers/inpaint), you want to fill the white area (in this case, the area outside of the original image) with new visual elements while keeping the original image (represented by a mask of black pixels). There are a couple of ways to outpaint, such as with a [ControlNet](https://hf.co/blog/OzzyGT/outpainting-controlnet) or with [Differential Diffusion](https://hf.co/blog/OzzyGT/outpainting-differential-diffusion).
This guide will show you how to outpaint with an inpainting model, ControlNet, and a ZoeDepth estimator.
Before you begin, make sure you have the [controlnet_aux](https://github.com/huggingface/controlnet_aux) library installed so you can use the ZoeDepth estimator.
```py
!pip install -q controlnet_aux
```
## Image preparation
Start by picking an image to outpaint with and remove the background with a Space like [BRIA-RMBG-1.4](https://hf.co/spaces/briaai/BRIA-RMBG-1.4).
<iframe
src="https://briaai-bria-rmbg-1-4.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
For example, remove the background from this image of a pair of shoes.
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/original-jordan.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/no-background-jordan.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">background removed</figcaption>
</div>
</div>
[Stable Diffusion XL (SDXL)](../using-diffusers/sdxl) models work best with 1024x1024 images, but you can resize the image to any size as long as your hardware has enough memory to support it. The transparent background in the image should also be replaced with a white background. Create a function (like the one below) that scales and pastes the image onto a white background.
```py
import random
import requests
import torch
from controlnet_aux import ZoeDetector
from PIL import Image, ImageOps
from diffusers import (
AutoencoderKL,
ControlNetModel,
StableDiffusionXLControlNetPipeline,
StableDiffusionXLInpaintPipeline,
)
def scale_and_paste(original_image):
aspect_ratio = original_image.width / original_image.height
if original_image.width > original_image.height:
new_width = 1024
new_height = round(new_width / aspect_ratio)
else:
new_height = 1024
new_width = round(new_height * aspect_ratio)
resized_original = original_image.resize((new_width, new_height), Image.LANCZOS)
white_background = Image.new("RGBA", (1024, 1024), "white")
x = (1024 - new_width) // 2
y = (1024 - new_height) // 2
white_background.paste(resized_original, (x, y), resized_original)
return resized_original, white_background
original_image = Image.open(
requests.get(
"https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/no-background-jordan.png",
stream=True,
).raw
).convert("RGBA")
resized_img, white_bg_image = scale_and_paste(original_image)
```
To avoid adding unwanted extra details, use the ZoeDepth estimator to provide additional guidance during generation and to ensure the shoes remain consistent with the original image.
```py
zoe = ZoeDetector.from_pretrained("lllyasviel/Annotators")
image_zoe = zoe(white_bg_image, detect_resolution=512, image_resolution=1024)
image_zoe
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/zoedepth-jordan.png"/>
</div>
## Outpaint
Once your image is ready, you can generate content in the white area around the shoes with [controlnet-inpaint-dreamer-sdxl](https://hf.co/destitech/controlnet-inpaint-dreamer-sdxl), a SDXL ControlNet trained for inpainting.
Load the inpainting ControlNet, the ZoeDepth model, and the VAE, and pass them to the [`StableDiffusionXLControlNetPipeline`]. Then you can create an optional `generate_image` function (for convenience) to outpaint an initial image.
```py
controlnets = [
ControlNetModel.from_pretrained(
"destitech/controlnet-inpaint-dreamer-sdxl", torch_dtype=torch.float16, variant="fp16"
),
ControlNetModel.from_pretrained(
"diffusers/controlnet-zoe-depth-sdxl-1.0", torch_dtype=torch.float16
),
]
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to("cuda")
pipeline = StableDiffusionXLControlNetPipeline.from_pretrained(
"SG161222/RealVisXL_V4.0", torch_dtype=torch.float16, variant="fp16", controlnet=controlnets, vae=vae
).to("cuda")
def generate_image(prompt, negative_prompt, inpaint_image, zoe_image, seed: int = None):
if seed is None:
seed = random.randint(0, 2**32 - 1)
generator = torch.Generator(device="cpu").manual_seed(seed)
image = pipeline(
prompt,
negative_prompt=negative_prompt,
image=[inpaint_image, zoe_image],
guidance_scale=6.5,
num_inference_steps=25,
generator=generator,
controlnet_conditioning_scale=[0.5, 0.8],
control_guidance_end=[0.9, 0.6],
).images[0]
return image
prompt = "nike air jordans on a basketball court"
negative_prompt = ""
temp_image = generate_image(prompt, negative_prompt, white_bg_image, image_zoe, 908097)
```
Paste the original image over the initial outpainted image. You'll improve the outpainted background in a later step.
```py
x = (1024 - resized_img.width) // 2
y = (1024 - resized_img.height) // 2
temp_image.paste(resized_img, (x, y), resized_img)
temp_image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/initial-outpaint.png"/>
</div>
> [!TIP]
> Now is a good time to free up some memory if you're running low!
>
> ```py
> pipeline=None
> torch.cuda.empty_cache()
> ```
Now that you have an initial outpainted image, load the [`StableDiffusionXLInpaintPipeline`] with the [RealVisXL](https://hf.co/SG161222/RealVisXL_V4.0) model to generate the final outpainted image with better quality.
```py
pipeline = StableDiffusionXLInpaintPipeline.from_pretrained(
"OzzyGT/RealVisXL_V4.0_inpainting",
torch_dtype=torch.float16,
variant="fp16",
vae=vae,
).to("cuda")
```
Prepare a mask for the final outpainted image. To create a more natural transition between the original image and the outpainted background, blur the mask to help it blend better.
```py
mask = Image.new("L", temp_image.size)
mask.paste(resized_img.split()[3], (x, y))
mask = ImageOps.invert(mask)
final_mask = mask.point(lambda p: p > 128 and 255)
mask_blurred = pipeline.mask_processor.blur(final_mask, blur_factor=20)
mask_blurred
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/blurred-mask.png"/>
</div>
Create a better prompt and pass it to the `generate_outpaint` function to generate the final outpainted image. Again, paste the original image over the final outpainted background.
```py
def generate_outpaint(prompt, negative_prompt, image, mask, seed: int = None):
if seed is None:
seed = random.randint(0, 2**32 - 1)
generator = torch.Generator(device="cpu").manual_seed(seed)
image = pipeline(
prompt,
negative_prompt=negative_prompt,
image=image,
mask_image=mask,
guidance_scale=10.0,
strength=0.8,
num_inference_steps=30,
generator=generator,
).images[0]
return image
prompt = "high quality photo of nike air jordans on a basketball court, highly detailed"
negative_prompt = ""
final_image = generate_outpaint(prompt, negative_prompt, temp_image, mask_blurred, 7688778)
x = (1024 - resized_img.width) // 2
y = (1024 - resized_img.height) // 2
final_image.paste(resized_img, (x, y), resized_img)
final_image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/final-outpaint.png"/>
</div>
| diffusers/docs/source/en/advanced_inference/outpaint.md/0 | {
"file_path": "diffusers/docs/source/en/advanced_inference/outpaint.md",
"repo_id": "diffusers",
"token_count": 3145
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# aMUSEd
aMUSEd was introduced in [aMUSEd: An Open MUSE Reproduction](https://huggingface.co/papers/2401.01808) by Suraj Patil, William Berman, Robin Rombach, and Patrick von Platen.
Amused is a lightweight text-to-image model based on the [MUSE](https://arxiv.org/abs/2301.00704) architecture. Amused is particularly useful in applications that require a lightweight and fast model, such as generating many images quickly at once.
Amused is a VQ-VAE token-based transformer that can generate an image in fewer forward passes than many diffusion models. In contrast with MUSE, it uses the smaller CLIP-L/14 text encoder instead of T5-XXL. Due to its small parameter count and its generation process that needs only a few forward passes, amused can generate many images quickly. This benefit is seen particularly at larger batch sizes.
The abstract from the paper is:
*We present aMUSEd, an open-source, lightweight masked image model (MIM) for text-to-image generation based on MUSE. With 10 percent of MUSE's parameters, aMUSEd is focused on fast image generation. We believe MIM is under-explored compared to latent diffusion, the prevailing approach for text-to-image generation. Compared to latent diffusion, MIM requires fewer inference steps and is more interpretable. Additionally, MIM can be fine-tuned to learn additional styles with only a single image. We hope to encourage further exploration of MIM by demonstrating its effectiveness on large-scale text-to-image generation and releasing reproducible training code. We also release checkpoints for two models which directly produce images at 256x256 and 512x512 resolutions.*
| Model | Params |
|-------|--------|
| [amused-256](https://huggingface.co/amused/amused-256) | 603M |
| [amused-512](https://huggingface.co/amused/amused-512) | 608M |
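For a quick feel of the API, a text-to-image call with the 256x256 checkpoint could look like the minimal sketch below; the `variant="fp16"` flag and the CUDA device are assumptions you may need to adapt to your setup.
```py
import torch
from diffusers import AmusedPipeline
# Minimal sketch: half-precision weights on a CUDA GPU (adjust to your hardware)
pipe = AmusedPipeline.from_pretrained(
    "amused/amused-256", variant="fp16", torch_dtype=torch.float16
).to("cuda")
prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]
image.save("astronaut.png")
```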
## AmusedPipeline
[[autodoc]] AmusedPipeline
- __call__
- all
- enable_xformers_memory_efficient_attention
- disable_xformers_memory_efficient_attention
[[autodoc]] AmusedImg2ImgPipeline
- __call__
- all
- enable_xformers_memory_efficient_attention
- disable_xformers_memory_efficient_attention
[[autodoc]] AmusedInpaintPipeline
- __call__
- all
- enable_xformers_memory_efficient_attention
- disable_xformers_memory_efficient_attention | diffusers/docs/source/en/api/pipelines/amused.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/amused.md",
"repo_id": "diffusers",
"token_count": 770
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Kandinsky 3
Kandinsky 3 was created by [Vladimir Arkhipkin](https://github.com/oriBetelgeuse), [Anastasia Maltseva](https://github.com/NastyaMittseva), [Igor Pavlov](https://github.com/boomb0om), [Andrei Filatov](https://github.com/anvilarth), [Arseniy Shakhmatov](https://github.com/cene555), [Andrey Kuznetsov](https://github.com/kuznetsoffandrey), [Denis Dimitrov](https://github.com/denndimitrov), and [Zein Shaheen](https://github.com/zeinsh).
The description from its GitHub page:
*Kandinsky 3.0 is an open-source text-to-image diffusion model built upon the Kandinsky2-x model family. In comparison to its predecessors, enhancements have been made to the text understanding and visual quality of the model, achieved by increasing the size of the text encoder and Diffusion U-Net models, respectively.*
Its architecture includes 3 main components:
1. [FLAN-UL2](https://huggingface.co/google/flan-ul2), an encoder-decoder model based on the T5 architecture.
2. A new U-Net architecture featuring BigGAN-deep blocks, which doubles the depth while maintaining the same number of parameters.
3. Sber-MoVQGAN is a decoder proven to have superior results in image restoration.
The original codebase can be found at [ai-forever/Kandinsky-3](https://github.com/ai-forever/Kandinsky-3).
<Tip>
Check out the [Kandinsky Community](https://huggingface.co/kandinsky-community) organization on the Hub for the official model checkpoints for tasks like text-to-image, image-to-image, and inpainting.
</Tip>
<Tip>
Make sure to check out the schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.
</Tip>
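For orientation, a text-to-image call could look like the sketch below; the checkpoint id `kandinsky-community/kandinsky-3` and the fp16 settings are assumptions based on the community organization mentioned above, so double-check them against the model card.
```py
import torch
from diffusers import Kandinsky3Pipeline
# Sketch: checkpoint id and fp16 variant are assumptions, adjust to the checkpoint you use
pipe = Kandinsky3Pipeline.from_pretrained(
    "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats."
image = pipe(prompt, num_inference_steps=25).images[0]
```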
## Kandinsky3Pipeline
[[autodoc]] Kandinsky3Pipeline
- all
- __call__
## Kandinsky3Img2ImgPipeline
[[autodoc]] Kandinsky3Img2ImgPipeline
- all
- __call__
| diffusers/docs/source/en/api/pipelines/kandinsky3.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/kandinsky3.md",
"repo_id": "diffusers",
"token_count": 760
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# K-Diffusion
[k-diffusion](https://github.com/crowsonkb/k-diffusion) is a popular library created by [Katherine Crowson](https://github.com/crowsonkb/). We provide `StableDiffusionKDiffusionPipeline` and `StableDiffusionXLKDiffusionPipeline` that allow you to run Stable Diffusion with samplers from k-diffusion.
Note that most of the samplers from k-diffusion are implemented in Diffusers, and we recommend using the existing schedulers. You can find a mapping between the k-diffusion samplers and the schedulers in Diffusers [here](https://huggingface.co/docs/diffusers/api/schedulers/overview).
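A minimal sketch of running one of the k-diffusion samplers is shown below; it assumes the `k-diffusion` package is installed and that the pipeline's `set_scheduler` helper accepts k-diffusion sampler names such as `sample_heun`.
```py
import torch
from diffusers import StableDiffusionKDiffusionPipeline
# Assumes `pip install k-diffusion`; "sample_heun" is one of the k-diffusion sampler names
pipe = StableDiffusionKDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.set_scheduler("sample_heun")
image = pipe("an astronaut riding a horse on mars", num_inference_steps=25).images[0]
```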
## StableDiffusionKDiffusionPipeline
[[autodoc]] StableDiffusionKDiffusionPipeline
## StableDiffusionXLKDiffusionPipeline
[[autodoc]] StableDiffusionXLKDiffusionPipeline | diffusers/docs/source/en/api/pipelines/stable_diffusion/k_diffusion.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/k_diffusion.md",
"repo_id": "diffusers",
"token_count": 378
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# EulerDiscreteScheduler
The Euler scheduler (Algorithm 2) is from the [Elucidating the Design Space of Diffusion-Based Generative Models](https://huggingface.co/papers/2206.00364) paper by Karras et al. This is a fast scheduler which can often generate good outputs in 20-30 steps. The scheduler is based on the original [k-diffusion](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L51) implementation by [Katherine Crowson](https://github.com/crowsonkb/).
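Like other schedulers, it can be swapped into an existing pipeline by reusing the current scheduler configuration; a minimal sketch:
```py
import torch
from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
# Reuse the existing scheduler configuration when swapping in the Euler scheduler
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse on mars", num_inference_steps=30).images[0]
```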
## EulerDiscreteScheduler
[[autodoc]] EulerDiscreteScheduler
## EulerDiscreteSchedulerOutput
[[autodoc]] schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput
| diffusers/docs/source/en/api/schedulers/euler.md/0 | {
"file_path": "diffusers/docs/source/en/api/schedulers/euler.md",
"repo_id": "diffusers",
"token_count": 375
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# DeepCache
[DeepCache](https://huggingface.co/papers/2312.00858) accelerates [`StableDiffusionPipeline`] and [`StableDiffusionXLPipeline`] by strategically caching and reusing high-level features while efficiently updating low-level features by taking advantage of the U-Net architecture.
Start by installing [DeepCache](https://github.com/horseee/DeepCache):
```bash
pip install DeepCache
```
Then load and enable the [`DeepCacheSDHelper`](https://github.com/horseee/DeepCache#usage):
```diff
import torch
from diffusers import StableDiffusionPipeline
pipe = StableDiffusionPipeline.from_pretrained('stable-diffusion-v1-5/stable-diffusion-v1-5', torch_dtype=torch.float16).to("cuda")
+ from DeepCache import DeepCacheSDHelper
+ helper = DeepCacheSDHelper(pipe=pipe)
+ helper.set_params(
+ cache_interval=3,
+ cache_branch_id=0,
+ )
+ helper.enable()
image = pipe("a photo of an astronaut on a moon").images[0]
```
The `set_params` method accepts two arguments: `cache_interval` and `cache_branch_id`. `cache_interval` controls the frequency of feature caching, specified as the number of steps between each cache operation. `cache_branch_id` identifies which branch of the network (ordered from the shallowest to the deepest layer) is responsible for executing the caching processes.
Opting for a lower `cache_branch_id` or a larger `cache_interval` can lead to faster inference speed at the expense of reduced image quality (ablation experiments of these two hyperparameters can be found in the [paper](https://arxiv.org/abs/2312.00858)). Once those arguments are set, use the `enable` or `disable` methods to activate or deactivate the `DeepCacheSDHelper`.
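For instance, to compare against the uncached baseline, you can toggle the helper around a call; a small sketch building on the snippet above:
```py
# Generate once without caching for comparison, then re-enable DeepCache
helper.disable()
baseline_image = pipe("a photo of an astronaut on a moon").images[0]
helper.enable()
cached_image = pipe("a photo of an astronaut on a moon").images[0]
```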
<div class="flex justify-center">
<img src="https://github.com/horseee/Diffusion_DeepCache/raw/master/static/images/example.png">
</div>
You can find more generated samples (original pipeline vs DeepCache) and the corresponding inference latency in the [WandB report](https://wandb.ai/horseee/DeepCache/runs/jwlsqqgt?workspace=user-horseee). The prompts are randomly selected from the [MS-COCO 2017](https://cocodataset.org/#home) dataset.
## Benchmark
We tested how much faster DeepCache accelerates [Stable Diffusion v2.1](https://huggingface.co/stabilityai/stable-diffusion-2-1) with 50 inference steps on an NVIDIA RTX A5000, using different configurations for resolution, batch size, cache interval (I), and cache branch (B).
| **Resolution** | **Batch size** | **Original** | **DeepCache(I=3, B=0)** | **DeepCache(I=5, B=0)** | **DeepCache(I=5, B=1)** |
|----------------|----------------|--------------|-------------------------|-------------------------|-------------------------|
| 512| 8| 15.96| 6.88(2.32x)| 5.03(3.18x)| 7.27(2.20x)|
| | 4| 8.39| 3.60(2.33x)| 2.62(3.21x)| 3.75(2.24x)|
| | 1| 2.61| 1.12(2.33x)| 0.81(3.24x)| 1.11(2.35x)|
| 768| 8| 43.58| 18.99(2.29x)| 13.96(3.12x)| 21.27(2.05x)|
| | 4| 22.24| 9.67(2.30x)| 7.10(3.13x)| 10.74(2.07x)|
| | 1| 6.33| 2.72(2.33x)| 1.97(3.21x)| 2.98(2.12x)|
| 1024| 8| 101.95| 45.57(2.24x)| 33.72(3.02x)| 53.00(1.92x)|
| | 4| 49.25| 21.86(2.25x)| 16.19(3.04x)| 25.78(1.91x)|
| | 1| 13.83| 6.07(2.28x)| 4.43(3.12x)| 7.15(1.93x)|
| diffusers/docs/source/en/optimization/deepcache.md/0 | {
"file_path": "diffusers/docs/source/en/optimization/deepcache.md",
"repo_id": "diffusers",
"token_count": 1917
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Quantization
Quantization techniques focus on representing data with less information while also trying to not lose too much accuracy. This often means converting a data type to represent the same information with fewer bits. For example, if your model weights are stored as 32-bit floating points and they're quantized to 16-bit floating points, this halves the model size, which makes it easier to store and reduces memory usage. Lower precision can also speed up inference because it takes less time to perform calculations with fewer bits.
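As a quick back-of-the-envelope illustration (a standalone PyTorch sketch, not a Diffusers API), casting one million float32 values to float16 halves their memory footprint:
```py
import torch
weights_fp32 = torch.randn(1_000_000, dtype=torch.float32)
weights_fp16 = weights_fp32.to(torch.float16)
# 4 bytes per float32 element vs 2 bytes per float16 element
print(weights_fp32.element_size() * weights_fp32.nelement())  # 4000000 bytes
print(weights_fp16.element_size() * weights_fp16.nelement())  # 2000000 bytes
```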
<Tip>
Interested in adding a new quantization method to Diffusers? Refer to the [Contribute new quantization method guide](https://huggingface.co/docs/transformers/main/en/quantization/contribute) to learn more about adding a new quantization method.
</Tip>
<Tip>
If you are new to the quantization field, we recommend checking out these beginner-friendly courses about quantization, created in collaboration with DeepLearning.AI:
* [Quantization Fundamentals with Hugging Face](https://www.deeplearning.ai/short-courses/quantization-fundamentals-with-hugging-face/)
* [Quantization in Depth](https://www.deeplearning.ai/short-courses/quantization-in-depth/)
</Tip>
## When to use what?
Diffusers currently supports the following quantization methods.
- [BitsandBytes](./bitsandbytes)
- [TorchAO](./torchao)
- [GGUF](./gguf)
[This resource](https://huggingface.co/docs/transformers/main/en/quantization/overview#when-to-use-what) provides a good overview of the pros and cons of different quantization techniques.
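As a rough sketch of what using one of these backends looks like, the snippet below loads a transformer in 8-bit with bitsandbytes; the class name, checkpoint, and arguments are assumptions for illustration, so refer to the linked guides for the exact API.
```py
import torch
from diffusers import BitsAndBytesConfig, SD3Transformer2DModel
# Sketch only: checkpoint and arguments are assumptions; see the bitsandbytes guide above
quant_config = BitsAndBytesConfig(load_in_8bit=True)
transformer = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)
```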
| diffusers/docs/source/en/quantization/overview.md/0 | {
"file_path": "diffusers/docs/source/en/quantization/overview.md",
"repo_id": "diffusers",
"token_count": 544
} |
<!--Copyright 2024 Marigold authors and The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Marigold Pipelines for Computer Vision Tasks
[Marigold](../api/pipelines/marigold) is a novel diffusion-based dense prediction approach, and a set of pipelines for various computer vision tasks, such as monocular depth estimation.
This guide will show you how to use Marigold to obtain fast and high-quality predictions for images and videos.
Each pipeline supports one Computer Vision task, which takes an RGB image as input and produces a *prediction* of the modality of interest, such as a depth map of the input image.
Currently, the following tasks are implemented:
| Pipeline | Predicted Modalities | Demos |
|---------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------:|
| [MarigoldDepthPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py) | [Depth](https://en.wikipedia.org/wiki/Depth_map), [Disparity](https://en.wikipedia.org/wiki/Binocular_disparity) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-lcm), [Slow Original Demo (DDIM)](https://huggingface.co/spaces/prs-eth/marigold) |
| [MarigoldNormalsPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py) | [Surface normals](https://en.wikipedia.org/wiki/Normal_mapping) | [Fast Demo (LCM)](https://huggingface.co/spaces/prs-eth/marigold-normals-lcm) |
The original checkpoints can be found under the [PRS-ETH](https://huggingface.co/prs-eth/) Hugging Face organization.
These checkpoints are meant to work with diffusers pipelines and the [original codebase](https://github.com/prs-eth/marigold).
The original code can also be used to train new checkpoints.
| Checkpoint | Modality | Comment |
|-----------------------------------------------------------------------------------------------|----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [prs-eth/marigold-v1-0](https://huggingface.co/prs-eth/marigold-v1-0) | Depth | The first Marigold Depth checkpoint, which predicts *affine-invariant depth* maps. The performance of this checkpoint in benchmarks was studied in the original [paper](https://huggingface.co/papers/2312.02145). Designed to be used with the `DDIMScheduler` at inference, it requires at least 10 steps to get reliable predictions. Affine-invariant depth prediction has a range of values in each pixel between 0 (near plane) and 1 (far plane); both planes are chosen by the model as part of the inference process. See the `MarigoldImageProcessor` reference for visualization utilities. |
| [prs-eth/marigold-depth-lcm-v1-0](https://huggingface.co/prs-eth/marigold-depth-lcm-v1-0) | Depth | The fast Marigold Depth checkpoint, fine-tuned from `prs-eth/marigold-v1-0`. Designed to be used with the `LCMScheduler` at inference, it requires as little as 1 step to get reliable predictions. The prediction reliability saturates at 4 steps and declines after that. |
| [prs-eth/marigold-normals-v0-1](https://huggingface.co/prs-eth/marigold-normals-v0-1) | Normals | A preview checkpoint for the Marigold Normals pipeline. Designed to be used with the `DDIMScheduler` at inference, it requires at least 10 steps to get reliable predictions. The surface normals predictions are unit-length 3D vectors with values in the range from -1 to 1. *This checkpoint will be phased out after the release of `v1-0` version.* |
| [prs-eth/marigold-normals-lcm-v0-1](https://huggingface.co/prs-eth/marigold-normals-lcm-v0-1) | Normals | The fast Marigold Normals checkpoint, fine-tuned from `prs-eth/marigold-normals-v0-1`. Designed to be used with the `LCMScheduler` at inference, it requires as little as 1 step to get reliable predictions. The prediction reliability saturates at 4 steps and declines after that. *This checkpoint will be phased out after the release of `v1-0` version.* |
The examples below are mostly given for depth prediction, but they can be universally applied to the other supported modalities.
We showcase the predictions using the same input image of Albert Einstein generated by Midjourney.
This makes it easier to compare visualizations of the predictions across various modalities and checkpoints.
<div class="flex gap-4" style="justify-content: center; width: 100%;">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://marigoldmonodepth.github.io/images/einstein.jpg"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Example input image for all Marigold pipelines
</figcaption>
</div>
</div>
### Depth Prediction Quick Start
To get the first depth prediction, load `prs-eth/marigold-depth-lcm-v1-0` checkpoint into `MarigoldDepthPipeline` pipeline, put the image through the pipeline, and save the predictions:
```python
import diffusers
import torch
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
"prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(image)
vis = pipe.image_processor.visualize_depth(depth.prediction)
vis[0].save("einstein_depth.png")
depth_16bit = pipe.image_processor.export_depth_to_16bit_png(depth.prediction)
depth_16bit[0].save("einstein_depth_16bit.png")
```
The visualization function for depth [`~pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_depth`] applies one of [matplotlib's colormaps](https://matplotlib.org/stable/users/explain/colors/colormaps.html) (`Spectral` by default) to map the predicted pixel values from a single-channel `[0, 1]` depth range into an RGB image.
With the `Spectral` colormap, near pixels are painted red, and far pixels are painted blue.
The 16-bit PNG file stores the single channel values mapped linearly from the `[0, 1]` range into `[0, 65535]`.
Below are the raw and the visualized predictions; as can be seen, dark areas (mustache) are easier to distinguish in the visualization:
<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_depth_16bit.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Predicted depth (16-bit PNG)
</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_depth.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Predicted depth visualization (Spectral)
</figcaption>
</div>
</div>
### Surface Normals Prediction Quick Start
Load `prs-eth/marigold-normals-lcm-v0-1` checkpoint into `MarigoldNormalsPipeline` pipeline, put the image through the pipeline, and save the predictions:
```python
import diffusers
import torch
pipe = diffusers.MarigoldNormalsPipeline.from_pretrained(
"prs-eth/marigold-normals-lcm-v0-1", variant="fp16", torch_dtype=torch.float16
).to("cuda")
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
normals = pipe(image)
vis = pipe.image_processor.visualize_normals(normals.prediction)
vis[0].save("einstein_normals.png")
```
The visualization function for normals [`~pipelines.marigold.marigold_image_processing.MarigoldImageProcessor.visualize_normals`] maps the three-dimensional prediction with pixel values in the range `[-1, 1]` into an RGB image.
The visualization function supports flipping surface normals axes to make the visualization compatible with other choices of the frame of reference.
Conceptually, each pixel is painted according to the surface normal vector in the frame of reference, where the `X` axis points right, the `Y` axis points up, and the `Z` axis points at the viewer.
Below is the visualized prediction:
<div class="flex gap-4" style="justify-content: center; width: 100%;">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_normals.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Predicted surface normals visualization
</figcaption>
</div>
</div>
In this example, the tip of the nose almost certainly contains a surface point whose normal vector points straight at the viewer, meaning that its coordinates are `[0, 0, 1]`.
This vector maps to the RGB `[128, 128, 255]`, which corresponds to the violet-blue color.
Similarly, a surface normal on the cheek in the right part of the image has a large `X` component, which increases the red hue.
Points on the shoulders that point up have a large `Y` component, which promotes the green hue.
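The color convention itself is a simple linear mapping of each normal component from `[-1, 1]` into the `[0, 255]` RGB range; a minimal sketch (the exact rounding used by the pipeline may differ):
```python
import numpy as np
normal = np.array([0.0, 0.0, 1.0])  # a surface normal pointing straight at the viewer
rgb = np.round((normal + 1.0) / 2.0 * 255.0).astype(np.uint8)
print(rgb)  # [128 128 255] -> the violet-blue color described above
```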
### Speeding up inference
The above quick start snippets are already optimized for speed: they load the LCM checkpoint, use the `fp16` variant of weights and computation, and perform just one denoising diffusion step.
The `pipe(image)` call completes in 280ms on RTX 3090 GPU.
Internally, the input image is encoded with the Stable Diffusion VAE encoder, then the U-Net performs one denoising step, and finally, the prediction latent is decoded with the VAE decoder into pixel space.
In this case, two out of three module calls are dedicated to converting between pixel and latent space of LDM.
Because Marigold's latent space is compatible with the base Stable Diffusion, it is possible to speed up the pipeline call by more than 3x (85ms on RTX 3090) by using a [lightweight replacement of the SD VAE](../api/models/autoencoder_tiny):
```diff
import diffusers
import torch
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
"prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")
+ pipe.vae = diffusers.AutoencoderTiny.from_pretrained(
+ "madebyollin/taesd", torch_dtype=torch.float16
+ ).cuda()
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(image)
```
As suggested in [Optimizations](../optimization/torch2.0#torch.compile), adding `torch.compile` may squeeze extra performance depending on the target hardware:
```diff
import diffusers
import torch
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
"prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(image)
```
## Qualitative Comparison with Depth Anything
With the above speed optimizations, Marigold delivers more detailed predictions, and does so faster than [Depth Anything](https://huggingface.co/docs/transformers/main/en/model_doc/depth_anything) with its largest checkpoint [LiheYoung/depth-anything-large-hf](https://huggingface.co/LiheYoung/depth-anything-large-hf):
<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_depth.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Marigold LCM fp16 with Tiny AutoEncoder
</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/einstein_depthanything_large.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Depth Anything Large
</figcaption>
</div>
</div>
## Maximizing Precision and Ensembling
Marigold pipelines have a built-in ensembling mechanism combining multiple predictions from different random latents.
This is a brute-force way of improving the precision of predictions, capitalizing on the generative nature of diffusion.
The ensembling path is activated automatically when the `ensemble_size` argument is set greater than `1`.
When aiming for maximum precision, it makes sense to adjust `num_inference_steps` simultaneously with `ensemble_size`.
The recommended values vary across checkpoints but primarily depend on the scheduler type.
The effect of ensembling is particularly well-seen with surface normals:
```python
import diffusers
model_path = "prs-eth/marigold-normals-v1-0"
model_paper_kwargs = {
diffusers.schedulers.DDIMScheduler: {
"num_inference_steps": 10,
"ensemble_size": 10,
},
diffusers.schedulers.LCMScheduler: {
"num_inference_steps": 4,
"ensemble_size": 5,
},
}
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
pipe = diffusers.MarigoldNormalsPipeline.from_pretrained(model_path).to("cuda")
pipe_kwargs = model_paper_kwargs[type(pipe.scheduler)]
depth = pipe(image, **pipe_kwargs)
vis = pipe.image_processor.visualize_normals(depth.prediction)
vis[0].save("einstein_normals.png")
```
<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_normals.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Surface normals, no ensembling
</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_normals.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Surface normals, with ensembling
</figcaption>
</div>
</div>
As can be seen, all areas with fine-grained structures, such as hair, receive more conservative and, on average, more correct predictions.
Such a result is more suitable for precision-sensitive downstream tasks, such as 3D reconstruction.
## Quantitative Evaluation
To evaluate Marigold quantitatively in standard leaderboards and benchmarks (such as NYU, KITTI, and other datasets), follow the evaluation protocol outlined in the paper: load the full precision fp32 model and use appropriate values for `num_inference_steps` and `ensemble_size`.
Optionally seed randomness to ensure reproducibility. Maximizing `batch_size` will deliver maximum device utilization.
```python
import diffusers
import torch
device = "cuda"
seed = 2024
model_path = "prs-eth/marigold-v1-0"
model_paper_kwargs = {
diffusers.schedulers.DDIMScheduler: {
"num_inference_steps": 50,
"ensemble_size": 10,
},
diffusers.schedulers.LCMScheduler: {
"num_inference_steps": 4,
"ensemble_size": 10,
},
}
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
generator = torch.Generator(device=device).manual_seed(seed)
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(model_path).to(device)
pipe_kwargs = model_paper_kwargs[type(pipe.scheduler)]
depth = pipe(image, generator=generator, **pipe_kwargs)
# evaluate metrics
```
## Using Predictive Uncertainty
The ensembling mechanism built into Marigold pipelines combines multiple predictions obtained from different random latents.
As a side effect, it can be used to quantify epistemic (model) uncertainty; simply specify `ensemble_size` greater than 1 and set `output_uncertainty=True`.
The resulting uncertainty will be available in the `uncertainty` field of the output.
It can be visualized as follows:
```python
import diffusers
import torch
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
"prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")
image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
depth = pipe(
image,
ensemble_size=10, # any number greater than 1; higher values yield higher precision
output_uncertainty=True,
)
uncertainty = pipe.image_processor.visualize_uncertainty(depth.uncertainty)
uncertainty[0].save("einstein_depth_uncertainty.png")
```
<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_depth_uncertainty.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Depth uncertainty
</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_normals_uncertainty.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Surface normals uncertainty
</figcaption>
</div>
</div>
The interpretation of uncertainty is easy: higher values (white) correspond to pixels where the model struggles to make consistent predictions.
Evidently, the depth model is the least confident around edges with discontinuity, where the object depth changes drastically.
The surface normals model is the least confident in fine-grained structures, such as hair, and dark areas, such as the collar.
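If a downstream task needs a reliability mask rather than a visualization, the raw `uncertainty` array can also be thresholded directly. The following is a minimal sketch, assuming the default numpy output of the pipeline call above; the cutoff value is a hypothetical choice, not a recommendation from the paper.
```python
import numpy as np

# depth.uncertainty comes from the pipeline call above; we assume the default numpy output here.
unc = np.squeeze(np.asarray(depth.uncertainty))

# Hypothetical cutoff: treat pixels above it as unreliable for downstream use.
reliable = unc < 0.05
print(f"{100 * reliable.mean():.1f}% of pixels marked reliable")
```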
## Frame-by-frame Video Processing with Temporal Consistency
Due to Marigold's generative nature, each prediction is unique and defined by the random noise sampled for the latent initialization.
This becomes an obvious drawback compared to traditional end-to-end dense regression networks, as exemplified in the following videos:
<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama.gif"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">Input video</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama_depth_independent.gif"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">Marigold Depth applied to input video frames independently</figcaption>
</div>
</div>
To address this issue, it is possible to pass the `latents` argument to the pipelines, which defines the starting point of diffusion.
Empirically, we found that a convex combination of the very same starting-point noise latent and the latent corresponding to the previous frame's prediction gives sufficiently smooth results, as implemented in the snippet below:
```python
import imageio
from PIL import Image
from tqdm import tqdm
import diffusers
import torch
device = "cuda"
path_in = "obama.mp4"
path_out = "obama_depth.gif"
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
"prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to(device)
pipe.vae = diffusers.AutoencoderTiny.from_pretrained(
"madebyollin/taesd", torch_dtype=torch.float16
).to(device)
pipe.set_progress_bar_config(disable=True)
with imageio.get_reader(path_in) as reader:
size = reader.get_meta_data()['size']
last_frame_latent = None
latent_common = torch.randn(
(1, 4, 768 * size[1] // (8 * max(size)), 768 * size[0] // (8 * max(size)))
).to(device=device, dtype=torch.float16)
out = []
for frame_id, frame in tqdm(enumerate(reader), desc="Processing Video"):
frame = Image.fromarray(frame)
latents = latent_common
if last_frame_latent is not None:
latents = 0.9 * latents + 0.1 * last_frame_latent
depth = pipe(
frame, match_input_resolution=False, latents=latents, output_latent=True
)
last_frame_latent = depth.latent
out.append(pipe.image_processor.visualize_depth(depth.prediction)[0])
diffusers.utils.export_to_gif(out, path_out, fps=reader.get_meta_data()['fps'])
```
Here, the diffusion process starts from the given computed latent.
The pipeline is called with `output_latent=True` to expose the `latent` field of the output (`depth.latent` in the snippet), which is then used to compute the next frame's latent initialization.
The result is much more stable now:
<div class="flex gap-4">
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama_depth_independent.gif"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">Marigold Depth applied to input video frames independently</figcaption>
</div>
<div style="flex: 1 1 50%; max-width: 50%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_obama_depth_consistent.gif"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">Marigold Depth with forced latents initialization</figcaption>
</div>
</div>
## Marigold for ControlNet
A very common application for depth prediction with diffusion models comes in conjunction with ControlNet.
Depth crispness plays a crucial role in obtaining high-quality results from ControlNet.
As seen in comparisons with other methods above, Marigold excels at that task.
The snippet below demonstrates how to load an image, compute depth, and pass it into ControlNet in a compatible format:
```python
import torch
import diffusers
device = "cuda"
generator = torch.Generator(device=device).manual_seed(2024)
image = diffusers.utils.load_image(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_depth_source.png"
)
pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
"prs-eth/marigold-depth-lcm-v1-0", torch_dtype=torch.float16, variant="fp16"
).to(device)
depth_image = pipe(image, generator=generator).prediction
depth_image = pipe.image_processor.visualize_depth(depth_image, color_map="binary")
depth_image[0].save("motorcycle_controlnet_depth.png")
controlnet = diffusers.ControlNetModel.from_pretrained(
"diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
).to(device)
pipe = diffusers.StableDiffusionXLControlNetPipeline.from_pretrained(
"SG161222/RealVisXL_V4.0", torch_dtype=torch.float16, variant="fp16", controlnet=controlnet
).to(device)
pipe.scheduler = diffusers.DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True)
controlnet_out = pipe(
prompt="high quality photo of a sports bike, city",
negative_prompt="",
guidance_scale=6.5,
num_inference_steps=25,
image=depth_image,
controlnet_conditioning_scale=0.7,
control_guidance_end=0.7,
generator=generator,
).images
controlnet_out[0].save("motorcycle_controlnet_out.png")
```
<div class="flex gap-4">
<div style="flex: 1 1 33%; max-width: 33%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_depth_source.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Input image
</figcaption>
</div>
<div style="flex: 1 1 33%; max-width: 33%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/motorcycle_controlnet_depth.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
Depth in the format compatible with ControlNet
</figcaption>
</div>
<div style="flex: 1 1 33%; max-width: 33%;">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/motorcycle_controlnet_out.png"/>
<figcaption class="mt-1 text-center text-sm text-gray-500">
ControlNet generation, conditioned on depth and prompt: "high quality photo of a sports bike, city"
</figcaption>
</div>
</div>
Hopefully, you will find Marigold useful for solving your downstream tasks, whether as part of a broader generative workflow or for a perception task such as 3D reconstruction.
| diffusers/docs/source/en/using-diffusers/marigold_usage.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/marigold_usage.md",
"repo_id": "diffusers",
"token_count": 9939
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Textual inversion
[[open-in-colab]]
The [`StableDiffusionPipeline`] supports textual inversion, a technique that enables a model like Stable Diffusion to learn a new concept from just a few sample images. This gives you more control over the generated images and allows you to tailor the model towards specific concepts. You can get started quickly with a collection of community created concepts in the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer).
This guide will show you how to run inference with textual inversion using a pre-learned concept from the Stable Diffusion Conceptualizer. If you're interested in teaching a model new concepts with textual inversion, take a look at the [Textual Inversion](../training/text_inversion) training guide.
Import the necessary libraries:
```py
import torch
from diffusers import StableDiffusionPipeline
from diffusers.utils import make_image_grid
```
## Stable Diffusion 1 and 2
Pick a Stable Diffusion checkpoint and a pre-learned concept from the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer):
```py
pretrained_model_name_or_path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
repo_id_embeds = "sd-concepts-library/cat-toy"
```
Now you can load a pipeline, and pass the pre-learned concept to it:
```py
pipeline = StableDiffusionPipeline.from_pretrained(
pretrained_model_name_or_path, torch_dtype=torch.float16, use_safetensors=True
).to("cuda")
pipeline.load_textual_inversion(repo_id_embeds)
```
Create a prompt with the pre-learned concept by using the special placeholder token `<cat-toy>`, and choose the number of samples and rows of images you'd like to generate:
```py
prompt = "a grafitti in a favela wall with a <cat-toy> on it"
num_samples_per_row = 2
num_rows = 2
```
Then run the pipeline (feel free to adjust parameters like `num_inference_steps` and `guidance_scale` to see how they affect image quality), save the generated images, and visualize them with the `make_image_grid` helper function you imported at the beginning:
```py
all_images = []
for _ in range(num_rows):
images = pipeline(prompt, num_images_per_prompt=num_samples_per_row, num_inference_steps=50, guidance_scale=7.5).images
all_images.extend(images)
grid = make_image_grid(all_images, num_rows, num_samples_per_row)
grid
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/textual_inversion_inference.png">
</div>
## Stable Diffusion XL
Stable Diffusion XL (SDXL) can also use textual inversion vectors for inference. In contrast to Stable Diffusion 1 and 2, SDXL has two text encoders so you'll need two textual inversion embeddings - one for each text encoder model.
Let's download the SDXL textual inversion embeddings and have a closer look at their structure:
```py
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
file = hf_hub_download("dn118/unaestheticXL", filename="unaestheticXLv31.safetensors")
state_dict = load_file(file)
state_dict
```
```
{'clip_g': tensor([[ 0.0077, -0.0112, 0.0065, ..., 0.0195, 0.0159, 0.0275],
...,
[-0.0170, 0.0213, 0.0143, ..., -0.0302, -0.0240, -0.0362]],
'clip_l': tensor([[ 0.0023, 0.0192, 0.0213, ..., -0.0385, 0.0048, -0.0011],
...,
[ 0.0475, -0.0508, -0.0145, ..., 0.0070, -0.0089, -0.0163]],
```
There are two tensors, `"clip_g"` and `"clip_l"`.
`"clip_g"` corresponds to the larger text encoder in SDXL, `pipe.text_encoder_2`, while `"clip_l"` refers to `pipe.text_encoder`.
Now you can load each tensor separately by passing them along with the correct text encoder and tokenizer
to [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`]:
```py
from diffusers import AutoPipelineForText2Image
import torch
pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", variant="fp16", torch_dtype=torch.float16)
pipe.to("cuda")
pipe.load_textual_inversion(state_dict["clip_g"], token="unaestheticXLv31", text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
pipe.load_textual_inversion(state_dict["clip_l"], token="unaestheticXLv31", text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
# the embedding should be used as a negative embedding, so we pass it as a negative prompt
generator = torch.Generator().manual_seed(33)
image = pipe("a woman standing in front of a mountain", negative_prompt="unaestheticXLv31", generator=generator).images[0]
image
```
| diffusers/docs/source/en/using-diffusers/textual_inversion_inference.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/textual_inversion_inference.md",
"repo_id": "diffusers",
"token_count": 1722
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# ControlNet
ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) by Lvmin Zhang and Maneesh Agrawala.
This example is based on [the training example in the original ControlNet repository](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md). It trains a ControlNet to fill circles using a [small synthetic dataset](https://huggingface.co/datasets/fusing/fill50k).
## Installing the dependencies
Before running the script below, make sure to install the library's training dependencies.
<Tip warning={true}>
To successfully run the latest versions of the example scripts, we highly recommend installing from source and keeping the installation up to date. We update the example scripts frequently and install example-specific requirements.
</Tip>
To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```
Then navigate to the [example folder](https://github.com/huggingface/diffusers/tree/main/examples/controlnet):
```bash
cd examples/controlnet
```
Now run:
```bash
pip install -r requirements.txt
```
And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment:
```bash
accelerate config
```
Or initialize a default 🤗Accelerate configuration without answering questions about your environment:
```bash
accelerate config default
```
Or, if your environment doesn't support an interactive shell like a notebook, you can use:
```python
from accelerate.utils import write_basic_config
write_basic_config()
```
## Circle filling dataset
The original dataset is hosted in the ControlNet [repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip), but we re-uploaded it [here](https://huggingface.co/datasets/fusing/fill50k) to be compatible with 🤗 Datasets, so that the data loading can be handled within the training script.
Our training example uses [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5), which was also used to train the original ControlNet. However, a ControlNet can be trained to augment any compatible Stable Diffusion model, such as [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) or [`stabilityai/stable-diffusion-2-1`](https://huggingface.co/stabilityai/stable-diffusion-2-1).
To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
## Training
Download the following images, which are used as conditioning images for validation during training:
```sh
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument.
The training script creates and saves a `diffusion_pytorch_model.bin` file in your repository.
```bash
export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=4 \
--push_to_hub
```
This default configuration requires ~38 GB of VRAM.
By default, the training script logs the results to TensorBoard. Pass `--report_to wandb` to use Weights & Biases instead.
Gradient accumulation with a smaller batch size can be used to reduce the training requirements to ~20 GB of VRAM.
```bash
export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--push_to_hub
```
## Training with multiple GPUs
`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch)
for running distributed training with `accelerate`. Here is an example command:
```bash
export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch --mixed_precision="fp16" --multi_gpu train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=4 \
--mixed_precision="fp16" \
--tracker_project_name="controlnet-demo" \
--report_to=wandb \
--push_to_hub
```
## Example results
#### After 300 steps with batch size 8:
| | |
|-------------------|:-------------------------:|
| | red circle with blue background |
|  |  |
| | cyan circle with brown floral background |
|  |  |
#### After 6000 steps with batch size 8:
| | |
|-------------------|:-------------------------:|
| | red circle with blue background |
|  |  |
| | cyan circle with brown floral background |
|  |  |
## Training on a 16 GB GPU
Enable the following optimizations to train on a 16 GB GPU:
- Gradient checkpointing
- bitsandbytes' [8-bit optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation) (follow the linked instructions if it is not installed)
Now you can launch the training script:
```bash
export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--use_8bit_adam \
--push_to_hub
```
## Training on a 12 GB GPU
Enable the following optimizations to train on a 12 GB GPU:
- Gradient checkpointing
- bitsandbytes' [8-bit optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation) (follow the linked instructions if it is not installed)
- [xFormers](https://huggingface.co/docs/diffusers/training/optimization/xformers) (follow the linked instructions if it is not installed)
- Set gradients to `None`
```bash
export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--use_8bit_adam \
--enable_xformers_memory_efficient_attention \
--set_grads_to_none \
--push_to_hub
```
Make sure `xformers` is installed with `pip install xformers` before using `enable_xformers_memory_efficient_attention`.
## Training on an 8 GB GPU
We have not exhaustively tested DeepSpeed support for ControlNet. While the configuration does
save memory, we have not confirmed that training completes successfully with it. You will very likely have to change the configuration to get a successful training run.
Enable the following optimizations to train on an 8 GB GPU:
- Gradient checkpointing
- bitsandbytes' [8-bit optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation) (follow the linked instructions if it is not installed)
- [xFormers](https://huggingface.co/docs/diffusers/training/optimization/xformers) (follow the linked instructions if it is not installed)
- Set gradients to `None`
- DeepSpeed stage 2 with parameter and optimizer offloading
- fp16 mixed precision
[DeepSpeed](https://www.deepspeed.ai/) can offload tensors from VRAM to either CPU or NVME.
This requires significantly more RAM (about 25 GB).
You have to configure your environment with `accelerate config` to enable DeepSpeed stage 2.
The configuration file should look like this:
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
gradient_accumulation_steps: 4
offload_optimizer_device: cpu
offload_param_device: cpu
zero3_init_flag: false
zero_stage: 2
distributed_type: DEEPSPEED
```
<Tip>
See the [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options.
</Tip>
Changing the default Adam optimizer to DeepSpeed's Adam
`deepspeed.ops.adam.DeepSpeedCPUAdam` gives a substantial speedup, but
it requires a CUDA toolchain with the same version as PyTorch. 8-bit optimizers
do not seem to be compatible with DeepSpeed at the moment.
```bash
export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--enable_xformers_memory_efficient_attention \
--set_grads_to_none \
--mixed_precision fp16 \
--push_to_hub
```
## Inference
The trained model can be run with the [`StableDiffusionControlNetPipeline`].
Set `base_model_path` and `controlnet_path` to the values of `--pretrained_model_name_or_path` and
`--output_dir` that you specified in the training script, respectively.
```py
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.utils import load_image
import torch
base_model_path = "path to model"
controlnet_path = "path to controlnet"
controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
base_model_path, controlnet=controlnet, torch_dtype=torch.float16
)
# speed up the diffusion process with a faster scheduler and memory optimizations
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# remove the following line if xformers is not installed
pipe.enable_xformers_memory_efficient_attention()
pipe.enable_model_cpu_offload()
control_image = load_image("./conditioning_image_1.png")
prompt = "pale golden rod circle with old lace background"
# generate the image
generator = torch.manual_seed(0)
image = pipe(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]
image.save("./output.png")
```
| diffusers/docs/source/ko/training/controlnet.md/0 | {
"file_path": "diffusers/docs/source/ko/training/controlnet.md",
"repo_id": "diffusers",
"token_count": 7824
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Text-guided depth-to-image generation
[[open-in-colab]]
The [`StableDiffusionDepth2ImgPipeline`] lets you pass a text prompt and an initial image to condition the generation of new images. In addition, you can also pass a `depth_map` to preserve the image structure. If no `depth_map` is provided, the pipeline automatically predicts the depth via an integrated [depth-estimation model](https://github.com/isl-org/MiDaS).
Start by creating an instance of the [`StableDiffusionDepth2ImgPipeline`]:
```python
import torch
import requests
from PIL import Image
from diffusers import StableDiffusionDepth2ImgPipeline
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth",
torch_dtype=torch.float16,
).to("cuda")
```
Now pass your prompt to the pipeline. You can also pass a `negative_prompt` to prevent certain words from guiding how an image is generated:
```python
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
init_image = Image.open(requests.get(url, stream=True).raw)
prompt = "two tigers"
n_prompt = "bad, deformed, ugly, bad anatomy"
image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0]
image
```
| Input | Output |
|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------|
| <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/coco-cats.png" width="500"/> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/depth2img-tigers.png" width="500"/> |
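If you already have a depth map (for example, one produced by an external estimator), you can pass it explicitly via the `depth_map` argument instead of relying on the built-in estimation. The snippet below is only a sketch: the random tensor stands in for a real depth map, and you should check the [`StableDiffusionDepth2ImgPipeline`] API reference for the exact expected shape and value range.
```python
import torch

# Placeholder depth map; in practice this would come from MiDaS or another depth estimator.
depth_map = torch.rand(1, 384, 384).to("cuda", torch.float16)

image = pipe(
    prompt=prompt,
    image=init_image,
    negative_prompt=n_prompt,
    depth_map=depth_map,
    strength=0.7,
).images[0]
```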
Play around with the Spaces below and see if you notice a difference between images generated with and without a depth map!
<iframe
src="https://radames-stable-diffusion-depth2img.hf.space"
frameborder="0"
width="850"
height="500"
></iframe>
| diffusers/docs/source/ko/using-diffusers/depth2img.md/0 | {
"file_path": "diffusers/docs/source/ko/using-diffusers/depth2img.md",
"repo_id": "diffusers",
"token_count": 1376
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Prompt weighting
[[open-in-colab]]
Text-guided diffusion models generate images based on a given text prompt.
The text prompt can include multiple concepts that the model should generate, and it is often desirable to weight certain parts of the prompt more or less strongly.
Diffusion models work by conditioning the cross-attention layers of the model with contextualized text embeddings
(see the [Stable Diffusion Guide](https://huggingface.co/docs/optimum-neuron/main/en/package_reference/modeling#stable-diffusion) for more information).
Thus, a simple way to emphasize (or de-emphasize) certain parts of the prompt is to increase or decrease the scale of the text embedding vectors that correspond to the relevant part of the prompt.
This is called "prompt weighting" and has been a highly requested feature in the community (see the issue [here](https://github.com/huggingface/diffusers/issues/2431)).
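Before looking at the tooling recommended below, here is a minimal, low-level sketch of that idea: encode the prompt yourself and scale the embedding of the token you want to emphasize. The token index used here is an assumption for this particular prompt; in practice you would look it up with the tokenizer.
```py
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")

prompt = "a red cat playing with a ball"
text_inputs = pipe.tokenizer(
    prompt,
    padding="max_length",
    max_length=pipe.tokenizer.model_max_length,
    return_tensors="pt",
)
with torch.no_grad():
    prompt_embeds = pipe.text_encoder(text_inputs.input_ids)[0]

# Assumed position of the "ball" token in this tokenized prompt; scale it up slightly.
ball_token_index = 7
prompt_embeds[:, ball_token_index] *= 1.1

image = pipe(prompt_embeds=prompt_embeds, num_inference_steps=20).images[0]
```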
## How to weight prompts in Diffusers
We believe that the role of `diffusers` is to be a toolbox that provides the essential features enabling other projects,
such as [InvokeAI](https://github.com/invoke-ai/InvokeAI) or [diffuzers](https://github.com/abhishekkrthakur/diffuzers), to build powerful UIs.
To support manipulating prompts, `diffusers` exposes a
[prompt_embeds](https://huggingface.co/docs/diffusers/v0.14.0/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.prompt_embeds)
argument in many pipelines, such as the
[StableDiffusionPipeline](https://huggingface.co/docs/diffusers/v0.18.2/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline),
so that "prompt-weighted"/scaled text embeddings can be passed directly to the pipeline.
The [Compel library](https://github.com/damian0815/compel) provides an easy way to emphasize or de-emphasize parts of the prompt.
We strongly recommend using it instead of preparing the embeddings yourself.
Let's look at a simple example.
We want to generate an image of `"a red cat playing with a ball"` as follows:
```py
import torch
from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
prompt = "a red cat playing with a ball"
generator = torch.Generator(device="cpu").manual_seed(33)
image = pipe(prompt, generator=generator, num_inference_steps=20).images[0]
image
```
The generated image:

As you can see, there is no "ball" in the image. Let's emphasize that part!
First, install the `compel` library:
```sh
pip install compel
```
Then create a `Compel` object:
```py
from compel import Compel
compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
```
이제 `"++"` 를 사용해서 "공" 을 강조해 봅시다:
```py
prompt = "a red cat playing with a ball++"
```
Instead of passing this prompt directly to the pipeline, it has to be processed with `compel_proc` first:
```py
prompt_embeds = compel_proc(prompt)
```
Now the `prompt_embeds` can be passed directly to the pipeline:
```py
generator = torch.Generator(device="cpu").manual_seed(33)
image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
image
```
이제 "공"이 있는 그림을 출력할 수 있습니다!

Similarly, you can de-emphasize parts of the sentence by using the `--` suffix on a word. Give it a try with the short sketch below!
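For example, a minimal sketch reusing the `pipe` and `compel_proc` objects created above (the specific prompt is just an illustration):
```py
prompt = "a red-- cat playing with a ball"
prompt_embeds = compel_proc(prompt)

generator = torch.Generator(device="cpu").manual_seed(33)
image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
image
```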
If your favorite pipeline does not have a `prompt_embeds` input, please open a new issue.
The Diffusers team tries to be as responsive as possible.
Compel 1.1.6 adds a utility class to simplify using textual inversions.
Instantiate a `DiffusersTextualInversionManager` and pass it to the Compel init:
```py
from compel import Compel, DiffusersTextualInversionManager

textual_inversion_manager = DiffusersTextualInversionManager(pipe)
compel = Compel(
tokenizer=pipe.tokenizer,
text_encoder=pipe.text_encoder,
textual_inversion_manager=textual_inversion_manager)
```
For more information, check out the documentation of the [compel](https://github.com/damian0815/compel) library.
| diffusers/docs/source/ko/using-diffusers/weighted_prompts.md/0 | {
"file_path": "diffusers/docs/source/ko/using-diffusers/weighted_prompts.md",
"repo_id": "diffusers",
"token_count": 3376
} |
# -*- coding: utf-8 -*-
import inspect
from typing import Optional, Union
import numpy as np
import PIL.Image
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.torch_utils import randn_tensor
def preprocess(image, w, h):
if isinstance(image, torch.Tensor):
return image
elif isinstance(image, PIL.Image.Image):
image = [image]
if isinstance(image[0], PIL.Image.Image):
image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
image = np.concatenate(image, axis=0)
image = np.array(image).astype(np.float32) / 255.0
image = image.transpose(0, 3, 1, 2)
image = 2.0 * image - 1.0
image = torch.from_numpy(image)
elif isinstance(image[0], torch.Tensor):
image = torch.cat(image, dim=0)
return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # Spherical linear interpolation; initialize the flag so NumPy inputs do not hit an unbound variable below.
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
inputs_are_torch = True
input_device = v0.device
v0 = v0.cpu().numpy()
v1 = v1.cpu().numpy()
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
if np.abs(dot) > DOT_THRESHOLD:
v2 = (1 - t) * v0 + t * v1
else:
theta_0 = np.arccos(dot)
sin_theta_0 = np.sin(theta_0)
theta_t = theta_0 * t
sin_theta_t = np.sin(theta_t)
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
s1 = sin_theta_t / sin_theta_0
v2 = s0 * v0 + s1 * v1
if inputs_are_torch:
v2 = torch.from_numpy(v2).to(input_device)
return v2
def spherical_dist_loss(x, y):
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
for param in model.parameters():
param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
clip_model: CLIPModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
feature_extractor: CLIPImageProcessor,
coca_model=None,
coca_tokenizer=None,
coca_transform=None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
clip_model=clip_model,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
feature_extractor=feature_extractor,
coca_model=coca_model,
coca_tokenizer=coca_tokenizer,
coca_transform=coca_transform,
)
self.feature_extractor_size = (
feature_extractor.size
if isinstance(feature_extractor.size, int)
else feature_extractor.size["shortest_edge"]
)
self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
set_requires_grad(self.text_encoder, False)
set_requires_grad(self.clip_model, False)
def freeze_vae(self):
set_requires_grad(self.vae, False)
def unfreeze_vae(self):
set_requires_grad(self.vae, True)
def freeze_unet(self):
set_requires_grad(self.unet, False)
def unfreeze_unet(self):
set_requires_grad(self.unet, True)
def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
timesteps = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
if not isinstance(image, torch.Tensor):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
image = image.to(device=device, dtype=dtype)
if isinstance(generator, list):
init_latents = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
]
init_latents = torch.cat(init_latents, dim=0)
else:
init_latents = self.vae.encode(image).latent_dist.sample(generator)
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
init_latents = 0.18215 * init_latents
init_latents = init_latents.repeat_interleave(batch_size, dim=0)
noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
# get latents
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
latents = init_latents
return latents
def get_image_description(self, image):
transformed_image = self.coca_transform(image).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
def get_clip_image_embeddings(self, image, batch_size):
clip_image_input = self.feature_extractor.preprocess(image)
clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
return image_embeddings_clip
@torch.enable_grad()
def cond_fn(
self,
latents,
timestep,
index,
text_embeddings,
noise_pred_original,
original_image_embeddings_clip,
clip_guidance_scale,
):
latents = latents.detach().requires_grad_()
latent_model_input = self.scheduler.scale_model_input(latents, timestep)
# predict the noise residual
noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
beta_prod_t = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
fac = torch.sqrt(beta_prod_t)
sample = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, LMSDiscreteScheduler):
sigma = self.scheduler.sigmas[index]
sample = latents - sigma * noise_pred
else:
raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
sample = 1 / 0.18215 * sample
image = self.vae.decode(sample).sample
image = (image / 2 + 0.5).clamp(0, 1)
image = transforms.Resize(self.feature_extractor_size)(image)
image = self.normalize(image).to(latents.dtype)
image_embeddings_clip = self.clip_model.get_image_features(image)
image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
grads = -torch.autograd.grad(loss, latents)[0]
if isinstance(self.scheduler, LMSDiscreteScheduler):
latents = latents.detach() + grads * (sigma**2)
noise_pred = noise_pred_original
else:
noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(
self,
style_image: Union[torch.Tensor, PIL.Image.Image],
content_image: Union[torch.Tensor, PIL.Image.Image],
style_prompt: Optional[str] = None,
content_prompt: Optional[str] = None,
height: Optional[int] = 512,
width: Optional[int] = 512,
noise_strength: float = 0.6,
num_inference_steps: Optional[int] = 50,
guidance_scale: Optional[float] = 7.5,
batch_size: Optional[int] = 1,
eta: float = 0.0,
clip_guidance_scale: Optional[float] = 100,
generator: Optional[torch.Generator] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
slerp_latent_style_strength: float = 0.8,
slerp_prompt_style_strength: float = 0.1,
slerp_clip_image_style_strength: float = 0.1,
):
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if isinstance(generator, torch.Generator) and batch_size > 1:
generator = [generator] + [None] * (batch_size - 1)
coca_is_none = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
coca_is_none = [x[0] for x in coca_is_none if x[1]]
coca_is_none_str = ", ".join(coca_is_none)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(coca_is_none):
raise ValueError(
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
)
content_prompt = self.get_image_description(content_image)
if style_prompt is None:
if len(coca_is_none):
raise ValueError(
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
)
style_prompt = self.get_image_description(style_image)
# get prompt text embeddings for content and style
content_text_input = self.tokenizer(
content_prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
style_text_input = self.tokenizer(
style_prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
# duplicate text embeddings for each generation per prompt
text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
# set timesteps
accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
extra_set_kwargs = {}
if accepts_offset:
extra_set_kwargs["offset"] = 1
self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device)
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
latent_timestep = timesteps[:1].repeat(batch_size)
# Preprocess image
preprocessed_content_image = preprocess(content_image, width, height)
content_latents = self.prepare_latents(
preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
)
preprocessed_style_image = preprocess(style_image, width, height)
style_latents = self.prepare_latents(
preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
)
latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
if clip_guidance_scale > 0:
content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
clip_image_embeddings = slerp(
slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
max_length = content_text_input.input_ids.shape[-1]
uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
self.device
)
else:
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
latents = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform classifier free guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
text_embeddings_for_guidance = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
noise_pred, latents = self.cond_fn(
latents,
t,
i,
text_embeddings_for_guidance,
noise_pred,
clip_image_embeddings,
clip_guidance_scale,
)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
progress_bar.update()
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| diffusers/examples/community/clip_guided_images_mixing_stable_diffusion.py/0 | {
"file_path": "diffusers/examples/community/clip_guided_images_mixing_stable_diffusion.py",
"repo_id": "diffusers",
"token_count": 8765
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
from diffusers.configuration_utils import register_to_config
from diffusers.image_processor import VaeImageProcessor
from diffusers.models.autoencoders import AutoencoderKL
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel, UNet2DConditionOutput
from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class UNet2DConditionModelHighResFix(UNet2DConditionModel):
r"""
A conditional 2D UNet model that applies Kohya fix proposed for high resolution image generation.
    This model inherits from [`UNet2DConditionModel`]. Check the superclass documentation to learn about all the parameters.
Parameters:
high_res_fix (`List[Dict]`, *optional*, defaults to `[{'timestep': 600, 'scale_factor': 0.5, 'block_num': 1}]`):
Enables Kohya fix for high resolution generation. The activation maps are scaled based on the scale_factor up to the timestep at specified block_num.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(self, high_res_fix: List[Dict] = [{"timestep": 600, "scale_factor": 0.5, "block_num": 1}], **kwargs):
super().__init__(**kwargs)
if high_res_fix:
self.config.high_res_fix = sorted(high_res_fix, key=lambda x: x["timestep"], reverse=True)
@classmethod
def _resize(cls, sample, target=None, scale_factor=1, mode="bicubic"):
dtype = sample.dtype
if dtype == torch.bfloat16:
sample = sample.to(torch.float32)
if target is not None:
if sample.shape[-2:] != target.shape[-2:]:
sample = nn.functional.interpolate(sample, size=target.shape[-2:], mode=mode, align_corners=False)
elif scale_factor != 1:
sample = nn.functional.interpolate(sample, scale_factor=scale_factor, mode=mode, align_corners=False)
return sample.to(dtype)
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
) -> Union[UNet2DConditionOutput, Tuple]:
r"""
The [`UNet2DConditionModel`] forward method.
Args:
sample (`torch.FloatTensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
encoder_hidden_states (`torch.FloatTensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
through the `self.time_embedding` layer to obtain the timestep embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
A tuple of tensors that if specified are added to the residuals of down unet blocks.
mid_block_additional_residual: (`torch.Tensor`, *optional*):
A tensor that if specified is added to the residual of the middle unet block.
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
encoder_attention_mask (`torch.Tensor`):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
`True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
which adds large negative values to the attention scores corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
Returns:
[`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
otherwise a `tuple` is returned where the first element is the sample tensor.
"""
        # By default samples have to be at least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
for dim in sample.shape[-2:]:
if dim % default_overall_up_factor != 0:
# Forward upsample size to force interpolation output size.
forward_upsample_size = True
break
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None:
encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
t_emb = self.get_time_embed(sample=sample, timestep=timestep)
emb = self.time_embedding(t_emb, timestep_cond)
aug_emb = None
class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
if class_emb is not None:
if self.config.class_embeddings_concat:
emb = torch.cat([emb, class_emb], dim=-1)
else:
emb = emb + class_emb
aug_emb = self.get_aug_embed(
emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
if self.config.addition_embed_type == "image_hint":
aug_emb, hint = aug_emb
sample = torch.cat([sample, hint], dim=1)
emb = emb + aug_emb if aug_emb is not None else emb
if self.time_embed_act is not None:
emb = self.time_embed_act(emb)
encoder_hidden_states = self.process_encoder_hidden_states(
encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
# 2. pre-process
sample = self.conv_in(sample)
# 2.5 GLIGEN position net
if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
cross_attention_kwargs = cross_attention_kwargs.copy()
gligen_args = cross_attention_kwargs.pop("gligen")
cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
# 3. down
# we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
# to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
if cross_attention_kwargs is not None:
cross_attention_kwargs = cross_attention_kwargs.copy()
lora_scale = cross_attention_kwargs.pop("scale", 1.0)
else:
lora_scale = 1.0
if USE_PEFT_BACKEND:
# weight the lora layers by setting `lora_scale` for each PEFT layer
scale_lora_layers(self, lora_scale)
is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
# using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
is_adapter = down_intrablock_additional_residuals is not None
# maintain backward compatibility for legacy usage, where
# T2I-Adapter and ControlNet both use down_block_additional_residuals arg
# but can only use one or the other
if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
deprecate(
"T2I should not use down_block_additional_residuals",
"1.3.0",
"Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. ",
standard_warn=False,
)
down_intrablock_additional_residuals = down_block_additional_residuals
is_adapter = True
down_block_res_samples = (sample,)
for down_i, downsample_block in enumerate(self.down_blocks):
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
# For t2i-adapter CrossAttnDownBlock2D
additional_residuals = {}
if is_adapter and len(down_intrablock_additional_residuals) > 0:
additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
**additional_residuals,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
if is_adapter and len(down_intrablock_additional_residuals) > 0:
sample += down_intrablock_additional_residuals.pop(0)
down_block_res_samples += res_samples
# kohya high res fix
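# Editor's note: while the current timestep is above the configured threshold, the activations of the
# matching down block are rescaled by `scale_factor`; sample and skip activations are brought back to a
# common resolution again in the up pass (step 5 below).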
if self.config.high_res_fix:
for high_res_fix in self.config.high_res_fix:
if timestep > high_res_fix["timestep"] and down_i == high_res_fix["block_num"]:
sample = self.__class__._resize(sample, scale_factor=high_res_fix["scale_factor"])
break
if is_controlnet:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = down_block_res_sample + down_block_additional_residual
new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# 4. mid
if self.mid_block is not None:
if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = self.mid_block(sample, emb)
# To support T2I-Adapter-XL
if (
is_adapter
and len(down_intrablock_additional_residuals) > 0
and sample.shape == down_intrablock_additional_residuals[0].shape
):
sample += down_intrablock_additional_residuals.pop(0)
if is_controlnet:
sample = sample + mid_block_additional_residual
# 5. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# up scaling of kohya high res fix
if self.config.high_res_fix is not None:
if res_samples[0].shape[-2:] != sample.shape[-2:]:
sample = self.__class__._resize(sample, target=res_samples[0])
res_samples_up_sampled = (res_samples[0],)
for res_sample in res_samples[1:]:
res_samples_up_sampled += (self.__class__._resize(res_sample, target=res_samples[0]),)
res_samples = res_samples_up_sampled
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
)
# 6. post-process
if self.conv_norm_out:
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if USE_PEFT_BACKEND:
# remove `lora_scale` from each PEFT layer
unscale_lora_layers(self, lora_scale)
if not return_dict:
return (sample,)
return UNet2DConditionOutput(sample=sample)
@classmethod
def from_unet(cls, unet: UNet2DConditionModel, high_res_fix: list):
config = dict(unet.config)
config["high_res_fix"] = high_res_fix
unet_high_res = cls(**config)
unet_high_res.load_state_dict(unet.state_dict())
unet_high_res.to(unet.dtype)
return unet_high_res
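# Editor's note (hedged sketch, not part of the upstream pipeline): the wrapper above is normally
# built through `from_unet`, which copies the base UNet's config, adds the `high_res_fix` entry and
# reloads the original weights. A hypothetical way to construct it by hand could look like this;
# the repo id is purely illustrative and the names rely on the imports at the top of this file.
def _example_wrap_unet_with_high_res_fix():
    base_unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
    return UNet2DConditionModelHighResFix.from_unet(
        unet=base_unet,
        high_res_fix=[{"timestep": 600, "scale_factor": 0.5, "block_num": 1}],
    )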
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",
...     custom_pipeline="kohya_hires_fix",
...     torch_dtype=torch.float16,
...     high_res_fix=[{'timestep': 600, 'scale_factor': 0.5, 'block_num': 1}])
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt, height=1000, width=1600).images[0]
```
"""
class StableDiffusionHighResFixPipeline(StableDiffusionPipeline):
r"""
Pipeline for text-to-image generation using Stable Diffusion with the Kohya fix for high-resolution generation.
This model inherits from [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods.
The pipeline also inherits the following loading methods:
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
- [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
- [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
text_encoder ([`~transformers.CLIPTextModel`]):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
tokenizer ([`~transformers.CLIPTokenizer`]):
A `CLIPTokenizer` to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
high_res_fix (`List[Dict]`, *optional*, defaults to `[{'timestep': 600, 'scale_factor': 0.5, 'block_num': 1}]`):
Enables the Kohya fix for high-resolution generation. At the specified `block_num`, the activation maps are scaled by `scale_factor` for timesteps above the given `timestep`.
"""
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
_optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
_exclude_from_cpu_offload = ["safety_checker"]
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
image_encoder: CLIPVisionModelWithProjection = None,
requires_safety_checker: bool = True,
high_res_fix: List[Dict] = [{"timestep": 600, "scale_factor": 0.5, "block_num": 1}],
):
super().__init__(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
requires_safety_checker=requires_safety_checker,
)
unet = UNet2DConditionModelHighResFix.from_unet(unet=unet, high_res_fix=high_res_fix)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
| diffusers/examples/community/kohya_hires_fix.py/0 | {
"file_path": "diffusers/examples/community/kohya_hires_fix.py",
"repo_id": "diffusers",
"token_count": 10596
} |
#!/usr/bin/env python3
import torch
from diffusers import DiffusionPipeline
class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
def __init__(self, unet, scheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
def __call__(self):
image = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
)
timestep = 1
model_output = self.unet(image, timestep).sample
scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
return result
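# Editor's note (hedged usage sketch, not part of the original file): any checkpoint that provides a
# `unet` and a `scheduler` should work; the DDPM checkpoint below is only an illustrative choice,
# mirroring how community pipelines are usually loaded.
def _example_one_forward():
    pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet")
    output = pipe()
    print(output.shape)  # (1, in_channels, sample_size, sample_size)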
| diffusers/examples/community/one_step_unet.py/0 | {
"file_path": "diffusers/examples/community/one_step_unet.py",
"repo_id": "diffusers",
"token_count": 299
} |
# Copyright 2024 Jingyang Zhang and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import inspect
import math
import numbers
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import (
FromSingleFileMixin,
IPAdapterMixin,
StableDiffusionLoraLoaderMixin,
TextualInversionLoaderMixin,
)
from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
from diffusers.models.attention_processor import Attention, FusedAttnProcessor2_0
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
USE_PEFT_BACKEND,
deprecate,
logging,
replace_example_docstring,
scale_lora_layers,
unscale_lora_layers,
)
from diffusers.utils.torch_utils import randn_tensor
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionPipeline
>>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
```
"""
class GaussianSmoothing(nn.Module):
"""
Copied from official repo: https://github.com/showlab/BoxDiff/blob/master/utils/gaussian_smoothing.py
Apply Gaussian smoothing on a
1d, 2d or 3d tensor. Filtering is performed separately for each channel
in the input using a depthwise convolution.
Arguments:
channels (int, sequence): Number of channels of the input tensors. Output will
have this number of channels as well.
kernel_size (int, sequence): Size of the gaussian kernel.
sigma (float, sequence): Standard deviation of the gaussian kernel.
dim (int, optional): The number of dimensions of the data.
Default value is 2 (spatial).
"""
def __init__(self, channels, kernel_size, sigma, dim=2):
super(GaussianSmoothing, self).__init__()
if isinstance(kernel_size, numbers.Number):
kernel_size = [kernel_size] * dim
if isinstance(sigma, numbers.Number):
sigma = [sigma] * dim
# The gaussian kernel is the product of the
# gaussian function of each dimension.
kernel = 1
meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2))
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / torch.sum(kernel)
# Reshape to depthwise convolutional weight
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer("weight", kernel)
self.groups = channels
if dim == 1:
self.conv = F.conv1d
elif dim == 2:
self.conv = F.conv2d
elif dim == 3:
self.conv = F.conv3d
else:
raise RuntimeError("Only 1, 2 and 3 dimensions are supported. Received {}.".format(dim))
def forward(self, input):
"""
Apply gaussian filter to input.
Arguments:
input (torch.Tensor): Input to apply gaussian filter on.
Returns:
filtered (torch.Tensor): Filtered output.
"""
return self.conv(input, weight=self.weight.to(input.dtype), groups=self.groups)
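# Editor's note (hedged usage sketch): mirrors how the pipeline below applies the smoother to a single
# 16x16 cross-attention map: the map gets explicit batch/channel dims, is reflect-padded, filtered,
# then squeezed back to its original shape.
def _example_smooth_attention_map():
    smoothing = GaussianSmoothing(channels=1, kernel_size=3, sigma=0.5, dim=2)
    attention_map = torch.rand(16, 16)
    padded = F.pad(attention_map.unsqueeze(0).unsqueeze(0), (1, 1, 1, 1), mode="reflect")
    return smoothing(padded).squeeze(0).squeeze(0)  # back to (16, 16)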
class AttendExciteCrossAttnProcessor:
def __init__(self, attnstore, place_in_unet):
super().__init__()
self.attnstore = attnstore
self.place_in_unet = place_in_unet
def __call__(
self,
attn: Attention,
hidden_states: torch.FloatTensor,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
) -> torch.Tensor:
batch_size, sequence_length, _ = hidden_states.shape
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size=1)
query = attn.to_q(hidden_states)
is_cross = encoder_hidden_states is not None
encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
key = attn.to_k(encoder_hidden_states)
value = attn.to_v(encoder_hidden_states)
query = attn.head_to_batch_dim(query)
key = attn.head_to_batch_dim(key)
value = attn.head_to_batch_dim(value)
attention_probs = attn.get_attention_scores(query, key, attention_mask)
self.attnstore(attention_probs, is_cross, self.place_in_unet)
hidden_states = torch.bmm(attention_probs, value)
hidden_states = attn.batch_to_head_dim(hidden_states)
# linear proj
hidden_states = attn.to_out[0](hidden_states)
# dropout
hidden_states = attn.to_out[1](hidden_states)
return hidden_states
class AttentionControl(abc.ABC):
def step_callback(self, x_t):
return x_t
def between_steps(self):
return
# @property
# def num_uncond_att_layers(self):
# return 0
@abc.abstractmethod
def forward(self, attn, is_cross: bool, place_in_unet: str):
raise NotImplementedError
def __call__(self, attn, is_cross: bool, place_in_unet: str):
if self.cur_att_layer >= self.num_uncond_att_layers:
self.forward(attn, is_cross, place_in_unet)
self.cur_att_layer += 1
if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:
self.cur_att_layer = 0
self.cur_step += 1
self.between_steps()
def reset(self):
self.cur_step = 0
self.cur_att_layer = 0
def __init__(self):
self.cur_step = 0
self.num_att_layers = -1
self.cur_att_layer = 0
class AttentionStore(AttentionControl):
@staticmethod
def get_empty_store():
return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []}
def forward(self, attn, is_cross: bool, place_in_unet: str):
key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
if attn.shape[1] <= 32**2: # avoid memory overhead
self.step_store[key].append(attn)
return attn
def between_steps(self):
self.attention_store = self.step_store
if self.save_global_store:
with torch.no_grad():
if len(self.global_store) == 0:
self.global_store = self.step_store
else:
for key in self.global_store:
for i in range(len(self.global_store[key])):
self.global_store[key][i] += self.step_store[key][i].detach()
self.step_store = self.get_empty_store()
self.step_store = self.get_empty_store()
def get_average_attention(self):
average_attention = self.attention_store
return average_attention
def get_average_global_attention(self):
average_attention = {
key: [item / self.cur_step for item in self.global_store[key]] for key in self.attention_store
}
return average_attention
def reset(self):
super(AttentionStore, self).reset()
self.step_store = self.get_empty_store()
self.attention_store = {}
self.global_store = {}
def __init__(self, save_global_store=False):
"""
Initialize an empty AttentionStore
:param step_index: used to visualize only a specific step in the diffusion process
"""
super(AttentionStore, self).__init__()
self.save_global_store = save_global_store
self.step_store = self.get_empty_store()
self.attention_store = {}
self.global_store = {}
self.curr_step_index = 0
self.num_uncond_att_layers = 0
def aggregate_attention(
attention_store: AttentionStore, res: int, from_where: List[str], is_cross: bool, select: int
) -> torch.Tensor:
"""Aggregates the attention across the different layers and heads at the specified resolution."""
out = []
attention_maps = attention_store.get_average_attention()
# for k, v in attention_maps.items():
# for vv in v:
# print(vv.shape)
# exit()
num_pixels = res**2
for location in from_where:
for item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]:
if item.shape[1] == num_pixels:
cross_maps = item.reshape(1, -1, res, res, item.shape[-1])[select]
out.append(cross_maps)
out = torch.cat(out, dim=0)
out = out.sum(0) / out.shape[0]
return out
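# Editor's note: the aggregated map has shape (res, res, context_length); with `is_cross=True` this is
# one spatial attention map per text token, averaged over heads and over the requested UNet locations.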
def register_attention_control(model, controller):
attn_procs = {}
cross_att_count = 0
for name in model.unet.attn_processors.keys():
# cross_attention_dim = None if name.endswith("attn1.processor") else model.unet.config.cross_attention_dim
if name.startswith("mid_block"):
# hidden_size = model.unet.config.block_out_channels[-1]
place_in_unet = "mid"
elif name.startswith("up_blocks"):
# block_id = int(name[len("up_blocks.")])
# hidden_size = list(reversed(model.unet.config.block_out_channels))[block_id]
place_in_unet = "up"
elif name.startswith("down_blocks"):
# block_id = int(name[len("down_blocks.")])
# hidden_size = model.unet.config.block_out_channels[block_id]
place_in_unet = "down"
else:
continue
cross_att_count += 1
attn_procs[name] = AttendExciteCrossAttnProcessor(attnstore=controller, place_in_unet=place_in_unet)
model.unet.set_attn_processor(attn_procs)
controller.num_att_layers = cross_att_count
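# Editor's note (hedged usage sketch): `register_attention_control` expects an object exposing a `.unet`
# attribute (the pipeline itself in this file). A typical wiring step looks like this:
def _example_register_attention_store(pipe):
    controller = AttentionStore()
    register_attention_control(pipe, controller)
    return controller  # filled with down/mid/up cross- and self-attention maps during denoising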
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
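# Editor's note (hedged sketch): illustrates how `rescale_noise_cfg` is typically combined with
# classifier-free guidance; the random tensors stand in for the unconditional / text-conditioned
# noise predictions, and the rescaling pulls the guided prediction's std back toward the text one.
def _example_rescale_after_cfg(guidance_scale=7.5, guidance_rescale=0.7):
    noise_pred_uncond = torch.randn(1, 4, 64, 64)
    noise_pred_text = torch.randn(1, 4, 64, 64)
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    return rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)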
def retrieve_timesteps(
scheduler,
num_inference_steps: Optional[int] = None,
device: Optional[Union[str, torch.device]] = None,
timesteps: Optional[List[int]] = None,
**kwargs,
):
"""
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
Args:
scheduler (`SchedulerMixin`):
The scheduler to get timesteps from.
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model. If used,
`timesteps` must be `None`.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
timesteps (`List[int]`, *optional*):
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
must be `None`.
Returns:
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
second element is the number of inference steps.
"""
if timesteps is not None:
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
if not accepts_timesteps:
raise ValueError(
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
f" timestep schedules. Please check whether you are using the correct scheduler."
)
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
timesteps = scheduler.timesteps
num_inference_steps = len(timesteps)
else:
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
timesteps = scheduler.timesteps
return timesteps, num_inference_steps
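# Editor's note (hedged usage sketch): either `num_inference_steps` or an explicit, descending
# `timesteps` list may be supplied; the scheduler below is just one illustrative choice.
def _example_retrieve_timesteps():
    from diffusers import DDIMScheduler  # illustrative scheduler choice

    scheduler = DDIMScheduler()
    timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=10, device="cpu")
    return timesteps, num_inference_steps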
class StableDiffusionBoxDiffPipeline(
DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
):
r"""
Pipeline for text-to-image generation using Stable Diffusion with BoxDiff.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
The pipeline also inherits the following loading methods:
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
- [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
- [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
text_encoder ([`~transformers.CLIPTextModel`]):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
tokenizer ([`~transformers.CLIPTokenizer`]):
A `CLIPTokenizer` to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
"""
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
_optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
_exclude_from_cpu_offload = ["safety_checker"]
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
image_encoder: CLIPVisionModelWithProjection = None,
requires_safety_checker: bool = True,
):
super().__init__()
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
)
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["clip_sample"] = False
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
is_unet_version_less_0_9_0 = (
unet is not None
and hasattr(unet.config, "_diffusers_version")
and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
)
is_unet_sample_size_less_64 = (
unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
)
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
deprecation_message = (
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
" the `unet/config.json` file"
)
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(unet.config)
new_config["sample_size"] = 64
unet._internal_dict = FrozenDict(new_config)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
**kwargs,
):
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
prompt_embeds_tuple = self.encode_prompt(
prompt=prompt,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
negative_prompt=negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
**kwargs,
)
# concatenate for backwards compatibility; `encode_prompt` here returns
# (text_inputs, prompt_embeds, negative_prompt_embeds), so indices 2 and 1 are the negative/positive embeddings
prompt_embeds = torch.cat([prompt_embeds_tuple[2], prompt_embeds_tuple[1]])
return prompt_embeds
def encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
if clip_skip is None:
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
prompt_embeds = prompt_embeds[0]
else:
prompt_embeds = self.text_encoder(
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
)
# Access the `hidden_states` first, which contains a tuple of
# all the hidden states from the encoder layers. Then index into
# the tuple to access the hidden states from the desired layer.
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
# We also need to apply the final LayerNorm here to not mess with the
# representations. The `last_hidden_states` that we typically use for
# obtaining the final prompt representations passes through the LayerNorm
# layer.
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
if self.text_encoder is not None:
prompt_embeds_dtype = self.text_encoder.dtype
elif self.unet is not None:
prompt_embeds_dtype = self.unet.dtype
else:
prompt_embeds_dtype = prompt_embeds.dtype
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder, lora_scale)
return text_inputs, prompt_embeds, negative_prompt_embeds
def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
dtype = next(self.image_encoder.parameters()).dtype
if not isinstance(image, torch.Tensor):
image = self.feature_extractor(image, return_tensors="pt").pixel_values
image = image.to(device=device, dtype=dtype)
if output_hidden_states:
image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_enc_hidden_states = self.image_encoder(
torch.zeros_like(image), output_hidden_states=True
).hidden_states[-2]
uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
num_images_per_prompt, dim=0
)
return image_enc_hidden_states, uncond_image_enc_hidden_states
else:
image_embeds = self.image_encoder(image).image_embeds
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_embeds = torch.zeros_like(image_embeds)
return image_embeds, uncond_image_embeds
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if torch.is_tensor(image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
return image, has_nsfw_concept
def decode_latents(self, latents):
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
height,
width,
boxdiff_phrases,
boxdiff_boxes,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
if boxdiff_phrases is not None or boxdiff_boxes is not None:
if not (boxdiff_phrases is not None and boxdiff_boxes is not None):
raise ValueError("Either both `boxdiff_phrases` and `boxdiff_boxes` must be passed or none of them.")
if not isinstance(boxdiff_phrases, list) or not isinstance(boxdiff_boxes, list):
raise ValueError("`boxdiff_phrases` and `boxdiff_boxes` must be lists.")
if len(boxdiff_phrases) != len(boxdiff_boxes):
raise ValueError(
"`boxdiff_phrases` and `boxdiff_boxes` must have the same length,"
f" got: `boxdiff_phrases` {len(boxdiff_phrases)} != `boxdiff_boxes`"
f" {len(boxdiff_boxes)}."
)
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
The suffixes after the scaling factors represent the stages where they are being applied.
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
Args:
s1 (`float`):
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
s2 (`float`):
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
mitigate "oversmoothing effect" in the enhanced denoising process.
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
"""
if not hasattr(self, "unet"):
raise ValueError("The pipeline must have `unet` for using FreeU.")
self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
def disable_freeu(self):
"""Disables the FreeU mechanism if enabled."""
self.unet.disable_freeu()
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.fuse_qkv_projections
def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
"""
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
Args:
unet (`bool`, defaults to `True`): To apply fusion on the UNet.
vae (`bool`, defaults to `True`): To apply fusion on the VAE.
"""
self.fusing_unet = False
self.fusing_vae = False
if unet:
self.fusing_unet = True
self.unet.fuse_qkv_projections()
self.unet.set_attn_processor(FusedAttnProcessor2_0())
if vae:
if not isinstance(self.vae, AutoencoderKL):
raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")
self.fusing_vae = True
self.vae.fuse_qkv_projections()
self.vae.set_attn_processor(FusedAttnProcessor2_0())
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
"""Disable QKV projection fusion if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
Args:
unet (`bool`, defaults to `True`): To unfuse the QKV projections of the UNet.
vae (`bool`, defaults to `True`): To unfuse the QKV projections of the VAE.
"""
if unet:
if not self.fusing_unet:
logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
else:
self.unet.unfuse_qkv_projections()
self.fusing_unet = False
if vae:
if not self.fusing_vae:
logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
else:
self.vae.unfuse_qkv_projections()
self.fusing_vae = False
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
"""
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
w (`torch.Tensor`):
Guidance scale values used to generate the embedding vectors.
embedding_dim (`int`, *optional*, defaults to 512):
dimension of the embeddings to generate
dtype:
data type of the generated embeddings
Returns:
`torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
"""
assert len(w.shape) == 1
w = w * 1000.0
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
emb = w.to(dtype)[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1))
assert emb.shape == (w.shape[0], embedding_dim)
return emb
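# Editor's note: for w = torch.tensor([7.5]) and embedding_dim=256 this returns a sinusoidal
# embedding of shape (1, 256); pipelines typically feed it to the UNet as `timestep_cond` when
# `unet.config.time_cond_proj_dim` is set (see `do_classifier_free_guidance` below).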
@property
def guidance_scale(self):
return self._guidance_scale
@property
def guidance_rescale(self):
return self._guidance_rescale
@property
def clip_skip(self):
return self._clip_skip
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
@property
def cross_attention_kwargs(self):
return self._cross_attention_kwargs
@property
def num_timesteps(self):
return self._num_timesteps
@property
def interrupt(self):
return self._interrupt
def _compute_max_attention_per_index(
self,
attention_maps: torch.Tensor,
indices_to_alter: List[int],
smooth_attentions: bool = False,
sigma: float = 0.5,
kernel_size: int = 3,
normalize_eot: bool = False,
bboxes: List[int] = None,
L: int = 1,
P: float = 0.2,
) -> List[torch.Tensor]:
"""Computes the maximum attention value for each of the tokens we wish to alter."""
last_idx = -1
if normalize_eot:
prompt = self.prompt
if isinstance(self.prompt, list):
prompt = self.prompt[0]
last_idx = len(self.tokenizer(prompt)["input_ids"]) - 1
attention_for_text = attention_maps[:, :, 1:last_idx]
attention_for_text *= 100
attention_for_text = torch.nn.functional.softmax(attention_for_text, dim=-1)
# Shift indices since we removed the first token "1:last_idx"
indices_to_alter = [index - 1 for index in indices_to_alter]
# Extract the maximum values
max_indices_list_fg = []
max_indices_list_bg = []
dist_x = []
dist_y = []
cnt = 0
for i in indices_to_alter:
image = attention_for_text[:, :, i]
# TODO
# box = [max(round(b / (512 / image.shape[0])), 0) for b in bboxes[cnt]]
# x1, y1, x2, y2 = box
H, W = image.shape
x1 = min(max(round(bboxes[cnt][0] * W), 0), W)
y1 = min(max(round(bboxes[cnt][1] * H), 0), H)
x2 = min(max(round(bboxes[cnt][2] * W), 0), W)
y2 = min(max(round(bboxes[cnt][3] * H), 0), H)
box = [x1, y1, x2, y2]
cnt += 1
# coordinates to masks
obj_mask = torch.zeros_like(image)
ones_mask = torch.ones([y2 - y1, x2 - x1], dtype=obj_mask.dtype).to(obj_mask.device)
obj_mask[y1:y2, x1:x2] = ones_mask
bg_mask = 1 - obj_mask
if smooth_attentions:
smoothing = GaussianSmoothing(channels=1, kernel_size=kernel_size, sigma=sigma, dim=2).to(image.device)
input = F.pad(image.unsqueeze(0).unsqueeze(0), (1, 1, 1, 1), mode="reflect")
image = smoothing(input).squeeze(0).squeeze(0)
# Inner-Box constraint
k = (obj_mask.sum() * P).long()
max_indices_list_fg.append((image * obj_mask).reshape(-1).topk(k)[0].mean())
# Outer-Box constraint
k = (bg_mask.sum() * P).long()
max_indices_list_bg.append((image * bg_mask).reshape(-1).topk(k)[0].mean())
# Corner Constraint
gt_proj_x = torch.max(obj_mask, dim=0)[0]
gt_proj_y = torch.max(obj_mask, dim=1)[0]
corner_mask_x = torch.zeros_like(gt_proj_x)
corner_mask_y = torch.zeros_like(gt_proj_y)
# create gt according to the number config.L
N = gt_proj_x.shape[0]
corner_mask_x[max(box[0] - L, 0) : min(box[0] + L + 1, N)] = 1.0
corner_mask_x[max(box[2] - L, 0) : min(box[2] + L + 1, N)] = 1.0
corner_mask_y[max(box[1] - L, 0) : min(box[1] + L + 1, N)] = 1.0
corner_mask_y[max(box[3] - L, 0) : min(box[3] + L + 1, N)] = 1.0
dist_x.append((F.l1_loss(image.max(dim=0)[0], gt_proj_x, reduction="none") * corner_mask_x).mean())
dist_y.append((F.l1_loss(image.max(dim=1)[0], gt_proj_y, reduction="none") * corner_mask_y).mean())
return max_indices_list_fg, max_indices_list_bg, dist_x, dist_y
def _aggregate_and_get_max_attention_per_token(
self,
attention_store: AttentionStore,
indices_to_alter: List[int],
attention_res: int = 16,
smooth_attentions: bool = False,
sigma: float = 0.5,
kernel_size: int = 3,
normalize_eot: bool = False,
bboxes: List[int] = None,
L: int = 1,
P: float = 0.2,
):
"""Aggregates the attention for each token and computes the max activation value for each token to alter."""
attention_maps = aggregate_attention(
attention_store=attention_store,
res=attention_res,
from_where=("up", "down", "mid"),
is_cross=True,
select=0,
)
max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y = self._compute_max_attention_per_index(
attention_maps=attention_maps,
indices_to_alter=indices_to_alter,
smooth_attentions=smooth_attentions,
sigma=sigma,
kernel_size=kernel_size,
normalize_eot=normalize_eot,
bboxes=bboxes,
L=L,
P=P,
)
return max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y
@staticmethod
def _compute_loss(
max_attention_per_index_fg: List[torch.Tensor],
max_attention_per_index_bg: List[torch.Tensor],
dist_x: List[torch.Tensor],
dist_y: List[torch.Tensor],
return_losses: bool = False,
) -> torch.Tensor:
"""Computes the attend-and-excite loss using the maximum attention value for each token."""
losses_fg = [max(0, 1.0 - curr_max) for curr_max in max_attention_per_index_fg]
losses_bg = [max(0, curr_max) for curr_max in max_attention_per_index_bg]
loss = sum(losses_fg) + sum(losses_bg) + sum(dist_x) + sum(dist_y)
if return_losses:
return max(losses_fg), losses_fg
else:
return max(losses_fg), loss
@staticmethod
def _update_latent(latents: torch.Tensor, loss: torch.Tensor, step_size: float) -> torch.Tensor:
"""Update the latent according to the computed loss."""
grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents], retain_graph=True)[0]
latents = latents - step_size * grad_cond
return latents
def _perform_iterative_refinement_step(
self,
latents: torch.Tensor,
indices_to_alter: List[int],
loss_fg: torch.Tensor,
threshold: float,
text_embeddings: torch.Tensor,
text_input,
attention_store: AttentionStore,
step_size: float,
t: int,
attention_res: int = 16,
smooth_attentions: bool = True,
sigma: float = 0.5,
kernel_size: int = 3,
max_refinement_steps: int = 20,
normalize_eot: bool = False,
bboxes: List[int] = None,
L: int = 1,
P: float = 0.2,
):
"""
Performs the iterative latent refinement introduced in the paper. Here, we continuously update the latent
code according to our loss objective until the given threshold is reached for all tokens.
"""
iteration = 0
target_loss = max(0, 1.0 - threshold)
while loss_fg > target_loss:
iteration += 1
latents = latents.clone().detach().requires_grad_(True)
# noise_pred_text = self.unet(latents, t, encoder_hidden_states=text_embeddings[1].unsqueeze(0)).sample
self.unet.zero_grad()
# Get max activation value for each subject token
(
max_attention_per_index_fg,
max_attention_per_index_bg,
dist_x,
dist_y,
) = self._aggregate_and_get_max_attention_per_token(
attention_store=attention_store,
indices_to_alter=indices_to_alter,
attention_res=attention_res,
smooth_attentions=smooth_attentions,
sigma=sigma,
kernel_size=kernel_size,
normalize_eot=normalize_eot,
bboxes=bboxes,
L=L,
P=P,
)
loss_fg, losses_fg = self._compute_loss(
max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y, return_losses=True
)
if loss_fg != 0:
latents = self._update_latent(latents, loss_fg, step_size)
# with torch.no_grad():
# noise_pred_uncond = self.unet(latents, t, encoder_hidden_states=text_embeddings[0].unsqueeze(0)).sample
# noise_pred_text = self.unet(latents, t, encoder_hidden_states=text_embeddings[1].unsqueeze(0)).sample
# try:
# low_token = np.argmax([l.item() if not isinstance(l, int) else l for l in losses_fg])
# except Exception as e:
# print(e) # catch edge case :)
# low_token = np.argmax(losses_fg)
# low_word = self.tokenizer.decode(text_input.input_ids[0][indices_to_alter[low_token]])
# print(f'\t Try {iteration}. {low_word} has a max attention of {max_attention_per_index_fg[low_token]}')
if iteration >= max_refinement_steps:
# print(f'\t Exceeded max number of iterations ({max_refinement_steps})! '
# f'Finished with a max attention of {max_attention_per_index_fg[low_token]}')
break
# Run one more time, but without computing gradients or updating the latents here.
# We only need the new loss value; the gradient update happens back in the denoising loop.
latents = latents.clone().detach().requires_grad_(True)
# noise_pred_text = self.unet(latents, t, encoder_hidden_states=text_embeddings[1].unsqueeze(0)).sample
self.unet.zero_grad()
# Get max activation value for each subject token
(
max_attention_per_index_fg,
max_attention_per_index_bg,
dist_x,
dist_y,
) = self._aggregate_and_get_max_attention_per_token(
attention_store=attention_store,
indices_to_alter=indices_to_alter,
attention_res=attention_res,
smooth_attentions=smooth_attentions,
sigma=sigma,
kernel_size=kernel_size,
normalize_eot=normalize_eot,
bboxes=bboxes,
L=L,
P=P,
)
loss_fg, losses_fg = self._compute_loss(
max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y, return_losses=True
)
# print(f"\t Finished with loss of: {loss_fg}")
return loss_fg, latents, max_attention_per_index_fg
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
boxdiff_phrases: List[str] = None,
boxdiff_boxes: List[List[float]] = None, # TODO
boxdiff_kwargs: Optional[Dict[str, Any]] = {
"attention_res": 16,
"P": 0.2,
"L": 1,
"max_iter_to_alter": 25,
"loss_thresholds": {0: 0.05, 10: 0.5, 20: 0.8},
"scale_factor": 20,
"scale_range": (1.0, 0.5),
"smooth_attentions": True,
"sigma": 0.5,
"kernel_size": 3,
"refine": False,
"normalize_eot": True,
},
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
timesteps: List[int] = None,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
ip_adapter_image: Optional[PipelineImageInput] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            boxdiff_phrases (`List[str]`, *optional*):
                The phrases in `prompt` whose cross-attention maps are constrained by the BoxDiff loss.
            boxdiff_boxes (`List[List[float]]`, *optional*):
                One bounding box per phrase, given as `[x0, y0, x1, y1]`.
            boxdiff_kwargs (`Dict[str, Any]`, *optional*):
                Hyperparameters of the BoxDiff loss, e.g. `attention_res` (the resolution of the attention maps
                used for computing the loss, defaults to 16), `L` (the number of pixels around each box corner to
                be selected, defaults to 1) and `P` (defaults to 0.2).
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
timesteps (`List[int]`, *optional*):
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
passed will be used. Must be in descending order.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
using zero terminal SNR.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
`callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
otherwise a `tuple` is returned where the first element is a list with the generated images and the
second element is a list of `bool`s indicating whether the corresponding generated image contains
"not-safe-for-work" (nsfw) content.
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
# -1. Register attention control (for BoxDiff)
attention_store = AttentionStore()
register_attention_control(self, attention_store)
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# to deal with lora scaling and other possible forward hooks
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
height,
width,
boxdiff_phrases,
boxdiff_boxes,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
callback_on_step_end_tensor_inputs,
)
self.prompt = prompt
self._guidance_scale = guidance_scale
self._guidance_rescale = guidance_rescale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._interrupt = False
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# 3. Encode input prompt
lora_scale = (
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
)
text_inputs, prompt_embeds, negative_prompt_embeds = self.encode_prompt(
prompt,
device,
num_images_per_prompt,
self.do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
clip_skip=self.clip_skip,
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
if ip_adapter_image is not None:
output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
image_embeds, negative_image_embeds = self.encode_image(
ip_adapter_image, device, num_images_per_prompt, output_hidden_state
)
if self.do_classifier_free_guidance:
image_embeds = torch.cat([negative_image_embeds, image_embeds])
# 4. Prepare timesteps
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 6.1 Add image embeds for IP-Adapter
added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
# 6.2 Optionally get Guidance Scale Embedding
timestep_cond = None
if self.unet.config.time_cond_proj_dim is not None:
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
timestep_cond = self.get_guidance_scale_embedding(
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
).to(device=device, dtype=latents.dtype)
# 6.3 Prepare BoxDiff inputs
# a) Indices to alter
input_ids = self.tokenizer(prompt)["input_ids"]
decoded = [self.tokenizer.decode([t]) for t in input_ids]
indices_to_alter = []
bboxes = []
for phrase, box in zip(boxdiff_phrases, boxdiff_boxes):
            # a phrase may not correspond to a single token in the prompt; skip it in that case
if phrase not in decoded:
continue
indices_to_alter.append(decoded.index(phrase))
bboxes.append(box)
# b) A bunch of hyperparameters
attention_res = boxdiff_kwargs.get("attention_res", 16)
smooth_attentions = boxdiff_kwargs.get("smooth_attentions", True)
sigma = boxdiff_kwargs.get("sigma", 0.5)
kernel_size = boxdiff_kwargs.get("kernel_size", 3)
L = boxdiff_kwargs.get("L", 1)
P = boxdiff_kwargs.get("P", 0.2)
thresholds = boxdiff_kwargs.get("loss_thresholds", {0: 0.05, 10: 0.5, 20: 0.8})
max_iter_to_alter = boxdiff_kwargs.get("max_iter_to_alter", len(self.scheduler.timesteps) + 1)
scale_factor = boxdiff_kwargs.get("scale_factor", 20)
refine = boxdiff_kwargs.get("refine", False)
normalize_eot = boxdiff_kwargs.get("normalize_eot", True)
scale_range = boxdiff_kwargs.get("scale_range", (1.0, 0.5))
scale_range = np.linspace(scale_range[0], scale_range[1], len(self.scheduler.timesteps))
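        # The latent update step size is scale_factor * sqrt(scale_range[i]), so it decays over the denoising steps.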
# 7. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
self._num_timesteps = len(timesteps)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
if self.interrupt:
continue
# BoxDiff optimization
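                # A text-conditioned forward pass fills `attention_store` with cross-attention maps; the BoxDiff
                # loss over those maps (w.r.t. the user-provided boxes) is then backpropagated into the latents.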
with torch.enable_grad():
latents = latents.clone().detach().requires_grad_(True)
# Forward pass of denoising with text conditioning
noise_pred_text = self.unet(
latents,
t,
encoder_hidden_states=prompt_embeds[1].unsqueeze(0),
cross_attention_kwargs=cross_attention_kwargs,
).sample
self.unet.zero_grad()
# Get max activation value for each subject token
(
max_attention_per_index_fg,
max_attention_per_index_bg,
dist_x,
dist_y,
) = self._aggregate_and_get_max_attention_per_token(
attention_store=attention_store,
indices_to_alter=indices_to_alter,
attention_res=attention_res,
smooth_attentions=smooth_attentions,
sigma=sigma,
kernel_size=kernel_size,
normalize_eot=normalize_eot,
bboxes=bboxes,
L=L,
P=P,
)
loss_fg, loss = self._compute_loss(
max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y
)
# Refinement from attend-and-excite (not necessary)
if refine and i in thresholds.keys() and loss_fg > 1.0 - thresholds[i]:
del noise_pred_text
torch.cuda.empty_cache()
loss_fg, latents, max_attention_per_index_fg = self._perform_iterative_refinement_step(
latents=latents,
indices_to_alter=indices_to_alter,
loss_fg=loss_fg,
threshold=thresholds[i],
text_embeddings=prompt_embeds,
text_input=text_inputs,
attention_store=attention_store,
step_size=scale_factor * np.sqrt(scale_range[i]),
t=t,
attention_res=attention_res,
smooth_attentions=smooth_attentions,
sigma=sigma,
kernel_size=kernel_size,
normalize_eot=normalize_eot,
bboxes=bboxes,
L=L,
P=P,
)
# Perform gradient update
if i < max_iter_to_alter:
_, loss = self._compute_loss(
max_attention_per_index_fg, max_attention_per_index_bg, dist_x, dist_y
)
if loss != 0:
latents = self._update_latent(
latents=latents, loss=loss, step_size=scale_factor * np.sqrt(scale_range[i])
)
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
timestep_cond=timestep_cond,
cross_attention_kwargs=self.cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
0
]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| diffusers/examples/community/pipeline_stable_diffusion_boxdiff.py/0 | {
"file_path": "diffusers/examples/community/pipeline_stable_diffusion_boxdiff.py",
"repo_id": "diffusers",
"token_count": 36200
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import warnings
from typing import Callable, List, Optional, Union
import torch
from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser
from diffusers import DiffusionPipeline, LMSDiscreteScheduler, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class ModelWrapper:
def __init__(self, model, alphas_cumprod):
self.model = model
self.alphas_cumprod = alphas_cumprod
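    # `apply_model` adapts the diffusers UNet call signature to the k-diffusion denoiser interface, which may pass
    # the text conditioning either positionally or via the `cond` keyword.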
def apply_model(self, *args, **kwargs):
if len(args) == 3:
encoder_hidden_states = args[-1]
args = args[:2]
if kwargs.get("cond", None) is not None:
encoder_hidden_states = kwargs.pop("cond")
return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample
class StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
_optional_components = ["safety_checker", "feature_extractor"]
def __init__(
self,
vae,
text_encoder,
tokenizer,
unet,
scheduler,
safety_checker,
feature_extractor,
):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
        # swap in LMSDiscreteScheduler so that the sigma schedule required by k-diffusion is available
scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
model = ModelWrapper(unet, scheduler.alphas_cumprod)
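        # Wrap the UNet so that k-diffusion sees a CompVis-style denoiser; v-prediction models need the V variant.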
if scheduler.config.prediction_type == "v_prediction":
self.k_diffusion_model = CompVisVDenoiser(model)
else:
self.k_diffusion_model = CompVisDenoiser(model)
def set_sampler(self, scheduler_type: str):
warnings.warn("The `set_sampler` method is deprecated, please use `set_scheduler` instead.")
return self.set_scheduler(scheduler_type)
def set_scheduler(self, scheduler_type: str):
library = importlib.import_module("k_diffusion")
sampling = getattr(library, "sampling")
self.sampler = getattr(sampling, scheduler_type)
def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `list(int)`):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
"""
batch_size = len(prompt) if isinstance(prompt, list) else 1
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids
if not torch.equal(text_input_ids, untruncated_ids):
removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
text_embeddings = self.text_encoder(
text_input_ids.to(device),
attention_mask=attention_mask,
)
text_embeddings = text_embeddings[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
bs_embed, seq_len, _ = text_embeddings.shape
text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
max_length = text_input_ids.shape[-1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
uncond_embeddings = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
uncond_embeddings = uncond_embeddings[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = uncond_embeddings.shape[1]
uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
return text_embeddings
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is not None:
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
else:
has_nsfw_concept = None
return image, has_nsfw_concept
def decode_latents(self, latents):
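        # 0.18215 is the VAE scaling factor used by Stable Diffusion v1 checkpoints.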
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
def check_inputs(self, prompt, height, width, callback_steps):
if not isinstance(prompt, str) and not isinstance(prompt, list):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // 8, width // 8)
if latents is None:
if device.type == "mps":
# randn does not work reproducibly on mps
latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
else:
latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
latents = latents.to(device)
        # the initial noise is scaled by the first sigma in `__call__`, so return the latents unscaled here
return latents
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]],
height: int = 512,
width: int = 512,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[torch.Generator] = None,
latents: Optional[torch.Tensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
callback_steps: int = 1,
**kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
height (`int`, *optional*, defaults to 512):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 512):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
# 1. Check inputs. Raise error if not correct
self.check_inputs(prompt, height, width, callback_steps)
# 2. Define call parameters
batch_size = 1 if isinstance(prompt, str) else len(prompt)
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = True
if guidance_scale <= 1.0:
raise ValueError("has to use guidance_scale")
# 3. Encode input prompt
text_embeddings = self._encode_prompt(
prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=text_embeddings.device)
sigmas = self.scheduler.sigmas
sigmas = sigmas.to(text_embeddings.dtype)
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
text_embeddings.dtype,
device,
generator,
latents,
)
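        # k-diffusion samplers expect the initial noise to be scaled by the largest sigma.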
latents = latents * sigmas[0]
self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device)
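        # Classifier-free guidance wrapper around the k-diffusion denoiser: one batched forward pass yields the
        # unconditional and text-conditional predictions, which are then combined with `guidance_scale`.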
def model_fn(x, t):
latent_model_input = torch.cat([x] * 2)
noise_pred = self.k_diffusion_model(latent_model_input, t, cond=text_embeddings)
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
return noise_pred
latents = self.sampler(model_fn, latents, sigmas)
# 8. Post-processing
image = self.decode_latents(latents)
# 9. Run safety checker
image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
# 10. Convert to PIL
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| diffusers/examples/community/sd_text2img_k_diffusion.py/0 | {
"file_path": "diffusers/examples/community/sd_text2img_k_diffusion.py",
"repo_id": "diffusers",
"token_count": 8535
} |
# Based on stable_diffusion_xl_reference.py and stable_diffusion_controlnet_reference.py
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import PIL.Image
import torch
from diffusers import StableDiffusionXLControlNetPipeline
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.image_processor import PipelineImageInput
from diffusers.models import ControlNetModel
from diffusers.models.attention import BasicTransformerBlock
from diffusers.models.unets.unet_2d_blocks import CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UpBlock2D
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
from diffusers.utils import PIL_INTERPOLATION, deprecate, logging, replace_example_docstring
from diffusers.utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> # !pip install opencv-python transformers accelerate
>>> from diffusers import ControlNetModel, AutoencoderKL
>>> from diffusers.schedulers import UniPCMultistepScheduler
>>> from diffusers.utils import load_image
>>> import numpy as np
>>> import torch
>>> import cv2
>>> from PIL import Image
>>> # download an image for the Canny controlnet
>>> canny_image = load_image(
... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl_reference_input_cat.jpg"
... )
>>> # download an image for the Reference controlnet
>>> ref_image = load_image(
... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
... )
>>> # initialize the models and pipeline
>>> controlnet_conditioning_scale = 0.5 # recommended for good generalization
>>> controlnet = ControlNetModel.from_pretrained(
... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
... )
>>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
>>> pipe = StableDiffusionXLControlNetReferencePipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16
... ).to("cuda:0")
>>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
>>> # get canny image
>>> image = np.array(canny_image)
>>> image = cv2.Canny(image, 100, 200)
>>> image = image[:, :, None]
>>> image = np.concatenate([image, image, image], axis=2)
>>> canny_image = Image.fromarray(image)
>>> # generate image
>>> image = pipe(
... prompt="a cat",
... num_inference_steps=20,
... controlnet_conditioning_scale=controlnet_conditioning_scale,
... image=canny_image,
... ref_image=ref_image,
... reference_attn=True,
        ...     reference_adain=True,
... style_fidelity=1.0,
... generator=torch.Generator("cuda").manual_seed(42)
... ).images[0]
```
"""
def torch_dfs(model: torch.nn.Module):
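    """Return `model` followed by all of its submodules in depth-first order."""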
result = [model]
for child in model.children():
result += torch_dfs(child)
return result
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
scheduler,
num_inference_steps: Optional[int] = None,
device: Optional[Union[str, torch.device]] = None,
timesteps: Optional[List[int]] = None,
sigmas: Optional[List[float]] = None,
**kwargs,
):
r"""
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
Args:
scheduler (`SchedulerMixin`):
The scheduler to get timesteps from.
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
must be `None`.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
timesteps (`List[int]`, *optional*):
Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
`num_inference_steps` and `sigmas` must be `None`.
sigmas (`List[float]`, *optional*):
Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
`num_inference_steps` and `timesteps` must be `None`.
Returns:
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
second element is the number of inference steps.
"""
if timesteps is not None and sigmas is not None:
raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
if timesteps is not None:
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
if not accepts_timesteps:
raise ValueError(
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
f" timestep schedules. Please check whether you are using the correct scheduler."
)
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
timesteps = scheduler.timesteps
num_inference_steps = len(timesteps)
elif sigmas is not None:
accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
if not accept_sigmas:
raise ValueError(
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
f" sigmas schedules. Please check whether you are using the correct scheduler."
)
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
timesteps = scheduler.timesteps
num_inference_steps = len(timesteps)
else:
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
timesteps = scheduler.timesteps
return timesteps, num_inference_steps
class StableDiffusionXLControlNetReferencePipeline(StableDiffusionXLControlNetPipeline):
r"""
Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
The pipeline also inherits the following loading methods:
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
- [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
- [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
text_encoder ([`~transformers.CLIPTextModel`]):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]):
Second frozen text-encoder
([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
tokenizer ([`~transformers.CLIPTokenizer`]):
A `CLIPTokenizer` to tokenize text.
tokenizer_2 ([`~transformers.CLIPTokenizer`]):
A `CLIPTokenizer` to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
Provides additional conditioning to the `unet` during the denoising process. If you set multiple
ControlNets as a list, the outputs from each ControlNet are added together to create one combined
additional conditioning.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
Whether the negative prompt embeddings should always be set to 0. Also see the config of
`stabilityai/stable-diffusion-xl-base-1-0`.
add_watermarker (`bool`, *optional*):
Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to
watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no
watermarker is used.
"""
def prepare_ref_latents(self, refimage, batch_size, dtype, device, generator, do_classifier_free_guidance):
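        # Encode the reference image with the VAE (temporarily upcasting to float32 if the SDXL VAE requires it),
        # scale by the VAE scaling factor, and duplicate the latents to match the requested batch size and
        # classifier-free guidance.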
refimage = refimage.to(device=device)
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
if needs_upcasting:
self.upcast_vae()
refimage = refimage.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
if refimage.dtype != self.vae.dtype:
refimage = refimage.to(dtype=self.vae.dtype)
# encode the mask image into latents space so we can concatenate it to the latents
if isinstance(generator, list):
ref_image_latents = [
self.vae.encode(refimage[i : i + 1]).latent_dist.sample(generator=generator[i])
for i in range(batch_size)
]
ref_image_latents = torch.cat(ref_image_latents, dim=0)
else:
ref_image_latents = self.vae.encode(refimage).latent_dist.sample(generator=generator)
ref_image_latents = self.vae.config.scaling_factor * ref_image_latents
# duplicate mask and ref_image_latents for each generation per prompt, using mps friendly method
if ref_image_latents.shape[0] < batch_size:
if not batch_size % ref_image_latents.shape[0] == 0:
raise ValueError(
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
f" to a total batch size of {batch_size}, but {ref_image_latents.shape[0]} images were passed."
" Make sure the number of images that you pass is divisible by the total requested batch size."
)
ref_image_latents = ref_image_latents.repeat(batch_size // ref_image_latents.shape[0], 1, 1, 1)
ref_image_latents = torch.cat([ref_image_latents] * 2) if do_classifier_free_guidance else ref_image_latents
# aligning device to prevent device errors when concating it with the latent model input
ref_image_latents = ref_image_latents.to(device=device, dtype=dtype)
# cast back to fp16 if needed
if needs_upcasting:
self.vae.to(dtype=torch.float16)
return ref_image_latents
def prepare_ref_image(
self,
image,
width,
height,
batch_size,
num_images_per_prompt,
device,
dtype,
do_classifier_free_guidance=False,
guess_mode=False,
):
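        # Convert the reference image to a normalized tensor in [-1, 1], repeat it to the requested batch size and
        # optionally duplicate it for classifier-free guidance.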
if not isinstance(image, torch.Tensor):
if isinstance(image, PIL.Image.Image):
image = [image]
if isinstance(image[0], PIL.Image.Image):
images = []
for image_ in image:
image_ = image_.convert("RGB")
image_ = image_.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
image_ = np.array(image_)
image_ = image_[None, :]
images.append(image_)
image = images
image = np.concatenate(image, axis=0)
image = np.array(image).astype(np.float32) / 255.0
image = (image - 0.5) / 0.5
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
elif isinstance(image[0], torch.Tensor):
image = torch.stack(image, dim=0)
image_batch_size = image.shape[0]
if image_batch_size == 1:
repeat_by = batch_size
else:
repeat_by = num_images_per_prompt
image = image.repeat_interleave(repeat_by, dim=0)
image = image.to(device=device, dtype=dtype)
if do_classifier_free_guidance and not guess_mode:
image = torch.cat([image] * 2)
return image
def check_ref_inputs(
self,
ref_image,
reference_guidance_start,
reference_guidance_end,
style_fidelity,
reference_attn,
reference_adain,
):
ref_image_is_pil = isinstance(ref_image, PIL.Image.Image)
ref_image_is_tensor = isinstance(ref_image, torch.Tensor)
if not ref_image_is_pil and not ref_image_is_tensor:
raise TypeError(
f"ref image must be passed and be one of PIL image or torch tensor, but is {type(ref_image)}"
)
if not reference_attn and not reference_adain:
raise ValueError("`reference_attn` or `reference_adain` must be True.")
if style_fidelity < 0.0:
raise ValueError(f"style fidelity: {style_fidelity} can't be smaller than 0.")
if style_fidelity > 1.0:
raise ValueError(f"style fidelity: {style_fidelity} can't be larger than 1.0.")
if reference_guidance_start >= reference_guidance_end:
raise ValueError(
f"reference guidance start: {reference_guidance_start} cannot be larger or equal to reference guidance end: {reference_guidance_end}."
)
if reference_guidance_start < 0.0:
raise ValueError(f"reference guidance start: {reference_guidance_start} can't be smaller than 0.")
if reference_guidance_end > 1.0:
raise ValueError(f"reference guidance end: {reference_guidance_end} can't be larger than 1.0.")
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
image: PipelineImageInput = None,
ref_image: Union[torch.Tensor, PIL.Image.Image] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
timesteps: List[int] = None,
sigmas: List[float] = None,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
pooled_prompt_embeds: Optional[torch.Tensor] = None,
negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
ip_adapter_image: Optional[PipelineImageInput] = None,
ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
guess_mode: bool = False,
control_guidance_start: Union[float, List[float]] = 0.0,
control_guidance_end: Union[float, List[float]] = 1.0,
original_size: Tuple[int, int] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Tuple[int, int] = None,
negative_original_size: Optional[Tuple[int, int]] = None,
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
negative_target_size: Optional[Tuple[int, int]] = None,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[
Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
attention_auto_machine_weight: float = 1.0,
gn_auto_machine_weight: float = 1.0,
reference_guidance_start: float = 0.0,
reference_guidance_end: float = 1.0,
style_fidelity: float = 0.5,
reference_attn: bool = True,
reference_adain: bool = True,
**kwargs,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders.
image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
`List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted
                as an image. The dimensions of the output image default to `image`'s dimensions. If height and/or
width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`,
images must be passed as a list such that each element of the list can be correctly batched for input
to a single ControlNet.
ref_image (`torch.Tensor`, `PIL.Image.Image`):
                The Reference Control input condition. Reference Control uses this input to provide guidance to the `unet`. If
                the type is specified as `torch.Tensor`, it is passed to Reference Control as is. `PIL.Image.Image` can
also be accepted as an image.
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image. Anything below 512 pixels won't work well for
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
and checkpoints that are not specifically fine-tuned on low resolutions.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image. Anything below 512 pixels won't work well for
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
and checkpoints that are not specifically fine-tuned on low resolutions.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
timesteps (`List[int]`, *optional*):
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
passed will be used. Must be in descending order.
sigmas (`List[float]`, *optional*):
Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
will be used.
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
guidance_scale (`float`, *optional*, defaults to 5.0):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
pooled_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, pooled text embeddings are generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
argument.
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
provided, embeddings are computed from the `ip_adapter_image` input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
the corresponding scale as a list.
guess_mode (`bool`, *optional*, defaults to `False`):
The ControlNet encoder tries to recognize the content of the input image even if you remove all
prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
The percentage of total steps at which the ControlNet starts applying.
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
The percentage of total steps at which the ControlNet stops applying.
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
For most cases, `target_size` should be set to the desired height and width of the generated image. If
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
To negatively condition the generation process based on a target image resolution. For most cases it
should be the same as `target_size`. Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
each denoising step during inference with the following arguments: `callback_on_step_end(self:
DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
attention_auto_machine_weight (`float`):
Weight for using the reference query in the self-attention context. If
`attention_auto_machine_weight=1.0`, the reference query is used in every self-attention layer.
gn_auto_machine_weight (`float`):
Weight for using reference AdaIN. If `gn_auto_machine_weight=2.0`, all reference AdaIN plugins are used.
reference_guidance_start (`float`, *optional*, defaults to 0.0):
The percentage of total steps at which the reference ControlNet starts applying.
reference_guidance_end (`float`, *optional*, defaults to 1.0):
The percentage of total steps at which the reference ControlNet stops applying.
style_fidelity (`float`):
Style fidelity of `ref_uncond_xt`. If `style_fidelity=1.0`, the control (reference) dominates; if
`style_fidelity=0.0`, the prompt dominates; intermediate values balance the two.
reference_attn (`bool`):
Whether to use the reference query for the self-attention context.
reference_adain (`bool`):
Whether to use reference AdaIN.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
otherwise a `tuple` is returned containing the output images.
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
# align format for control guidance
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
control_guidance_start, control_guidance_end = (
mult * [control_guidance_start],
mult * [control_guidance_end],
)
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
prompt_2,
image,
callback_steps,
negative_prompt,
negative_prompt_2,
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
ip_adapter_image,
ip_adapter_image_embeds,
negative_pooled_prompt_embeds,
controlnet_conditioning_scale,
control_guidance_start,
control_guidance_end,
callback_on_step_end_tensor_inputs,
)
self.check_ref_inputs(
ref_image,
reference_guidance_start,
reference_guidance_end,
style_fidelity,
reference_attn,
reference_adain,
)
self._guidance_scale = guidance_scale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._denoising_end = denoising_end
self._interrupt = False
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
global_pool_conditions = (
controlnet.config.global_pool_conditions
if isinstance(controlnet, ControlNetModel)
else controlnet.nets[0].config.global_pool_conditions
)
guess_mode = guess_mode or global_pool_conditions
# 3.1 Encode input prompt
text_encoder_lora_scale = (
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt,
prompt_2,
device,
num_images_per_prompt,
self.do_classifier_free_guidance,
negative_prompt,
negative_prompt_2,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
lora_scale=text_encoder_lora_scale,
clip_skip=self.clip_skip,
)
# 3.2 Encode ip_adapter_image
if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
image_embeds = self.prepare_ip_adapter_image_embeds(
ip_adapter_image,
ip_adapter_image_embeds,
device,
batch_size * num_images_per_prompt,
self.do_classifier_free_guidance,
)
# 4. Prepare image
if isinstance(controlnet, ControlNetModel):
image = self.prepare_image(
image=image,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=self.do_classifier_free_guidance,
guess_mode=guess_mode,
)
height, width = image.shape[-2:]
elif isinstance(controlnet, MultiControlNetModel):
images = []
for image_ in image:
image_ = self.prepare_image(
image=image_,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=self.do_classifier_free_guidance,
guess_mode=guess_mode,
)
images.append(image_)
image = images
height, width = image[0].shape[-2:]
else:
assert False
# 5. Preprocess reference image
ref_image = self.prepare_ref_image(
image=ref_image,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=prompt_embeds.dtype,
)
# 6. Prepare timesteps
timesteps, num_inference_steps = retrieve_timesteps(
self.scheduler, num_inference_steps, device, timesteps, sigmas
)
self._num_timesteps = len(timesteps)
# 7. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 7.5 Optionally get Guidance Scale Embedding
timestep_cond = None
if self.unet.config.time_cond_proj_dim is not None:
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
timestep_cond = self.get_guidance_scale_embedding(
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
).to(device=device, dtype=latents.dtype)
# 8. Prepare reference latent variables
ref_image_latents = self.prepare_ref_latents(
ref_image,
batch_size * num_images_per_prompt,
prompt_embeds.dtype,
device,
generator,
self.do_classifier_free_guidance,
)
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 9.1 Create tensor stating which controlnets to keep
controlnet_keep = []
reference_keeps = []
for i in range(len(timesteps)):
keeps = [
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
for s, e in zip(control_guidance_start, control_guidance_end)
]
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
reference_keep = 1.0 - float(
i / len(timesteps) < reference_guidance_start or (i + 1) / len(timesteps) > reference_guidance_end
)
reference_keeps.append(reference_keep)
# 9.2 Modify self attention and group norm
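# The patched ("hacked") forward methods defined below implement the reference-only trick in two phases:
# in "write" mode, the pass over the noised reference latents caches self-attention hidden states (`bank`)
# and group-norm statistics (`mean_bank`/`var_bank`); in "read" mode, the denoising pass mixes those cached
# features into its own activations, weighted by `style_fidelity`.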
MODE = "write"
uc_mask = (
torch.Tensor([1] * batch_size * num_images_per_prompt + [0] * batch_size * num_images_per_prompt)
.type_as(ref_image_latents)
.bool()
)
do_classifier_free_guidance = self.do_classifier_free_guidance
def hacked_basic_transformer_inner_forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
timestep: Optional[torch.LongTensor] = None,
cross_attention_kwargs: Dict[str, Any] = None,
class_labels: Optional[torch.LongTensor] = None,
):
if self.use_ada_layer_norm:
norm_hidden_states = self.norm1(hidden_states, timestep)
elif self.use_ada_layer_norm_zero:
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
)
else:
norm_hidden_states = self.norm1(hidden_states)
# 1. Self-Attention
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
if self.only_cross_attention:
attn_output = self.attn1(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
attention_mask=attention_mask,
**cross_attention_kwargs,
)
else:
if MODE == "write":
self.bank.append(norm_hidden_states.detach().clone())
attn_output = self.attn1(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
attention_mask=attention_mask,
**cross_attention_kwargs,
)
if MODE == "read":
if attention_auto_machine_weight > self.attn_weight:
attn_output_uc = self.attn1(
norm_hidden_states,
encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),
# attention_mask=attention_mask,
**cross_attention_kwargs,
)
attn_output_c = attn_output_uc.clone()
if do_classifier_free_guidance and style_fidelity > 0:
attn_output_c[uc_mask] = self.attn1(
norm_hidden_states[uc_mask],
encoder_hidden_states=norm_hidden_states[uc_mask],
**cross_attention_kwargs,
)
attn_output = style_fidelity * attn_output_c + (1.0 - style_fidelity) * attn_output_uc
self.bank.clear()
else:
attn_output = self.attn1(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
attention_mask=attention_mask,
**cross_attention_kwargs,
)
if self.use_ada_layer_norm_zero:
attn_output = gate_msa.unsqueeze(1) * attn_output
hidden_states = attn_output + hidden_states
if self.attn2 is not None:
norm_hidden_states = (
self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
)
# 2. Cross-Attention
attn_output = self.attn2(
norm_hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
**cross_attention_kwargs,
)
hidden_states = attn_output + hidden_states
# 3. Feed-forward
norm_hidden_states = self.norm3(hidden_states)
if self.use_ada_layer_norm_zero:
norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
ff_output = self.ff(norm_hidden_states)
if self.use_ada_layer_norm_zero:
ff_output = gate_mlp.unsqueeze(1) * ff_output
hidden_states = ff_output + hidden_states
return hidden_states
def hacked_mid_forward(self, *args, **kwargs):
eps = 1e-6
x = self.original_forward(*args, **kwargs)
if MODE == "write":
if gn_auto_machine_weight >= self.gn_weight:
var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
self.mean_bank.append(mean)
self.var_bank.append(var)
if MODE == "read":
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))
var_acc = sum(self.var_bank) / float(len(self.var_bank))
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
x_uc = (((x - mean) / std) * std_acc) + mean_acc
x_c = x_uc.clone()
if do_classifier_free_guidance and style_fidelity > 0:
x_c[uc_mask] = x[uc_mask]
x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc
self.mean_bank = []
self.var_bank = []
return x
def hack_CrossAttnDownBlock2D_forward(
self,
hidden_states: torch.Tensor,
temb: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
):
eps = 1e-6
# TODO(Patrick, William) - attention mask is not used
output_states = ()
for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
if MODE == "write":
if gn_auto_machine_weight >= self.gn_weight:
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
self.mean_bank.append([mean])
self.var_bank.append([var])
if MODE == "read":
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
hidden_states_c = hidden_states_uc.clone()
if do_classifier_free_guidance and style_fidelity > 0:
hidden_states_c[uc_mask] = hidden_states[uc_mask]
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
output_states = output_states + (hidden_states,)
if MODE == "read":
self.mean_bank = []
self.var_bank = []
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states = output_states + (hidden_states,)
return hidden_states, output_states
def hacked_DownBlock2D_forward(self, hidden_states, temb=None, *args, **kwargs):
eps = 1e-6
output_states = ()
for i, resnet in enumerate(self.resnets):
hidden_states = resnet(hidden_states, temb)
if MODE == "write":
if gn_auto_machine_weight >= self.gn_weight:
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
self.mean_bank.append([mean])
self.var_bank.append([var])
if MODE == "read":
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
hidden_states_c = hidden_states_uc.clone()
if do_classifier_free_guidance and style_fidelity > 0:
hidden_states_c[uc_mask] = hidden_states[uc_mask]
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
output_states = output_states + (hidden_states,)
if MODE == "read":
self.mean_bank = []
self.var_bank = []
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states = output_states + (hidden_states,)
return hidden_states, output_states
def hacked_CrossAttnUpBlock2D_forward(
self,
hidden_states: torch.Tensor,
res_hidden_states_tuple: Tuple[torch.Tensor, ...],
temb: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
upsample_size: Optional[int] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
):
eps = 1e-6
# TODO(Patrick, William) - attention mask is not used
for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
if MODE == "write":
if gn_auto_machine_weight >= self.gn_weight:
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
self.mean_bank.append([mean])
self.var_bank.append([var])
if MODE == "read":
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
hidden_states_c = hidden_states_uc.clone()
if do_classifier_free_guidance and style_fidelity > 0:
hidden_states_c[uc_mask] = hidden_states[uc_mask]
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
if MODE == "read":
self.mean_bank = []
self.var_bank = []
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
def hacked_UpBlock2D_forward(
self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, *args, **kwargs
):
eps = 1e-6
for i, resnet in enumerate(self.resnets):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
hidden_states = resnet(hidden_states, temb)
if MODE == "write":
if gn_auto_machine_weight >= self.gn_weight:
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
self.mean_bank.append([mean])
self.var_bank.append([var])
if MODE == "read":
if len(self.mean_bank) > 0 and len(self.var_bank) > 0:
var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)
std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))
var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))
std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5
hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc
hidden_states_c = hidden_states_uc.clone()
if do_classifier_free_guidance and style_fidelity > 0:
hidden_states_c[uc_mask] = hidden_states[uc_mask]
hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc
if MODE == "read":
self.mean_bank = []
self.var_bank = []
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
if reference_attn:
attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock)]
attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])
for i, module in enumerate(attn_modules):
module._original_inner_forward = module.forward
module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)
module.bank = []
module.attn_weight = float(i) / float(len(attn_modules))
if reference_adain:
gn_modules = [self.unet.mid_block]
self.unet.mid_block.gn_weight = 0
down_blocks = self.unet.down_blocks
for w, module in enumerate(down_blocks):
module.gn_weight = 1.0 - float(w) / float(len(down_blocks))
gn_modules.append(module)
up_blocks = self.unet.up_blocks
for w, module in enumerate(up_blocks):
module.gn_weight = float(w) / float(len(up_blocks))
gn_modules.append(module)
for i, module in enumerate(gn_modules):
if getattr(module, "original_forward", None) is None:
module.original_forward = module.forward
if i == 0:
# mid_block
module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)
elif isinstance(module, CrossAttnDownBlock2D):
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)
elif isinstance(module, DownBlock2D):
module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)
elif isinstance(module, CrossAttnUpBlock2D):
module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)
elif isinstance(module, UpBlock2D):
module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)
module.mean_bank = []
module.var_bank = []
module.gn_weight *= 2
# 9.3 Prepare added time ids & embeddings
if isinstance(image, list):
original_size = original_size or image[0].shape[-2:]
else:
original_size = original_size or image.shape[-2:]
target_size = target_size or (height, width)
add_text_embeds = pooled_prompt_embeds
if self.text_encoder_2 is None:
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
else:
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
add_time_ids = self._get_add_time_ids(
original_size,
crops_coords_top_left,
target_size,
dtype=prompt_embeds.dtype,
text_encoder_projection_dim=text_encoder_projection_dim,
)
if negative_original_size is not None and negative_target_size is not None:
negative_add_time_ids = self._get_add_time_ids(
negative_original_size,
negative_crops_coords_top_left,
negative_target_size,
dtype=prompt_embeds.dtype,
text_encoder_projection_dim=text_encoder_projection_dim,
)
else:
negative_add_time_ids = add_time_ids
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
# 10. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
# 10.1 Apply denoising_end
if (
self.denoising_end is not None
and isinstance(self.denoising_end, float)
and self.denoising_end > 0
and self.denoising_end < 1
):
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (self.denoising_end * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
is_unet_compiled = is_compiled_module(self.unet)
is_controlnet_compiled = is_compiled_module(self.controlnet)
is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
if self.interrupt:
continue
# Relevant thread:
# https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
torch._inductor.cudagraph_mark_step_begin()
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
# controlnet(s) inference
if guess_mode and self.do_classifier_free_guidance:
# Infer ControlNet only for the conditional batch.
control_model_input = latents
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
controlnet_added_cond_kwargs = {
"text_embeds": add_text_embeds.chunk(2)[1],
"time_ids": add_time_ids.chunk(2)[1],
}
else:
control_model_input = latent_model_input
controlnet_prompt_embeds = prompt_embeds
controlnet_added_cond_kwargs = added_cond_kwargs
if isinstance(controlnet_keep[i], list):
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
else:
controlnet_cond_scale = controlnet_conditioning_scale
if isinstance(controlnet_cond_scale, list):
controlnet_cond_scale = controlnet_cond_scale[0]
cond_scale = controlnet_cond_scale * controlnet_keep[i]
down_block_res_samples, mid_block_res_sample = self.controlnet(
control_model_input,
t,
encoder_hidden_states=controlnet_prompt_embeds,
controlnet_cond=image,
conditioning_scale=cond_scale,
guess_mode=guess_mode,
added_cond_kwargs=controlnet_added_cond_kwargs,
return_dict=False,
)
if guess_mode and self.do_classifier_free_guidance:
# Inferred ControlNet only for the conditional batch.
# To apply the output of ControlNet to both the unconditional and conditional batches,
# add 0 to the unconditional batch to keep it unchanged.
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
added_cond_kwargs["image_embeds"] = image_embeds
# Reference-only branch: noise the reference latents to the current timestep and run the UNet in "write" mode so the patched blocks cache reference features for the subsequent "read" pass
if reference_keeps[i] > 0:
noise = randn_tensor(
ref_image_latents.shape, generator=generator, device=device, dtype=ref_image_latents.dtype
)
ref_xt = self.scheduler.add_noise(
ref_image_latents,
noise,
t.reshape(
1,
),
)
ref_xt = self.scheduler.scale_model_input(ref_xt, t)
MODE = "write"
self.unet(
ref_xt,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)
# predict the noise residual
MODE = "read"
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
timestep_cond=timestep_cond,
cross_attention_kwargs=self.cross_attention_kwargs,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
negative_pooled_prompt_embeds = callback_outputs.pop(
"negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
)
add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if not output_type == "latent":
# make sure the VAE is in float32 mode, as it overflows in float16
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
if needs_upcasting:
self.upcast_vae()
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
# unscale/denormalize the latents
# denormalize with the mean and std if available and not None
has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
if has_latents_mean and has_latents_std:
latents_mean = (
torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
)
latents_std = (
torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
)
latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
else:
latents = latents / self.vae.config.scaling_factor
image = self.vae.decode(latents, return_dict=False)[0]
# cast back to fp16 if needed
if needs_upcasting:
self.vae.to(dtype=torch.float16)
else:
image = latents
if not output_type == "latent":
# apply watermark if available
if self.watermark is not None:
image = self.watermark.apply_watermark(image)
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image,)
return StableDiffusionXLPipelineOutput(images=image)
| diffusers/examples/community/stable_diffusion_xl_controlnet_reference.py/0 | {
"file_path": "diffusers/examples/community/stable_diffusion_xl_controlnet_reference.py",
"repo_id": "diffusers",
"token_count": 34133
} |
# Kandinsky2.2 text-to-image fine-tuning
Kandinsky 2.2 includes a prior pipeline that generates image embeddings from text prompts, and a decoder pipeline that generates the output image based on the image embeddings. We provide `train_text_to_image_prior.py` and `train_text_to_image_decoder.py` scripts to show you how to fine-tune the Kandinsky prior and decoder models separately based on your own dataset. To achieve the best results, you should fine-tune **_both_** your prior and decoder models.
___Note___:
___This script is experimental. The script fine-tunes the whole model and often times the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset.___
## Running locally with PyTorch
Before running the scripts, make sure to install the library's training dependencies:
**Important**
To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```
Then cd into the example folder and run
```bash
pip install -r requirements.txt
```
And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
```bash
accelerate config
```
For this example we want to directly store the trained weights on the Hub, so we need to be logged in and add the `--push_to_hub` flag.
___
### Naruto example
For all our examples, we will directly store the trained weights on the Hub, so we need to be logged in and add the `--push_to_hub` flag. In order to do that, you have to be a registered user on the 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to the [User Access Tokens](https://huggingface.co/docs/hub/security-tokens) guide.
Run the following command to authenticate your token
```bash
huggingface-cli login
```
We also use [Weights and Biases](https://docs.wandb.ai/quickstart) logging by default, because it is really useful to monitor the training progress by regularly generating sample images during training. To install wandb, run
```bash
pip install wandb
```
To disable wandb logging, remove the `--report_to="wandb"` and `--validation_prompts="A robot naruto, 4k photo"` flags from the examples below
#### Fine-tune decoder
<br>
<!-- accelerate_snippet_start -->
```bash
export DATASET_NAME="lambdalabs/naruto-blip-captions"
accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
--dataset_name=$DATASET_NAME \
--resolution=768 \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--checkpoints_total_limit=3 \
--lr_scheduler="constant" --lr_warmup_steps=0 \
--validation_prompts="A robot naruto, 4k photo" \
--report_to="wandb" \
--push_to_hub \
--output_dir="kandi2-decoder-naruto-model"
```
<!-- accelerate_snippet_end -->
To train on your own training files, prepare the dataset according to the format required by `datasets`. You can find the instructions for how to do that in the [ImageFolder with metadata](https://huggingface.co/docs/datasets/en/image_load#imagefolder-with-metadata) guide.
If you wish to use custom loading logic, you should modify the script; we have left pointers for that in the training script.
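As a rough sketch (file names below are placeholders, and the caption column name `text` is an assumption borrowed from the `--caption_column` used later in this guide), a `train_data_dir` prepared in the ImageFolder-with-metadata format could look like:
```
path_to_your_dataset/
├── metadata.jsonl
├── 0001.png
├── 0002.png
└── ...
```
where each line of `metadata.jsonl` pairs an image with its caption, e.g. `{"file_name": "0001.png", "text": "A robot naruto, 4k photo"}`.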
```bash
export TRAIN_DIR="path_to_your_dataset"
accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
--train_data_dir=$TRAIN_DIR \
--resolution=768 \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--checkpoints_total_limit=3 \
--lr_scheduler="constant" --lr_warmup_steps=0 \
--validation_prompts="A robot naruto, 4k photo" \
--report_to="wandb" \
--push_to_hub \
--output_dir="kandi22-decoder-naruto-model"
```
Once the training is finished the model will be saved in the `output_dir` specified in the command. In this example it's `kandi22-decoder-naruto-model`. To load the fine-tuned model for inference just pass that path to `AutoPipelineForText2Image`
```python
from diffusers import AutoPipelineForText2Image
import torch
pipe = AutoPipelineForText2Image.from_pretrained(output_dir, torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
prompt='A robot naruto, 4k photo'
images = pipe(prompt=prompt).images
images[0].save("robot-naruto.png")
```
Checkpoints only save the `unet`, so to run inference from a checkpoint, just load the `unet`:
```python
from diffusers import AutoPipelineForText2Image, UNet2DConditionModel
import torch
model_path = "path_to_saved_model"
unet = UNet2DConditionModel.from_pretrained(model_path + "/checkpoint-<N>/unet")
pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", unet=unet, torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
image = pipe(prompt="A robot naruto, 4k photo").images[0]
image.save("robot-naruto.png")
```
#### Fine-tune prior
You can fine-tune the Kandinsky prior model with the `train_text_to_image_prior.py` script. Note that we currently do not support `--gradient_checkpointing` for prior model fine-tuning.
<br>
<!-- accelerate_snippet_start -->
```bash
export DATASET_NAME="lambdalabs/naruto-blip-captions"
accelerate launch --mixed_precision="fp16" train_text_to_image_prior.py \
--dataset_name=$DATASET_NAME \
--resolution=768 \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--checkpoints_total_limit=3 \
--lr_scheduler="constant" --lr_warmup_steps=0 \
--validation_prompts="A robot naruto, 4k photo" \
--report_to="wandb" \
--push_to_hub \
--output_dir="kandi2-prior-naruto-model"
```
<!-- accelerate_snippet_end -->
To perform inference with the fine-tuned prior model, you will need to first create a prior pipeline by passing the `output_dir` to `DiffusionPipeline`. Then create a `KandinskyV22CombinedPipeline` from a pretrained or fine-tuned decoder checkpoint along with all the modules of the prior pipeline you just created.
```python
from diffusers import AutoPipelineForText2Image, DiffusionPipeline
import torch
pipe_prior = DiffusionPipeline.from_pretrained(output_dir, torch_dtype=torch.float16)
prior_components = {"prior_" + k: v for k,v in pipe_prior.components.items()}
pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", **prior_components, torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
prompt='A robot naruto, 4k photo'
images = pipe(prompt=prompt).images
images[0]
```
If you want to use a fine-tuned decoder checkpoint along with your fine-tuned prior checkpoint, you can simply replace the "kandinsky-community/kandinsky-2-2-decoder" in the above code with your custom model repo name. Note that in order to create a `KandinskyV22CombinedPipeline`, your model repository needs to have a prior tag. If you have created your model repo using our training script, the prior tag is automatically included.
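For instance, assuming hypothetical Hub repo names (replace the `your-username/...` identifiers with your actual prior and decoder repositories), the combination could look like:
```python
from diffusers import AutoPipelineForText2Image, DiffusionPipeline
import torch

# Load the fine-tuned prior pipeline and collect its modules under the "prior_" prefix.
pipe_prior = DiffusionPipeline.from_pretrained("your-username/kandi2-prior-naruto-model", torch_dtype=torch.float16)
prior_components = {"prior_" + k: v for k, v in pipe_prior.components.items()}

# Combine them with a fine-tuned decoder checkpoint instead of the base decoder.
pipe = AutoPipelineForText2Image.from_pretrained(
    "your-username/kandi22-decoder-naruto-model", **prior_components, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
image = pipe(prompt="A robot naruto, 4k photo").images[0]
```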
#### Training with multiple GPUs
`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch)
for running distributed training with `accelerate`. Here is an example command:
```bash
export DATASET_NAME="lambdalabs/naruto-blip-captions"
accelerate launch --mixed_precision="fp16" --multi_gpu train_text_to_image_decoder.py \
--dataset_name=$DATASET_NAME \
--resolution=768 \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--checkpoints_total_limit=3 \
--lr_scheduler="constant" --lr_warmup_steps=0 \
--validation_prompts="A robot naruto, 4k photo" \
--report_to="wandb" \
--push_to_hub \
--output_dir="kandi2-decoder-naruto-model"
```
#### Training with Min-SNR weighting
We support training with the Min-SNR weighting strategy proposed in [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://arxiv.org/abs/2303.09556) which helps achieve faster convergence
by rebalancing the loss. Enable the `--snr_gamma` argument and set it to the recommended
value of 5.0.
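As a sketch of how the flag slots into the decoder command from above (some flags are omitted for brevity; the omitted ones stay exactly as in the earlier command):
```bash
export DATASET_NAME="lambdalabs/naruto-blip-captions"
accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
--dataset_name=$DATASET_NAME \
--resolution=768 \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--snr_gamma=5.0 \
--output_dir="kandi2-decoder-naruto-model"
```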
## Training with LoRA
Low-Rank Adaption of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.
In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:
- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
- LoRA attention layers allow you to control the extent to which the model is adapted toward new training images via a `scale` parameter (a minimal sketch of the idea follows this list).
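The sketch below is purely illustrative (the shapes, the zero initialization of `B`, and the variable names are assumptions for exposition, not the actual diffusers implementation):
```python
import torch

d_out, d_in, rank, scale = 640, 320, 4, 1.0
W = torch.randn(d_out, d_in)        # frozen pretrained projection weight
A = torch.randn(rank, d_in) * 0.01  # trainable low-rank factor
B = torch.zeros(d_out, rank)        # trainable low-rank factor, zero-init so training starts from W

x = torch.randn(1, d_in)
y = x @ (W + scale * (B @ A)).T     # only A and B (rank * (d_in + d_out) parameters) are updated
```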
[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository.
With LoRA, it's possible to fine-tune Kandinsky 2.2 on a custom image-caption dataset
on consumer GPUs like the Tesla T4 or Tesla V100.
### Training
First, you need to set up your development environment as explained in the [Running locally with PyTorch](#running-locally-with-pytorch) section. Make sure to set the `MODEL_NAME` and `DATASET_NAME` environment variables. Here, we will use [Kandinsky 2.2](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder) and the [Narutos dataset](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions).
#### Train decoder
```bash
export DATASET_NAME="lambdalabs/naruto-blip-captions"
accelerate launch --mixed_precision="fp16" train_text_to_image_decoder_lora.py \
--dataset_name=$DATASET_NAME --caption_column="text" \
--resolution=768 \
--train_batch_size=1 \
--num_train_epochs=100 --checkpointing_steps=5000 \
--learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
--seed=42 \
--rank=4 \
--gradient_checkpointing \
--output_dir="kandi22-decoder-naruto-lora" \
--validation_prompt="cute dragon creature" --report_to="wandb" \
--push_to_hub \
```
#### Train prior
```bash
export DATASET_NAME="lambdalabs/naruto-blip-captions"
accelerate launch --mixed_precision="fp16" train_text_to_image_prior_lora.py \
--dataset_name=$DATASET_NAME --caption_column="text" \
--resolution=768 \
--train_batch_size=1 \
--num_train_epochs=100 --checkpointing_steps=5000 \
--learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
--seed=42 \
--rank=4 \
--output_dir="kandi22-prior-naruto-lora" \
--validation_prompt="cute dragon creature" --report_to="wandb" \
--push_to_hub \
```
**___Note: When using LoRA we can use a much higher learning rate compared to non-LoRA fine-tuning. Here we use *1e-4* instead of the usual *1e-5*. Also, by using LoRA, it's possible to run above scripts in consumer GPUs like T4 or V100.___**
### Inference
#### Inference using fine-tuned LoRA checkpoint for decoder
Once you have trained a Kandinsky decoder model using the above command, inference can be done with the `AutoPipelineForText2Image` after loading the trained LoRA weights. You need to pass the `output_dir` for loading the LoRA weights, which in this case is `kandi22-decoder-naruto-lora`.
```python
from diffusers import AutoPipelineForText2Image
import torch
pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
pipe.unet.load_attn_procs(output_dir)
pipe.enable_model_cpu_offload()
prompt='A robot naruto, 4k photo'
image = pipe(prompt=prompt).images[0]
image.save("robot_naruto.png")
```
#### Inference using fine-tuned LoRA checkpoint for prior
```python
from diffusers import AutoPipelineForText2Image
import torch
pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
pipe.prior_prior.load_attn_procs(output_dir)
pipe.enable_model_cpu_offload()
prompt='A robot naruto, 4k photo'
image = pipe(prompt=prompt).images[0]
image.save("robot_naruto.png")
image
```
### Training with xFormers
You can enable memory efficient attention by [installing xFormers](https://huggingface.co/docs/diffusers/main/en/optimization/xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script.
xFormers training is not available for fine-tuning the prior model.
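As an abbreviated sketch for the decoder script (the flag is simply appended to the training command; the remaining flags stay as shown earlier):
```bash
accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
--dataset_name=$DATASET_NAME \
--resolution=768 \
--train_batch_size=1 \
--enable_xformers_memory_efficient_attention \
--output_dir="kandi2-decoder-naruto-model"
```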
**Note**:
According to [this issue](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212), xFormers `v0.0.16` cannot be used for training in some GPUs. If you observe that problem, please install a development version as indicated in that comment. | diffusers/examples/kandinsky2_2/text_to_image/README.md/0 | {
"file_path": "diffusers/examples/kandinsky2_2/text_to_image/README.md",
"repo_id": "diffusers",
"token_count": 4393
} |
# [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) by [colossalai](https://github.com/hpcaitech/ColossalAI.git)
[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text-to-image models like Stable Diffusion given just a few (3-5) images of a subject.
The `train_dreambooth_colossalai.py` script shows how to implement the training procedure and adapt it for stable diffusion.
By keeping model data in both CPU and GPU memory and moving it to the computing device only when necessary, [Gemini](https://www.colossalai.org/docs/advanced_tutorials/meet_gemini), the Heterogeneous Memory Manager of [Colossal-AI](https://github.com/hpcaitech/ColossalAI), can break through the GPU memory wall by using GPU and CPU memory (CPU DRAM or NVMe SSD) together. Moreover, the model scale can be further increased by combining heterogeneous training with other parallel approaches, such as data, tensor, and pipeline parallelism.
## Installing the dependencies
Before running the scripts, make sure to install the library's training dependencies:
```bash
pip install -r requirements.txt
```
## Install [ColossalAI](https://github.com/hpcaitech/ColossalAI.git)
**From PyPI**
```bash
pip install colossalai
```
**From source**
```bash
git clone https://github.com/hpcaitech/ColossalAI.git
cd ColossalAI
# install colossalai
pip install .
```
## Dataset for Teyvat BLIP captions
Dataset used to train [Teyvat characters text to image model](https://github.com/hpcaitech/ColossalAI/tree/main/examples/images/diffusion).
BLIP-generated captions for character images from the [genshin-impact fandom wiki](https://genshin-impact.fandom.com/wiki/Character#Playable_Characters) and the [biligame wiki for genshin impact](https://wiki.biligame.com/ys/%E8%A7%92%E8%89%B2).
For each row the dataset contains `image` and `text` keys. `image` is a varying size PIL png, and `text` is the accompanying text caption. Only a train split is provided.
The `text` includes the tags `Teyvat`, `Name`, `Element`, `Weapon`, `Region`, `Model type`, and `Description`; the `Description` is captioned with the [pre-trained BLIP model](https://github.com/salesforce/BLIP).
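As a quick sketch of what a row looks like (the dataset repo id below is an assumption used for illustration; substitute the actual Teyvat BLIP captions dataset):
```python
from datasets import load_dataset

# NOTE: "Fazzie/Teyvat" is a placeholder guess for the dataset repo id.
ds = load_dataset("Fazzie/Teyvat", split="train")
sample = ds[0]
print(sample["text"])   # caption containing the Teyvat/Name/Element/Weapon/Region/Model type/Description tags
img = sample["image"]   # PIL image of varying size
```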
## Training
The argument `placement` can be `cpu`, `auto`, or `cuda`. With `cpu`, the required GPU RAM can be reduced to about 4GB at the cost of slower training; with `cuda`, GPU memory use is roughly halved while training remains fast; with `auto`, a more balanced trade-off between speed and memory is obtained.
**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export OUTPUT_DIR="path-to-save-model"
torchrun --nproc_per_node 2 train_dreambooth_colossalai.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--output_dir=$OUTPUT_DIR \
--instance_prompt="a photo of sks dog" \
--resolution=512 \
--train_batch_size=1 \
--learning_rate=5e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--max_train_steps=400 \
--placement="cuda"
```
### Training with prior-preservation loss
Prior-preservation is used to avoid overfitting and language-drift. Refer to the paper to learn more about it. For prior-preservation we first generate images using the model with a class prompt and then use those during training along with our data.
According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation. 200-300 works well for most cases. The `num_class_images` flag sets the number of images to generate with the class prompt. You can place existing images in `class_data_dir`, and the training script will generate any additional images so that `num_class_images` are present in `class_data_dir` during training time.
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"
torchrun --nproc_per_node 2 train_dreambooth_colossalai.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir=$CLASS_DIR \
--output_dir=$OUTPUT_DIR \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="a photo of sks dog" \
--class_prompt="a photo of dog" \
--resolution=512 \
--train_batch_size=1 \
--learning_rate=5e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--max_train_steps=800 \
--placement="cuda"
```
## Inference
Once you have trained a model using the above command, inference can be done simply with the `StableDiffusionPipeline`. Make sure to include the identifier (e.g. `sks` in the above example) in your prompt.
```python
from diffusers import StableDiffusionPipeline
import torch
model_id = "path-to-save-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
```
| diffusers/examples/research_projects/colossalai/README.md/0 | {
"file_path": "diffusers/examples/research_projects/colossalai/README.md",
"repo_id": "diffusers",
"token_count": 1659
} |
# Dreambooth for the inpainting model
This script was added by @thedarkzeno.
Please note that this script is not actively maintained; however, you can open an issue and tag @thedarkzeno or @patil-suraj.
```bash
export MODEL_NAME="runwayml/stable-diffusion-inpainting"
export INSTANCE_DIR="path-to-instance-images"
export OUTPUT_DIR="path-to-save-model"
accelerate launch train_dreambooth_inpaint.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--output_dir=$OUTPUT_DIR \
--instance_prompt="a photo of sks dog" \
--resolution=512 \
--train_batch_size=1 \
--gradient_accumulation_steps=1 \
--learning_rate=5e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--max_train_steps=400
```
### Training with prior-preservation loss
Prior-preservation is used to avoid overfitting and language-drift. Refer to the paper to learn more about it. For prior-preservation we first generate images using the model with a class prompt and then use those during training along with our data.
According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation. 200-300 works well for most cases.
```bash
export MODEL_NAME="runwayml/stable-diffusion-inpainting"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"
accelerate launch train_dreambooth_inpaint.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir=$CLASS_DIR \
--output_dir=$OUTPUT_DIR \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="a photo of sks dog" \
--class_prompt="a photo of dog" \
--resolution=512 \
--train_batch_size=1 \
--gradient_accumulation_steps=1 \
--learning_rate=5e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
--max_train_steps=800
```
### Training with gradient checkpointing and 8-bit optimizer:
With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes it's possible to run DreamBooth training on a 16GB GPU.
To install `bitsandbytes` please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation).
```bash
export MODEL_NAME="runwayml/stable-diffusion-inpainting"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"
accelerate launch train_dreambooth_inpaint.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir=$CLASS_DIR \
--output_dir=$OUTPUT_DIR \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="a photo of sks dog" \
--class_prompt="a photo of dog" \
--resolution=512 \
--train_batch_size=1 \
--gradient_accumulation_steps=2 --gradient_checkpointing \
--use_8bit_adam \
--learning_rate=5e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
--max_train_steps=800
```
### Fine-tune text encoder with the UNet.
The script also allows you to fine-tune the `text_encoder` along with the `unet`. It's been observed experimentally that fine-tuning `text_encoder` gives much better results especially on faces.
Pass the `--train_text_encoder` argument to the script to enable training `text_encoder`.
___Note: Training the text encoder requires more memory; with this option the training won't fit on a 16GB GPU. It needs at least 24GB VRAM.___
```bash
export MODEL_NAME="runwayml/stable-diffusion-inpainting"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"
accelerate launch train_dreambooth_inpaint.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_text_encoder \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir=$CLASS_DIR \
--output_dir=$OUTPUT_DIR \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="a photo of sks dog" \
--class_prompt="a photo of dog" \
--resolution=512 \
--train_batch_size=1 \
--use_8bit_adam \
--gradient_checkpointing \
--learning_rate=2e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
--max_train_steps=800
```
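The commands above only cover training. As a rough sketch of inference with the fine-tuned inpainting model (the model path and image files are placeholders), you can use `StableDiffusionInpaintPipeline`:
```python
from diffusers import StableDiffusionInpaintPipeline
from PIL import Image
import torch

model_id = "path-to-save-model"
pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

init_image = Image.open("dog.png").convert("RGB").resize((512, 512))
mask_image = Image.open("dog_mask.png").convert("RGB").resize((512, 512))  # white = region to repaint

image = pipe(
    prompt="A photo of sks dog in a bucket",
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=50,
    guidance_scale=7.5,
).images[0]
image.save("dog-bucket-inpaint.png")
```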
| diffusers/examples/research_projects/dreambooth_inpaint/README.md/0 | {
"file_path": "diffusers/examples/research_projects/dreambooth_inpaint/README.md",
"repo_id": "diffusers",
"token_count": 1501
} |
import argparse
import itertools
import json
import os
import random
import time
from pathlib import Path
import torch
import torch.nn.functional as F
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration
from ip_adapter.ip_adapter import ImageProjModel
from ip_adapter.utils import is_torch2_available
from PIL import Image
from torchvision import transforms
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel
if is_torch2_available():
from ip_adapter.attention_processor import AttnProcessor2_0 as AttnProcessor
from ip_adapter.attention_processor import IPAttnProcessor2_0 as IPAttnProcessor
else:
from ip_adapter.attention_processor import AttnProcessor, IPAttnProcessor
# Dataset
class MyDataset(torch.utils.data.Dataset):
def __init__(
self, json_file, tokenizer, size=512, t_drop_rate=0.05, i_drop_rate=0.05, ti_drop_rate=0.05, image_root_path=""
):
super().__init__()
self.tokenizer = tokenizer
self.size = size
self.i_drop_rate = i_drop_rate
self.t_drop_rate = t_drop_rate
self.ti_drop_rate = ti_drop_rate
self.image_root_path = image_root_path
self.data = json.load(open(json_file)) # list of dict: [{"image_file": "1.png", "text": "A dog"}]
self.transform = transforms.Compose(
[
transforms.Resize(self.size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(self.size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
self.clip_image_processor = CLIPImageProcessor()
def __getitem__(self, idx):
item = self.data[idx]
text = item["text"]
image_file = item["image_file"]
# read image
raw_image = Image.open(os.path.join(self.image_root_path, image_file))
image = self.transform(raw_image.convert("RGB"))
clip_image = self.clip_image_processor(images=raw_image, return_tensors="pt").pixel_values
# randomly drop image and/or text conditioning so the unconditional branches needed for classifier-free guidance are learned
drop_image_embed = 0
rand_num = random.random()
if rand_num < self.i_drop_rate:
drop_image_embed = 1
elif rand_num < (self.i_drop_rate + self.t_drop_rate):
text = ""
elif rand_num < (self.i_drop_rate + self.t_drop_rate + self.ti_drop_rate):
text = ""
drop_image_embed = 1
# get text and tokenize
text_input_ids = self.tokenizer(
text,
max_length=self.tokenizer.model_max_length,
padding="max_length",
truncation=True,
return_tensors="pt",
).input_ids
return {
"image": image,
"text_input_ids": text_input_ids,
"clip_image": clip_image,
"drop_image_embed": drop_image_embed,
}
def __len__(self):
return len(self.data)
def collate_fn(data):
images = torch.stack([example["image"] for example in data])
text_input_ids = torch.cat([example["text_input_ids"] for example in data], dim=0)
clip_images = torch.cat([example["clip_image"] for example in data], dim=0)
drop_image_embeds = [example["drop_image_embed"] for example in data]
return {
"images": images,
"text_input_ids": text_input_ids,
"clip_images": clip_images,
"drop_image_embeds": drop_image_embeds,
}
class IPAdapter(torch.nn.Module):
"""IP-Adapter"""
def __init__(self, unet, image_proj_model, adapter_modules, ckpt_path=None):
super().__init__()
self.unet = unet
self.image_proj_model = image_proj_model
self.adapter_modules = adapter_modules
if ckpt_path is not None:
self.load_from_checkpoint(ckpt_path)
def forward(self, noisy_latents, timesteps, encoder_hidden_states, image_embeds):
ip_tokens = self.image_proj_model(image_embeds)
encoder_hidden_states = torch.cat([encoder_hidden_states, ip_tokens], dim=1)
# Predict the noise residual
noise_pred = self.unet(noisy_latents, timesteps, encoder_hidden_states).sample
return noise_pred
def load_from_checkpoint(self, ckpt_path: str):
# Calculate original checksums
orig_ip_proj_sum = torch.sum(torch.stack([torch.sum(p) for p in self.image_proj_model.parameters()]))
orig_adapter_sum = torch.sum(torch.stack([torch.sum(p) for p in self.adapter_modules.parameters()]))
state_dict = torch.load(ckpt_path, map_location="cpu")
# Load state dict for image_proj_model and adapter_modules
self.image_proj_model.load_state_dict(state_dict["image_proj"], strict=True)
self.adapter_modules.load_state_dict(state_dict["ip_adapter"], strict=True)
# Calculate new checksums
new_ip_proj_sum = torch.sum(torch.stack([torch.sum(p) for p in self.image_proj_model.parameters()]))
new_adapter_sum = torch.sum(torch.stack([torch.sum(p) for p in self.adapter_modules.parameters()]))
# Verify if the weights have changed
assert orig_ip_proj_sum != new_ip_proj_sum, "Weights of image_proj_model did not change!"
assert orig_adapter_sum != new_adapter_sum, "Weights of adapter_modules did not change!"
print(f"Successfully loaded weights from checkpoint {ckpt_path}")
def parse_args():
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--pretrained_ip_adapter_path",
type=str,
default=None,
help="Path to pretrained ip adapter model. If not specified weights are initialized randomly.",
)
parser.add_argument(
"--data_json_file",
type=str,
default=None,
required=True,
help="Training data",
)
parser.add_argument(
"--data_root_path",
type=str,
default="",
required=True,
help="Training data root path",
)
parser.add_argument(
"--image_encoder_path",
type=str,
default=None,
required=True,
help="Path to CLIP image encoder",
)
parser.add_argument(
"--output_dir",
type=str,
default="sd-ip_adapter",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--resolution",
type=int,
default=512,
help=("The resolution for input images"),
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Learning rate to use.",
)
parser.add_argument("--weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=100)
parser.add_argument(
"--train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader."
)
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
parser.add_argument(
"--save_steps",
type=int,
default=2000,
help=("Save a checkpoint of the training state every X updates"),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
return args
def main():
args = parse_args()
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_config=accelerator_project_config,
)
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# Load scheduler, tokenizer and models.
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
image_encoder = CLIPVisionModelWithProjection.from_pretrained(args.image_encoder_path)
# freeze parameters of models to save more memory
unet.requires_grad_(False)
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
image_encoder.requires_grad_(False)
# ip-adapter
image_proj_model = ImageProjModel(
cross_attention_dim=unet.config.cross_attention_dim,
clip_embeddings_dim=image_encoder.config.projection_dim,
clip_extra_context_tokens=4,
)
# init adapter modules
attn_procs = {}
unet_sd = unet.state_dict()
for name in unet.attn_processors.keys():
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
if name.startswith("mid_block"):
hidden_size = unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = unet.config.block_out_channels[block_id]
if cross_attention_dim is None:
attn_procs[name] = AttnProcessor()
else:
layer_name = name.split(".processor")[0]
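            # Initialize the IP-Adapter's new to_k_ip / to_v_ip projections from the UNet's
            # existing to_k / to_v cross-attention weights so training starts from a sensible point.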
weights = {
"to_k_ip.weight": unet_sd[layer_name + ".to_k.weight"],
"to_v_ip.weight": unet_sd[layer_name + ".to_v.weight"],
}
attn_procs[name] = IPAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
attn_procs[name].load_state_dict(weights)
unet.set_attn_processor(attn_procs)
adapter_modules = torch.nn.ModuleList(unet.attn_processors.values())
ip_adapter = IPAdapter(unet, image_proj_model, adapter_modules, args.pretrained_ip_adapter_path)
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# unet.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=weight_dtype)
text_encoder.to(accelerator.device, dtype=weight_dtype)
image_encoder.to(accelerator.device, dtype=weight_dtype)
# optimizer
params_to_opt = itertools.chain(ip_adapter.image_proj_model.parameters(), ip_adapter.adapter_modules.parameters())
optimizer = torch.optim.AdamW(params_to_opt, lr=args.learning_rate, weight_decay=args.weight_decay)
# dataloader
train_dataset = MyDataset(
args.data_json_file, tokenizer=tokenizer, size=args.resolution, image_root_path=args.data_root_path
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
shuffle=True,
collate_fn=collate_fn,
batch_size=args.train_batch_size,
num_workers=args.dataloader_num_workers,
)
# Prepare everything with our `accelerator`.
ip_adapter, optimizer, train_dataloader = accelerator.prepare(ip_adapter, optimizer, train_dataloader)
global_step = 0
for epoch in range(0, args.num_train_epochs):
begin = time.perf_counter()
for step, batch in enumerate(train_dataloader):
load_data_time = time.perf_counter() - begin
with accelerator.accumulate(ip_adapter):
# Convert images to latent space
with torch.no_grad():
latents = vae.encode(
batch["images"].to(accelerator.device, dtype=weight_dtype)
).latent_dist.sample()
latents = latents * vae.config.scaling_factor
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
with torch.no_grad():
image_embeds = image_encoder(
batch["clip_images"].to(accelerator.device, dtype=weight_dtype)
).image_embeds
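                # Randomly zero out image embeddings that were flagged for dropping by the dataset,
                # i.e. classifier-free-guidance style dropout for the image condition.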
image_embeds_ = []
for image_embed, drop_image_embed in zip(image_embeds, batch["drop_image_embeds"]):
if drop_image_embed == 1:
image_embeds_.append(torch.zeros_like(image_embed))
else:
image_embeds_.append(image_embed)
image_embeds = torch.stack(image_embeds_)
with torch.no_grad():
encoder_hidden_states = text_encoder(batch["text_input_ids"].to(accelerator.device))[0]
noise_pred = ip_adapter(noisy_latents, timesteps, encoder_hidden_states, image_embeds)
loss = F.mse_loss(noise_pred.float(), noise.float(), reduction="mean")
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean().item()
# Backpropagate
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
if accelerator.is_main_process:
print(
"Epoch {}, step {}, data_time: {}, time: {}, step_loss: {}".format(
epoch, step, load_data_time, time.perf_counter() - begin, avg_loss
)
)
global_step += 1
if global_step % args.save_steps == 0:
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
begin = time.perf_counter()
if __name__ == "__main__":
main()
| diffusers/examples/research_projects/ip_adapter/tutorial_train_ip-adapter.py/0 | {
"file_path": "diffusers/examples/research_projects/ip_adapter/tutorial_train_ip-adapter.py",
"repo_id": "diffusers",
"token_count": 7360
} |
import torch
import torchvision.transforms as T
from controlnet_aux import HEDdetector
from diffusers.utils import load_image
from examples.research_projects.pixart.controlnet_pixart_alpha import PixArtControlNetAdapterModel
from examples.research_projects.pixart.pipeline_pixart_alpha_controlnet import PixArtAlphaControlnetPipeline
controlnet_repo_id = "raulc0399/pixart-alpha-hed-controlnet"
weight_dtype = torch.float16
image_size = 1024
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.manual_seed(0)
# load controlnet
controlnet = PixArtControlNetAdapterModel.from_pretrained(
controlnet_repo_id,
torch_dtype=weight_dtype,
use_safetensors=True,
).to(device)
pipe = PixArtAlphaControlnetPipeline.from_pretrained(
"PixArt-alpha/PixArt-XL-2-1024-MS",
controlnet=controlnet,
torch_dtype=weight_dtype,
use_safetensors=True,
).to(device)
images_path = "images"
control_image_file = "0_7.jpg"
# prompt = "cinematic photo of superman in action . 35mm photograph, film, bokeh, professional, 4k, highly detailed"
# prompt = "yellow modern car, city in background, beautiful rainy day"
# prompt = "modern villa, clear sky, suny day . 35mm photograph, film, bokeh, professional, 4k, highly detailed"
# prompt = "robot dog toy in park . 35mm photograph, film, bokeh, professional, 4k, highly detailed"
# prompt = "purple car, on highway, beautiful sunny day"
# prompt = "realistical photo of a loving couple standing in the open kitchen of the living room, cooking ."
prompt = "battleship in space, galaxy in background"
control_image_name = control_image_file.split(".")[0]
control_image = load_image(f"{images_path}/{control_image_file}")
print(control_image.size)
width, height = control_image.size  # PIL's Image.size returns (width, height)
hed = HEDdetector.from_pretrained("lllyasviel/Annotators")
condition_transform = T.Compose(
[
T.Lambda(lambda img: img.convert("RGB")),
T.CenterCrop([image_size, image_size]),
]
)
control_image = condition_transform(control_image)
hed_edge = hed(control_image, detect_resolution=image_size, image_resolution=image_size)
hed_edge.save(f"{images_path}/{control_image_name}_hed.jpg")
# run pipeline
with torch.no_grad():
out = pipe(
prompt=prompt,
image=hed_edge,
num_inference_steps=14,
guidance_scale=4.5,
height=image_size,
width=image_size,
)
out.images[0].save(f"{images_path}/{control_image_name}_output.jpg")
| diffusers/examples/research_projects/pixart/run_pixart_alpha_controlnet_pipeline.py/0 | {
"file_path": "diffusers/examples/research_projects/pixart/run_pixart_alpha_controlnet_pipeline.py",
"repo_id": "diffusers",
"token_count": 895
} |
import argparse
import os
import torch
from PIL import Image, ImageFilter
from transformers import CLIPTextModel
from diffusers import DPMSolverMultistepScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
parser = argparse.ArgumentParser(description="Inference")
parser.add_argument(
"--model_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--validation_image",
type=str,
default=None,
required=True,
help="The directory of the validation image",
)
parser.add_argument(
"--validation_mask",
type=str,
default=None,
required=True,
help="The directory of the validation mask",
)
parser.add_argument(
"--output_dir",
type=str,
default="./test-infer/",
help="The output directory where predictions are saved",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible inference.")
args = parser.parse_args()
if __name__ == "__main__":
os.makedirs(args.output_dir, exist_ok=True)
generator = None
# create & load model
pipe = StableDiffusionInpaintPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float32, revision=None
)
pipe.unet = UNet2DConditionModel.from_pretrained(
args.model_path,
subfolder="unet",
revision=None,
)
pipe.text_encoder = CLIPTextModel.from_pretrained(
args.model_path,
subfolder="text_encoder",
revision=None,
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
if args.seed is not None:
generator = torch.Generator(device="cuda").manual_seed(args.seed)
image = Image.open(args.validation_image)
mask_image = Image.open(args.validation_mask)
results = pipe(
["a photo of sks"] * 16,
image=image,
mask_image=mask_image,
num_inference_steps=25,
guidance_scale=5,
generator=generator,
).images
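    # Slightly expand and feather the mask so the generated region blends smoothly with
    # the untouched parts of the original image during the compositing step below.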
erode_kernel = ImageFilter.MaxFilter(3)
mask_image = mask_image.filter(erode_kernel)
blur_kernel = ImageFilter.BoxBlur(1)
mask_image = mask_image.filter(blur_kernel)
for idx, result in enumerate(results):
result = Image.composite(result, image, mask_image)
result.save(f"{args.output_dir}/{idx}.png")
del pipe
torch.cuda.empty_cache()
| diffusers/examples/research_projects/realfill/infer.py/0 | {
"file_path": "diffusers/examples/research_projects/realfill/infer.py",
"repo_id": "diffusers",
"token_count": 984
} |
# Show best practices for SDXL JAX
import time
import jax
import jax.numpy as jnp
import numpy as np
from flax.jax_utils import replicate
# Let's cache the model compilation, so that it doesn't take as long the next time around.
from jax.experimental.compilation_cache import compilation_cache as cc
from diffusers import FlaxStableDiffusionXLPipeline
cc.initialize_cache("/tmp/sdxl_cache")
NUM_DEVICES = jax.device_count()
# 1. Let's start by downloading the model and loading it into our pipeline class
# Adhering to JAX's functional approach, the model's parameters are returned separately and
# will have to be passed to the pipeline during inference
pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", revision="refs/pr/95", split_head_dim=True
)
# 2. We cast all parameters to bfloat16 EXCEPT the scheduler which we leave in
# float32 to keep maximal precision
scheduler_state = params.pop("scheduler")
params = jax.tree_util.tree_map(lambda x: x.astype(jnp.bfloat16), params)
params["scheduler"] = scheduler_state
# 3. Next, we define the different inputs to the pipeline
default_prompt = "a colorful photo of a castle in the middle of a forest with trees and bushes, by Ismail Inceoglu, shadows, high contrast, dynamic shading, hdr, detailed vegetation, digital painting, digital drawing, detailed painting, a detailed digital painting, gothic art, featured on deviantart"
default_neg_prompt = "fog, grainy, purple"
default_seed = 33
default_guidance_scale = 5.0
default_num_steps = 25
# 4. In order to be able to compile the pipeline
# all inputs have to be tensors or strings
# Let's tokenize the prompt and negative prompt
def tokenize_prompt(prompt, neg_prompt):
prompt_ids = pipeline.prepare_inputs(prompt)
neg_prompt_ids = pipeline.prepare_inputs(neg_prompt)
return prompt_ids, neg_prompt_ids
# 5. To make full use of JAX's parallelization capabilities
# the parameters and input tensors are duplicated across devices
# To make sure every device generates a different image, we create
# different seeds for each image. The model parameters won't change
# during inference so we do not wrap them into a function
p_params = replicate(params)
def replicate_all(prompt_ids, neg_prompt_ids, seed):
p_prompt_ids = replicate(prompt_ids)
p_neg_prompt_ids = replicate(neg_prompt_ids)
rng = jax.random.PRNGKey(seed)
rng = jax.random.split(rng, NUM_DEVICES)
return p_prompt_ids, p_neg_prompt_ids, rng
# 6. Let's now put it all together in a generate function
def generate(
prompt,
negative_prompt,
seed=default_seed,
guidance_scale=default_guidance_scale,
num_inference_steps=default_num_steps,
):
prompt_ids, neg_prompt_ids = tokenize_prompt(prompt, negative_prompt)
prompt_ids, neg_prompt_ids, rng = replicate_all(prompt_ids, neg_prompt_ids, seed)
images = pipeline(
prompt_ids,
p_params,
rng,
num_inference_steps=num_inference_steps,
neg_prompt_ids=neg_prompt_ids,
guidance_scale=guidance_scale,
jit=True,
).images
# convert the images to PIL
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
return pipeline.numpy_to_pil(np.array(images))
# 7. Remember that the first call will compile the function and hence be very slow. Let's run generate once
# so that the pipeline call is compiled
start = time.time()
print("Compiling ...")
generate(default_prompt, default_neg_prompt)
print(f"Compiled in {time.time() - start}")
# 8. Now the model forward pass will run very quickly, let's try it again
start = time.time()
prompt = "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang"
neg_prompt = "cartoon, illustration, animation. face. male, female"
images = generate(prompt, neg_prompt)
print(f"Inference in {time.time() - start}")
for i, image in enumerate(images):
image.save(f"castle_{i}.png")
| diffusers/examples/research_projects/sdxl_flax/sdxl_single.py/0 | {
"file_path": "diffusers/examples/research_projects/sdxl_flax/sdxl_single.py",
"repo_id": "diffusers",
"token_count": 1341
} |
## Textual Inversion fine-tuning example
[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images using just 3-5 examples.
The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion.
## Running on Colab
Colab for training
[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)
Colab for inference
[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb)
## Running locally with PyTorch
### Installing the dependencies
Before running the scripts, make sure to install the library's training dependencies:
**Important**
To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```
Then cd in the example folder and run:
```bash
pip install -r requirements.txt
```
And initialize an [🤗 Accelerate](https://github.com/huggingface/accelerate/) environment with:
```bash
accelerate config
```
### Cat toy example
First, let's login so that we can upload the checkpoint to the Hub during training:
```bash
huggingface-cli login
```
Now let's get our dataset. For this example we will use some cat images: https://huggingface.co/datasets/diffusers/cat_toy_example .
Let's first download it locally:
```py
from huggingface_hub import snapshot_download
local_dir = "./cat"
snapshot_download("diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes")
```
This will be our training data.
Now we can launch the training using:
**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**
**___Note: Please follow the [README_sdxl.md](./README_sdxl.md) if you are using the [stable-diffusion-xl](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) model.___**
```bash
export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
export DATA_DIR="./cat"
accelerate launch textual_inversion.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_data_dir=$DATA_DIR \
--learnable_property="object" \
--placeholder_token="<cat-toy>" \
--initializer_token="toy" \
--resolution=512 \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--max_train_steps=3000 \
--learning_rate=5.0e-04 \
--scale_lr \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--push_to_hub \
--output_dir="textual_inversion_cat"
```
A full training run takes ~1 hour on one V100 GPU.
**Note**: As described in [the official paper](https://arxiv.org/abs/2208.01618)
only one embedding vector is used for the placeholder token, *e.g.* `"<cat-toy>"`.
However, one can also add multiple embedding vectors for the placeholder token
to increase the number of fine-tunable parameters. This can help the model learn
more complex details. To use multiple embedding vectors, set `--num_vectors`
to a number larger than one, *e.g.*:
```bash
--num_vectors 5
```
The saved textual inversion vectors will then be larger in size compared to the default case.
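If you want to check how many vectors were actually learned, you can inspect the saved embedding file. The sketch below assumes the script wrote a `learned_embeds.safetensors` file into the output directory (the exact filename can differ between `diffusers` versions); the first dimension of the stored tensor corresponds to `--num_vectors`:
```python
from safetensors.torch import load_file

state = load_file("textual_inversion_cat/learned_embeds.safetensors")
for token, embeds in state.items():
    # e.g. "<cat-toy> (5, 768)" when trained with --num_vectors 5
    print(token, tuple(embeds.shape))
```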
### Inference
Once you have trained a model using the above command, inference can be done simply with the `StableDiffusionPipeline`. Make sure to include the `placeholder_token` in your prompt.
```python
from diffusers import StableDiffusionPipeline
import torch
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
repo_id_embeds = "path-to-your-learned-embeds"
pipe.load_textual_inversion(repo_id_embeds)
prompt = "A <cat-toy> backpack"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("cat-backpack.png")
```
## Training with Flax/JAX
For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script.
Before running the scripts, make sure to install the library's training dependencies:
```bash
pip install -U -r requirements_flax.txt
```
```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export DATA_DIR="path-to-dir-containing-images"
python textual_inversion_flax.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_data_dir=$DATA_DIR \
--learnable_property="object" \
--placeholder_token="<cat-toy>" \
--initializer_token="toy" \
--resolution=512 \
--train_batch_size=1 \
--max_train_steps=3000 \
--learning_rate=5.0e-04 \
--scale_lr \
--output_dir="textual_inversion_cat"
```
It should be at least 70% faster than the PyTorch script with the same configuration.
### Training with xformers:
You can enable memory efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.
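As a quick sketch, the PyTorch launch command shown earlier only needs the extra flag (the remaining arguments stay unchanged):
```bash
accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --enable_xformers_memory_efficient_attention \
  --output_dir="textual_inversion_cat"
  # ...plus the rest of the arguments from the full command above
```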
| diffusers/examples/textual_inversion/README.md/0 | {
"file_path": "diffusers/examples/textual_inversion/README.md",
"repo_id": "diffusers",
"token_count": 1784
} |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import shutil
import sys
import tempfile
import torch
from diffusers import VQModel
from diffusers.utils.testing_utils import require_timm
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_timm
class TextToImage(ExamplesTestsAccelerate):
@property
def test_vqmodel_config(self):
return {
"_class_name": "VQModel",
"_diffusers_version": "0.17.0.dev0",
"act_fn": "silu",
"block_out_channels": [
32,
],
"down_block_types": [
"DownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 2,
"norm_num_groups": 32,
"norm_type": "spatial",
"num_vq_embeddings": 32,
"out_channels": 3,
"sample_size": 32,
"scaling_factor": 0.18215,
"up_block_types": [
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def test_discriminator_config(self):
return {
"_class_name": "Discriminator",
"_diffusers_version": "0.27.0.dev0",
"in_channels": 3,
"cond_channels": 0,
"hidden_channels": 8,
"depth": 4,
}
def get_vq_and_discriminator_configs(self, tmpdir):
vqmodel_config_path = os.path.join(tmpdir, "vqmodel.json")
discriminator_config_path = os.path.join(tmpdir, "discriminator.json")
with open(vqmodel_config_path, "w") as fp:
json.dump(self.test_vqmodel_config, fp)
with open(discriminator_config_path, "w") as fp:
json.dump(self.test_discriminator_config, fp)
return vqmodel_config_path, discriminator_config_path
def test_vqmodel(self):
with tempfile.TemporaryDirectory() as tmpdir:
vqmodel_config_path, discriminator_config_path = self.get_vq_and_discriminator_configs(tmpdir)
test_args = f"""
examples/vqgan/train_vqgan.py
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 32
--image_column image
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--model_config_name_or_path {vqmodel_config_path}
--discriminator_config_name_or_path {discriminator_config_path}
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(
os.path.isfile(os.path.join(tmpdir, "discriminator", "diffusion_pytorch_model.safetensors"))
)
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "vqmodel", "diffusion_pytorch_model.safetensors")))
def test_vqmodel_checkpointing(self):
with tempfile.TemporaryDirectory() as tmpdir:
vqmodel_config_path, discriminator_config_path = self.get_vq_and_discriminator_configs(tmpdir)
# Run training script with checkpointing
# max_train_steps == 4, checkpointing_steps == 2
# Should create checkpoints at steps 2, 4
initial_run_args = f"""
examples/vqgan/train_vqgan.py
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 32
--image_column image
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 4
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--model_config_name_or_path {vqmodel_config_path}
--discriminator_config_name_or_path {discriminator_config_path}
--checkpointing_steps=2
--output_dir {tmpdir}
--seed=0
""".split()
run_command(self._launch_args + initial_run_args)
# check checkpoint directories exist
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4"},
)
# check can run an intermediate checkpoint
model = VQModel.from_pretrained(tmpdir, subfolder="checkpoint-2/vqmodel")
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_ = model(image)
# Remove checkpoint 2 so that we can check only later checkpoints exist after resuming
shutil.rmtree(os.path.join(tmpdir, "checkpoint-2"))
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-4"},
)
# Run training script for 2 total steps resuming from checkpoint 4
resume_run_args = f"""
examples/vqgan/train_vqgan.py
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 32
--image_column image
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 6
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--model_config_name_or_path {vqmodel_config_path}
--discriminator_config_name_or_path {discriminator_config_path}
--checkpointing_steps=1
--resume_from_checkpoint={os.path.join(tmpdir, 'checkpoint-4')}
--output_dir {tmpdir}
--seed=0
""".split()
run_command(self._launch_args + resume_run_args)
# check can run new fully trained pipeline
model = VQModel.from_pretrained(tmpdir, subfolder="vqmodel")
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_ = model(image)
# no checkpoint-2 -> check old checkpoints do not exist
# check new checkpoints exist
            # In the current script, checkpointing_steps=1 is equivalent to checkpointing_steps=2: after the generator is trained for one step,
            # the discriminator is trained, and the loss computation and saving happen after that. Thus we do not expect to get a checkpoint-5.
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-4", "checkpoint-6"},
)
def test_vqmodel_checkpointing_use_ema(self):
with tempfile.TemporaryDirectory() as tmpdir:
vqmodel_config_path, discriminator_config_path = self.get_vq_and_discriminator_configs(tmpdir)
# Run training script with checkpointing
# max_train_steps == 4, checkpointing_steps == 2
# Should create checkpoints at steps 2, 4
initial_run_args = f"""
examples/vqgan/train_vqgan.py
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 32
--image_column image
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 4
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--model_config_name_or_path {vqmodel_config_path}
--discriminator_config_name_or_path {discriminator_config_path}
--checkpointing_steps=2
--output_dir {tmpdir}
--use_ema
--seed=0
""".split()
run_command(self._launch_args + initial_run_args)
model = VQModel.from_pretrained(tmpdir, subfolder="vqmodel")
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_ = model(image)
# check checkpoint directories exist
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4"},
)
# check can run an intermediate checkpoint
model = VQModel.from_pretrained(tmpdir, subfolder="checkpoint-2/vqmodel")
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_ = model(image)
# Remove checkpoint 2 so that we can check only later checkpoints exist after resuming
shutil.rmtree(os.path.join(tmpdir, "checkpoint-2"))
# Run training script for 2 total steps resuming from checkpoint 4
resume_run_args = f"""
examples/vqgan/train_vqgan.py
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 32
--image_column image
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 6
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--model_config_name_or_path {vqmodel_config_path}
--discriminator_config_name_or_path {discriminator_config_path}
--checkpointing_steps=1
--resume_from_checkpoint={os.path.join(tmpdir, 'checkpoint-4')}
--output_dir {tmpdir}
--use_ema
--seed=0
""".split()
run_command(self._launch_args + resume_run_args)
# check can run new fully trained pipeline
model = VQModel.from_pretrained(tmpdir, subfolder="vqmodel")
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_ = model(image)
# no checkpoint-2 -> check old checkpoints do not exist
# check new checkpoints exist
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-4", "checkpoint-6"},
)
def test_vqmodel_checkpointing_checkpoints_total_limit(self):
with tempfile.TemporaryDirectory() as tmpdir:
vqmodel_config_path, discriminator_config_path = self.get_vq_and_discriminator_configs(tmpdir)
# Run training script with checkpointing
# max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2
# Should create checkpoints at steps 2, 4, 6
# with checkpoint at step 2 deleted
initial_run_args = f"""
examples/vqgan/train_vqgan.py
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 32
--image_column image
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 6
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--model_config_name_or_path {vqmodel_config_path}
--discriminator_config_name_or_path {discriminator_config_path}
--output_dir {tmpdir}
--checkpointing_steps=2
--checkpoints_total_limit=2
--seed=0
""".split()
run_command(self._launch_args + initial_run_args)
model = VQModel.from_pretrained(tmpdir, subfolder="vqmodel")
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_ = model(image)
# check checkpoint directories exist
# checkpoint-2 should have been deleted
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"})
def test_vqmodel_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
with tempfile.TemporaryDirectory() as tmpdir:
vqmodel_config_path, discriminator_config_path = self.get_vq_and_discriminator_configs(tmpdir)
# Run training script with checkpointing
# max_train_steps == 4, checkpointing_steps == 2
# Should create checkpoints at steps 2, 4
initial_run_args = f"""
examples/vqgan/train_vqgan.py
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 32
--image_column image
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 4
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--model_config_name_or_path {vqmodel_config_path}
--discriminator_config_name_or_path {discriminator_config_path}
--checkpointing_steps=2
--output_dir {tmpdir}
--seed=0
""".split()
run_command(self._launch_args + initial_run_args)
model = VQModel.from_pretrained(tmpdir, subfolder="vqmodel")
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_ = model(image)
# check checkpoint directories exist
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4"},
)
# resume and we should try to checkpoint at 6, where we'll have to remove
# checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint
resume_run_args = f"""
examples/vqgan/train_vqgan.py
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 32
--image_column image
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 8
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--model_config_name_or_path {vqmodel_config_path}
--discriminator_config_name_or_path {discriminator_config_path}
--output_dir {tmpdir}
--checkpointing_steps=2
--resume_from_checkpoint={os.path.join(tmpdir, 'checkpoint-4')}
--checkpoints_total_limit=2
--seed=0
""".split()
run_command(self._launch_args + resume_run_args)
model = VQModel.from_pretrained(tmpdir, subfolder="vqmodel")
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_ = model(image)
# check checkpoint directories exist
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-6", "checkpoint-8"},
)
| diffusers/examples/vqgan/test_vqgan.py/0 | {
"file_path": "diffusers/examples/vqgan/test_vqgan.py",
"repo_id": "diffusers",
"token_count": 8162
} |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"attn_norm_num_groups": 32,
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"attn_norm_num_groups": 32,
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def str2bool(v):
"""
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
if has_skip:
new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
)
new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
checkpoint = torch.load(checkpoint_path, map_location="cpu")
new_checkpoint = {}
new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
down_block_types = unet_config["down_block_types"]
layers_per_block = unet_config["layers_per_block"]
attention_head_dim = unet_config["attention_head_dim"]
channels_list = unet_config["block_out_channels"]
current_layer = 1
prev_channels = channels_list[0]
for i, layer_type in enumerate(down_block_types):
current_channels = channels_list[i]
downsample_block_has_skip = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(layers_per_block):
new_prefix = f"down_blocks.{i}.resnets.{j}"
old_prefix = f"input_blocks.{current_layer}.0"
has_skip = True if j == 0 and downsample_block_has_skip else False
new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(layers_per_block):
new_prefix = f"down_blocks.{i}.resnets.{j}"
old_prefix = f"input_blocks.{current_layer}.0"
has_skip = True if j == 0 and downsample_block_has_skip else False
new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
new_prefix = f"down_blocks.{i}.attentions.{j}"
old_prefix = f"input_blocks.{current_layer}.1"
new_checkpoint = convert_attention(
checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
)
current_layer += 1
if i != len(down_block_types) - 1:
new_prefix = f"down_blocks.{i}.downsamplers.0"
old_prefix = f"input_blocks.{current_layer}.0"
new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
current_layer += 1
prev_channels = current_channels
# hardcoded the mid-block for now
new_prefix = "mid_block.resnets.0"
old_prefix = "middle_block.0"
new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
new_prefix = "mid_block.attentions.0"
old_prefix = "middle_block.1"
new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
new_prefix = "mid_block.resnets.1"
old_prefix = "middle_block.2"
new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
current_layer = 0
up_block_types = unet_config["up_block_types"]
for i, layer_type in enumerate(up_block_types):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1):
new_prefix = f"up_blocks.{i}.resnets.{j}"
old_prefix = f"output_blocks.{current_layer}.0"
new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
current_layer += 1
if i != len(up_block_types) - 1:
new_prefix = f"up_blocks.{i}.upsamplers.0"
old_prefix = f"output_blocks.{current_layer-1}.1"
new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1):
new_prefix = f"up_blocks.{i}.resnets.{j}"
old_prefix = f"output_blocks.{current_layer}.0"
new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
new_prefix = f"up_blocks.{i}.attentions.{j}"
old_prefix = f"output_blocks.{current_layer}.1"
new_checkpoint = convert_attention(
checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
)
current_layer += 1
if i != len(up_block_types) - 1:
new_prefix = f"up_blocks.{i}.upsamplers.0"
old_prefix = f"output_blocks.{current_layer-1}.2"
new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
args = parser.parse_args()
args.class_cond = str2bool(args.class_cond)
ckpt_name = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
unet_config = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
unet_config["num_class_embeds"] = None
converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
image_unet = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| diffusers/scripts/convert_consistency_to_diffusers.py/0 | {
"file_path": "diffusers/scripts/convert_consistency_to_diffusers.py",
"repo_id": "diffusers",
"token_count": 5773
} |
import argparse
import huggingface_hub
import k_diffusion as K
import torch
from diffusers import UNet2DConditionModel
UPSCALER_REPO = "pcuenq/k-upscaler"
def resnet_to_diffusers_checkpoint(resnet, checkpoint, *, diffusers_resnet_prefix, resnet_prefix):
rv = {
# norm1
f"{diffusers_resnet_prefix}.norm1.linear.weight": checkpoint[f"{resnet_prefix}.main.0.mapper.weight"],
f"{diffusers_resnet_prefix}.norm1.linear.bias": checkpoint[f"{resnet_prefix}.main.0.mapper.bias"],
# conv1
f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.main.2.weight"],
f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.main.2.bias"],
# norm2
f"{diffusers_resnet_prefix}.norm2.linear.weight": checkpoint[f"{resnet_prefix}.main.4.mapper.weight"],
f"{diffusers_resnet_prefix}.norm2.linear.bias": checkpoint[f"{resnet_prefix}.main.4.mapper.bias"],
# conv2
f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.main.6.weight"],
f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.main.6.bias"],
}
if resnet.conv_shortcut is not None:
rv.update(
{
f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{resnet_prefix}.skip.weight"],
}
)
return rv
def self_attn_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix):
weight_q, weight_k, weight_v = checkpoint[f"{attention_prefix}.qkv_proj.weight"].chunk(3, dim=0)
bias_q, bias_k, bias_v = checkpoint[f"{attention_prefix}.qkv_proj.bias"].chunk(3, dim=0)
rv = {
# norm
f"{diffusers_attention_prefix}.norm1.linear.weight": checkpoint[f"{attention_prefix}.norm_in.mapper.weight"],
f"{diffusers_attention_prefix}.norm1.linear.bias": checkpoint[f"{attention_prefix}.norm_in.mapper.bias"],
# to_q
f"{diffusers_attention_prefix}.attn1.to_q.weight": weight_q.squeeze(-1).squeeze(-1),
f"{diffusers_attention_prefix}.attn1.to_q.bias": bias_q,
# to_k
f"{diffusers_attention_prefix}.attn1.to_k.weight": weight_k.squeeze(-1).squeeze(-1),
f"{diffusers_attention_prefix}.attn1.to_k.bias": bias_k,
# to_v
f"{diffusers_attention_prefix}.attn1.to_v.weight": weight_v.squeeze(-1).squeeze(-1),
f"{diffusers_attention_prefix}.attn1.to_v.bias": bias_v,
# to_out
f"{diffusers_attention_prefix}.attn1.to_out.0.weight": checkpoint[f"{attention_prefix}.out_proj.weight"]
.squeeze(-1)
.squeeze(-1),
f"{diffusers_attention_prefix}.attn1.to_out.0.bias": checkpoint[f"{attention_prefix}.out_proj.bias"],
}
return rv
def cross_attn_to_diffusers_checkpoint(
checkpoint, *, diffusers_attention_prefix, diffusers_attention_index, attention_prefix
):
weight_k, weight_v = checkpoint[f"{attention_prefix}.kv_proj.weight"].chunk(2, dim=0)
bias_k, bias_v = checkpoint[f"{attention_prefix}.kv_proj.bias"].chunk(2, dim=0)
rv = {
# norm2 (ada groupnorm)
f"{diffusers_attention_prefix}.norm{diffusers_attention_index}.linear.weight": checkpoint[
f"{attention_prefix}.norm_dec.mapper.weight"
],
f"{diffusers_attention_prefix}.norm{diffusers_attention_index}.linear.bias": checkpoint[
f"{attention_prefix}.norm_dec.mapper.bias"
],
# layernorm on encoder_hidden_state
f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.norm_cross.weight": checkpoint[
f"{attention_prefix}.norm_enc.weight"
],
f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.norm_cross.bias": checkpoint[
f"{attention_prefix}.norm_enc.bias"
],
# to_q
f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_q.weight": checkpoint[
f"{attention_prefix}.q_proj.weight"
]
.squeeze(-1)
.squeeze(-1),
f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_q.bias": checkpoint[
f"{attention_prefix}.q_proj.bias"
],
# to_k
f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_k.weight": weight_k.squeeze(-1).squeeze(-1),
f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_k.bias": bias_k,
# to_v
f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_v.weight": weight_v.squeeze(-1).squeeze(-1),
f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_v.bias": bias_v,
# to_out
f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_out.0.weight": checkpoint[
f"{attention_prefix}.out_proj.weight"
]
.squeeze(-1)
.squeeze(-1),
f"{diffusers_attention_prefix}.attn{diffusers_attention_index}.to_out.0.bias": checkpoint[
f"{attention_prefix}.out_proj.bias"
],
}
return rv
def block_to_diffusers_checkpoint(block, checkpoint, block_idx, block_type):
block_prefix = "inner_model.u_net.u_blocks" if block_type == "up" else "inner_model.u_net.d_blocks"
block_prefix = f"{block_prefix}.{block_idx}"
diffusers_checkpoint = {}
if not hasattr(block, "attentions"):
n = 1 # resnet only
elif not block.attentions[0].add_self_attention:
n = 2 # resnet -> cross-attention
else:
        n = 3  # resnet -> self-attention -> cross-attention
for resnet_idx, resnet in enumerate(block.resnets):
# diffusers_resnet_prefix = f"{diffusers_up_block_prefix}.resnets.{resnet_idx}"
diffusers_resnet_prefix = f"{block_type}_blocks.{block_idx}.resnets.{resnet_idx}"
idx = n * resnet_idx if block_type == "up" else n * resnet_idx + 1
resnet_prefix = f"{block_prefix}.{idx}" if block_type == "up" else f"{block_prefix}.{idx}"
diffusers_checkpoint.update(
resnet_to_diffusers_checkpoint(
resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix
)
)
if hasattr(block, "attentions"):
for attention_idx, attention in enumerate(block.attentions):
diffusers_attention_prefix = f"{block_type}_blocks.{block_idx}.attentions.{attention_idx}"
            idx = n * attention_idx + 1 if block_type == "up" else n * attention_idx + 2
            self_attention_prefix = f"{block_prefix}.{idx}"
            cross_attention_index = 1 if not attention.add_self_attention else 2
            idx = (
                n * attention_idx + cross_attention_index
                if block_type == "up"
                else n * attention_idx + cross_attention_index + 1
            )
            cross_attention_prefix = f"{block_prefix}.{idx}"
diffusers_checkpoint.update(
cross_attn_to_diffusers_checkpoint(
checkpoint,
diffusers_attention_prefix=diffusers_attention_prefix,
diffusers_attention_index=2,
attention_prefix=cross_attention_prefix,
)
)
if attention.add_self_attention is True:
diffusers_checkpoint.update(
self_attn_to_diffusers_checkpoint(
checkpoint,
diffusers_attention_prefix=diffusers_attention_prefix,
attention_prefix=self_attention_prefix,
)
)
return diffusers_checkpoint
def unet_to_diffusers_checkpoint(model, checkpoint):
diffusers_checkpoint = {}
# pre-processing
diffusers_checkpoint.update(
{
"conv_in.weight": checkpoint["inner_model.proj_in.weight"],
"conv_in.bias": checkpoint["inner_model.proj_in.bias"],
}
)
# timestep and class embedding
diffusers_checkpoint.update(
{
"time_proj.weight": checkpoint["inner_model.timestep_embed.weight"].squeeze(-1),
"time_embedding.linear_1.weight": checkpoint["inner_model.mapping.0.weight"],
"time_embedding.linear_1.bias": checkpoint["inner_model.mapping.0.bias"],
"time_embedding.linear_2.weight": checkpoint["inner_model.mapping.2.weight"],
"time_embedding.linear_2.bias": checkpoint["inner_model.mapping.2.bias"],
"time_embedding.cond_proj.weight": checkpoint["inner_model.mapping_cond.weight"],
}
)
# down_blocks
for down_block_idx, down_block in enumerate(model.down_blocks):
diffusers_checkpoint.update(block_to_diffusers_checkpoint(down_block, checkpoint, down_block_idx, "down"))
# up_blocks
for up_block_idx, up_block in enumerate(model.up_blocks):
diffusers_checkpoint.update(block_to_diffusers_checkpoint(up_block, checkpoint, up_block_idx, "up"))
# post-processing
diffusers_checkpoint.update(
{
"conv_out.weight": checkpoint["inner_model.proj_out.weight"],
"conv_out.bias": checkpoint["inner_model.proj_out.bias"],
}
)
return diffusers_checkpoint
def unet_model_from_original_config(original_config):
in_channels = original_config["input_channels"] + original_config["unet_cond_dim"]
out_channels = original_config["input_channels"] + (1 if original_config["has_variance"] else 0)
block_out_channels = original_config["channels"]
assert (
len(set(original_config["depths"])) == 1
), "UNet2DConditionModel currently do not support blocks with different number of layers"
layers_per_block = original_config["depths"][0]
class_labels_dim = original_config["mapping_cond_dim"]
cross_attention_dim = original_config["cross_cond_dim"]
attn1_types = []
attn2_types = []
for s, c in zip(original_config["self_attn_depths"], original_config["cross_attn_depths"]):
if s:
a1 = "self"
a2 = "cross" if c else None
elif c:
a1 = "cross"
a2 = None
else:
a1 = None
a2 = None
attn1_types.append(a1)
attn2_types.append(a2)
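    # Worked example (hypothetical config values): self_attn_depths=[False, True, True, True] together with
    # cross_attn_depths=[True, True, True, True] gives attn1_types=["cross", "self", "self", "self"] and
    # attn2_types=[None, "cross", "cross", "cross"].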
unet = UNet2DConditionModel(
in_channels=in_channels,
out_channels=out_channels,
down_block_types=("KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D"),
mid_block_type=None,
up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
act_fn="gelu",
norm_num_groups=None,
cross_attention_dim=cross_attention_dim,
attention_head_dim=64,
time_cond_proj_dim=class_labels_dim,
resnet_time_scale_shift="scale_shift",
time_embedding_type="fourier",
timestep_post_act="gelu",
conv_in_kernel=1,
conv_out_kernel=1,
)
return unet
def main(args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
orig_config_path = huggingface_hub.hf_hub_download(UPSCALER_REPO, "config_laion_text_cond_latent_upscaler_2.json")
orig_weights_path = huggingface_hub.hf_hub_download(
UPSCALER_REPO, "laion_text_cond_latent_upscaler_2_1_00470000_slim.pth"
)
print(f"loading original model configuration from {orig_config_path}")
print(f"loading original model checkpoint from {orig_weights_path}")
print("converting to diffusers unet")
orig_config = K.config.load_config(open(orig_config_path))["model"]
model = unet_model_from_original_config(orig_config)
orig_checkpoint = torch.load(orig_weights_path, map_location=device)["model_ema"]
converted_checkpoint = unet_to_diffusers_checkpoint(model, orig_checkpoint)
model.load_state_dict(converted_checkpoint, strict=True)
model.save_pretrained(args.dump_path)
print(f"saving converted unet model in {args.dump_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
args = parser.parse_args()
main(args)
| diffusers/scripts/convert_k_upscaler_to_diffusers.py/0 | {
"file_path": "diffusers/scripts/convert_k_upscaler_to_diffusers.py",
"repo_id": "diffusers",
"token_count": 5645
} |
import argparse
import safetensors.torch
from diffusers import AutoencoderTiny
"""
Example - From the diffusers root directory:
Download the weights:
```sh
$ wget -q https://huggingface.co/madebyollin/taesd/resolve/main/taesd_encoder.safetensors
$ wget -q https://huggingface.co/madebyollin/taesd/resolve/main/taesd_decoder.safetensors
```
Convert the model:
```sh
$ python scripts/convert_tiny_autoencoder_to_diffusers.py \
--encoder_ckpt_path taesd_encoder.safetensors \
--decoder_ckpt_path taesd_decoder.safetensors \
--dump_path taesd-diffusers
```
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--encoder_ckpt_path",
default=None,
type=str,
required=True,
help="Path to the encoder ckpt.",
)
parser.add_argument(
"--decoder_ckpt_path",
default=None,
type=str,
required=True,
help="Path to the decoder ckpt.",
)
parser.add_argument(
"--use_safetensors", action="store_true", help="Whether to serialize in the safetensors format."
)
args = parser.parse_args()
print("Loading the original state_dicts of the encoder and the decoder...")
encoder_state_dict = safetensors.torch.load_file(args.encoder_ckpt_path)
decoder_state_dict = safetensors.torch.load_file(args.decoder_ckpt_path)
print("Populating the state_dicts in the diffusers format...")
tiny_autoencoder = AutoencoderTiny()
new_state_dict = {}
# Modify the encoder state dict.
for k in encoder_state_dict:
new_state_dict.update({f"encoder.layers.{k}": encoder_state_dict[k]})
# Modify the decoder state dict.
for k in decoder_state_dict:
layer_id = int(k.split(".")[0]) - 1
new_k = str(layer_id) + "." + ".".join(k.split(".")[1:])
new_state_dict.update({f"decoder.layers.{new_k}": decoder_state_dict[k]})
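    # Example of the remapping above (hypothetical key): a decoder key such as "1.conv.weight" has its
    # leading layer index shifted down by one and is stored as "decoder.layers.0.conv.weight".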
# Assertion tests with the original implementation can be found here:
# https://gist.github.com/sayakpaul/337b0988f08bd2cf2b248206f760e28f
tiny_autoencoder.load_state_dict(new_state_dict)
print("Population successful, serializing...")
tiny_autoencoder.save_pretrained(args.dump_path, safe_serialization=args.use_safetensors)
| diffusers/scripts/convert_tiny_autoencoder_to_diffusers.py/0 | {
"file_path": "diffusers/scripts/convert_tiny_autoencoder_to_diffusers.py",
"repo_id": "diffusers",
"token_count": 990
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
import subprocess
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_bitsandbytes_available,
is_flax_available,
is_google_colab,
is_peft_available,
is_safetensors_available,
is_torch_available,
is_transformers_available,
is_xformers_available,
)
from . import BaseDiffusersCLICommand
def info_command_factory(_):
return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser) -> None:
download_parser = parser.add_parser("env")
download_parser.set_defaults(func=info_command_factory)
def run(self) -> dict:
hub_version = huggingface_hub.__version__
safetensors_version = "not installed"
if is_safetensors_available():
import safetensors
safetensors_version = safetensors.__version__
pt_version = "not installed"
pt_cuda_available = "NA"
if is_torch_available():
import torch
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
flax_version = "not installed"
jax_version = "not installed"
jaxlib_version = "not installed"
jax_backend = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
flax_version = flax.__version__
jax_version = jax.__version__
jaxlib_version = jaxlib.__version__
jax_backend = jax.lib.xla_bridge.get_backend().platform
transformers_version = "not installed"
if is_transformers_available():
import transformers
transformers_version = transformers.__version__
accelerate_version = "not installed"
if is_accelerate_available():
import accelerate
accelerate_version = accelerate.__version__
peft_version = "not installed"
if is_peft_available():
import peft
peft_version = peft.__version__
bitsandbytes_version = "not installed"
if is_bitsandbytes_available():
import bitsandbytes
bitsandbytes_version = bitsandbytes.__version__
xformers_version = "not installed"
if is_xformers_available():
import xformers
xformers_version = xformers.__version__
platform_info = platform.platform()
is_google_colab_str = "Yes" if is_google_colab() else "No"
accelerator = "NA"
if platform.system() in {"Linux", "Windows"}:
try:
sp = subprocess.Popen(
["nvidia-smi", "--query-gpu=gpu_name,memory.total", "--format=csv,noheader"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out_str, _ = sp.communicate()
out_str = out_str.decode("utf-8")
if len(out_str) > 0:
accelerator = out_str.strip()
except FileNotFoundError:
pass
elif platform.system() == "Darwin": # Mac OS
try:
sp = subprocess.Popen(
["system_profiler", "SPDisplaysDataType"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out_str, _ = sp.communicate()
out_str = out_str.decode("utf-8")
start = out_str.find("Chipset Model:")
if start != -1:
start += len("Chipset Model:")
end = out_str.find("\n", start)
accelerator = out_str[start:end].strip()
start = out_str.find("VRAM (Total):")
if start != -1:
start += len("VRAM (Total):")
end = out_str.find("\n", start)
accelerator += " VRAM: " + out_str[start:end].strip()
except FileNotFoundError:
pass
else:
print("It seems you are running an unusual OS. Could you fill in the accelerator manually?")
info = {
"🤗 Diffusers version": version,
"Platform": platform_info,
"Running on Google Colab?": is_google_colab_str,
"Python version": platform.python_version(),
"PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
"Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
"Jax version": jax_version,
"JaxLib version": jaxlib_version,
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"PEFT version": peft_version,
"Bitsandbytes version": bitsandbytes_version,
"Safetensors version": safetensors_version,
"xFormers version": xformers_version,
"Accelerator": accelerator,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
print(self.format_dict(info))
return info
@staticmethod
def format_dict(d: dict) -> str:
return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| diffusers/src/diffusers/commands/env.py/0 | {
"file_path": "diffusers/src/diffusers/commands/env.py",
"repo_id": "diffusers",
"token_count": 2858
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import os
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import safetensors
import torch
import torch.nn as nn
from huggingface_hub import model_info
from huggingface_hub.constants import HF_HUB_OFFLINE
from ..models.modeling_utils import ModelMixin, load_state_dict
from ..utils import (
USE_PEFT_BACKEND,
_get_model_file,
convert_state_dict_to_diffusers,
convert_state_dict_to_peft,
delete_adapter_layers,
deprecate,
get_adapter_name,
get_peft_kwargs,
is_accelerate_available,
is_peft_available,
is_peft_version,
is_transformers_available,
is_transformers_version,
logging,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
)
if is_transformers_available():
from transformers import PreTrainedModel
from ..models.lora import text_encoder_attn_modules, text_encoder_mlp_modules
if is_peft_available():
from peft.tuners.tuners_utils import BaseTunerLayer
if is_accelerate_available():
from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
logger = logging.get_logger(__name__)
LORA_WEIGHT_NAME = "pytorch_lora_weights.bin"
LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors"
def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, adapter_names=None):
"""
Fuses LoRAs for the text encoder.
Args:
text_encoder (`torch.nn.Module`):
The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
attribute.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]` or `str`):
The names of the adapters to use.
"""
merge_kwargs = {"safe_merge": safe_fusing}
for module in text_encoder.modules():
if isinstance(module, BaseTunerLayer):
if lora_scale != 1.0:
module.scale_layer(lora_scale)
# For BC with previous PEFT versions, we need to check the signature
# of the `merge` method to see if it supports the `adapter_names` argument.
supported_merge_kwargs = list(inspect.signature(module.merge).parameters)
if "adapter_names" in supported_merge_kwargs:
merge_kwargs["adapter_names"] = adapter_names
elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None:
raise ValueError(
"The `adapter_names` argument is not supported with your PEFT version. "
"Please upgrade to the latest version of PEFT. `pip install -U peft`"
)
module.merge(**merge_kwargs)
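# Illustrative call (a sketch, assuming `pipe.text_encoder` already has PEFT LoRA layers injected):
#   fuse_text_encoder_lora(pipe.text_encoder, lora_scale=0.8, safe_fusing=True)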
def unfuse_text_encoder_lora(text_encoder):
"""
Unfuses LoRAs for the text encoder.
Args:
text_encoder (`torch.nn.Module`):
The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
attribute.
"""
for module in text_encoder.modules():
if isinstance(module, BaseTunerLayer):
module.unmerge()
def set_adapters_for_text_encoder(
adapter_names: Union[List[str], str],
text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821
text_encoder_weights: Optional[Union[float, List[float], List[None]]] = None,
):
"""
Sets the adapter layers for the text encoder.
Args:
adapter_names (`List[str]` or `str`):
The names of the adapters to use.
text_encoder (`torch.nn.Module`, *optional*):
The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
attribute.
text_encoder_weights (`List[float]`, *optional*):
The weights to use for the text encoder. If `None`, the weights are set to `1.0` for all the adapters.
"""
if text_encoder is None:
raise ValueError(
"The pipeline does not have a default `pipe.text_encoder` class. Please make sure to pass a `text_encoder` instead."
)
def process_weights(adapter_names, weights):
# Expand weights into a list, one entry per adapter
# e.g. for 2 adapters: 7 -> [7,7] ; [3, None] -> [3, None]
if not isinstance(weights, list):
weights = [weights] * len(adapter_names)
if len(adapter_names) != len(weights):
raise ValueError(
f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(weights)}"
)
# Set None values to default of 1.0
# e.g. [7,7] -> [7,7] ; [3, None] -> [3,1]
weights = [w if w is not None else 1.0 for w in weights]
return weights
adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
text_encoder_weights = process_weights(adapter_names, text_encoder_weights)
set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights)
def disable_lora_for_text_encoder(text_encoder: Optional["PreTrainedModel"] = None):
"""
Disables the LoRA layers for the text encoder.
Args:
text_encoder (`torch.nn.Module`, *optional*):
The text encoder module to disable the LoRA layers for. If `None`, it will try to get the `text_encoder`
attribute.
"""
if text_encoder is None:
raise ValueError("Text Encoder not found.")
set_adapter_layers(text_encoder, enabled=False)
def enable_lora_for_text_encoder(text_encoder: Optional["PreTrainedModel"] = None):
"""
Enables the LoRA layers for the text encoder.
Args:
text_encoder (`torch.nn.Module`, *optional*):
The text encoder module to enable the LoRA layers for. If `None`, it will try to get the `text_encoder`
attribute.
"""
if text_encoder is None:
raise ValueError("Text Encoder not found.")
set_adapter_layers(text_encoder, enabled=True)
def _remove_text_encoder_monkey_patch(text_encoder):
recurse_remove_peft_layers(text_encoder)
if getattr(text_encoder, "peft_config", None) is not None:
del text_encoder.peft_config
text_encoder._hf_peft_config_loaded = None
def _fetch_state_dict(
pretrained_model_name_or_path_or_dict,
weight_name,
use_safetensors,
local_files_only,
cache_dir,
force_download,
proxies,
token,
revision,
subfolder,
user_agent,
allow_pickle,
):
model_file = None
if not isinstance(pretrained_model_name_or_path_or_dict, dict):
# Let's first try to load .safetensors weights
if (use_safetensors and weight_name is None) or (
weight_name is not None and weight_name.endswith(".safetensors")
):
try:
# Here we're relaxing the loading check to enable more Inference API
# friendliness where sometimes, it's not at all possible to automatically
# determine `weight_name`.
if weight_name is None:
weight_name = _best_guess_weight_name(
pretrained_model_name_or_path_or_dict,
file_extension=".safetensors",
local_files_only=local_files_only,
)
model_file = _get_model_file(
pretrained_model_name_or_path_or_dict,
weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
local_files_only=local_files_only,
token=token,
revision=revision,
subfolder=subfolder,
user_agent=user_agent,
)
state_dict = safetensors.torch.load_file(model_file, device="cpu")
except (IOError, safetensors.SafetensorError) as e:
if not allow_pickle:
raise e
# try loading non-safetensors weights
model_file = None
pass
if model_file is None:
if weight_name is None:
weight_name = _best_guess_weight_name(
pretrained_model_name_or_path_or_dict, file_extension=".bin", local_files_only=local_files_only
)
model_file = _get_model_file(
pretrained_model_name_or_path_or_dict,
weights_name=weight_name or LORA_WEIGHT_NAME,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
local_files_only=local_files_only,
token=token,
revision=revision,
subfolder=subfolder,
user_agent=user_agent,
)
state_dict = load_state_dict(model_file)
else:
state_dict = pretrained_model_name_or_path_or_dict
return state_dict
def _best_guess_weight_name(
pretrained_model_name_or_path_or_dict, file_extension=".safetensors", local_files_only=False
):
if local_files_only or HF_HUB_OFFLINE:
raise ValueError("When using the offline mode, you must specify a `weight_name`.")
targeted_files = []
if os.path.isfile(pretrained_model_name_or_path_or_dict):
return
elif os.path.isdir(pretrained_model_name_or_path_or_dict):
targeted_files = [f for f in os.listdir(pretrained_model_name_or_path_or_dict) if f.endswith(file_extension)]
else:
files_in_repo = model_info(pretrained_model_name_or_path_or_dict).siblings
targeted_files = [f.rfilename for f in files_in_repo if f.rfilename.endswith(file_extension)]
if len(targeted_files) == 0:
return
# "scheduler" does not correspond to a LoRA checkpoint.
# "optimizer" does not correspond to a LoRA checkpoint
# only top-level checkpoints are considered and not the other ones, hence "checkpoint".
unallowed_substrings = {"scheduler", "optimizer", "checkpoint"}
targeted_files = list(
filter(lambda x: all(substring not in x for substring in unallowed_substrings), targeted_files)
)
if any(f.endswith(LORA_WEIGHT_NAME) for f in targeted_files):
targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME), targeted_files))
elif any(f.endswith(LORA_WEIGHT_NAME_SAFE) for f in targeted_files):
targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME_SAFE), targeted_files))
if len(targeted_files) > 1:
raise ValueError(
f"Provided path contains more than one weights file in the {file_extension} format. Either specify `weight_name` in `load_lora_weights` or make sure there's only one `.safetensors` or `.bin` file in {pretrained_model_name_or_path_or_dict}."
)
weight_name = targeted_files[0]
return weight_name
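# Behavior sketch (hypothetical repo contents): for a repository containing
# ["pixel-art-xl.safetensors", "optimizer.safetensors"], the "optimizer" file is filtered out and
# "pixel-art-xl.safetensors" is returned as the best-guess weight name.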
def _load_lora_into_text_encoder(
state_dict,
network_alphas,
text_encoder,
prefix=None,
lora_scale=1.0,
text_encoder_name="text_encoder",
adapter_name=None,
_pipeline=None,
low_cpu_mem_usage=False,
):
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
peft_kwargs = {}
if low_cpu_mem_usage:
if not is_peft_version(">=", "0.13.1"):
raise ValueError(
"`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`."
)
if not is_transformers_version(">", "4.45.2"):
# Note from sayakpaul: It's not in `transformers` stable yet.
# https://github.com/huggingface/transformers/pull/33725/
raise ValueError(
"`low_cpu_mem_usage=True` is not compatible with this `transformers` version. Please update it with `pip install -U transformers`."
)
peft_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage
from peft import LoraConfig
# If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
# then the `state_dict` keys should have `unet_name` and/or `text_encoder_name` as
# their prefixes.
keys = list(state_dict.keys())
prefix = text_encoder_name if prefix is None else prefix
# Safe prefix to check with.
if any(text_encoder_name in key for key in keys):
# Load the layers corresponding to text encoder and make necessary adjustments.
text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix]
text_encoder_lora_state_dict = {
k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys
}
if len(text_encoder_lora_state_dict) > 0:
logger.info(f"Loading {prefix}.")
rank = {}
text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict)
# convert state dict
text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict)
for name, _ in text_encoder_attn_modules(text_encoder):
for module in ("out_proj", "q_proj", "k_proj", "v_proj"):
rank_key = f"{name}.{module}.lora_B.weight"
if rank_key not in text_encoder_lora_state_dict:
continue
rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1]
for name, _ in text_encoder_mlp_modules(text_encoder):
for module in ("fc1", "fc2"):
rank_key = f"{name}.{module}.lora_B.weight"
if rank_key not in text_encoder_lora_state_dict:
continue
rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1]
if network_alphas is not None:
alpha_keys = [k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix]
network_alphas = {k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys}
lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False)
if "use_dora" in lora_config_kwargs:
if lora_config_kwargs["use_dora"]:
if is_peft_version("<", "0.9.0"):
raise ValueError(
"You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`."
)
else:
if is_peft_version("<", "0.9.0"):
lora_config_kwargs.pop("use_dora")
if "lora_bias" in lora_config_kwargs:
if lora_config_kwargs["lora_bias"]:
if is_peft_version("<=", "0.13.2"):
raise ValueError(
"You need `peft` 0.14.0 at least to use `bias` in LoRAs. Please upgrade your installation of `peft`."
)
else:
if is_peft_version("<=", "0.13.2"):
lora_config_kwargs.pop("lora_bias")
lora_config = LoraConfig(**lora_config_kwargs)
# adapter_name
if adapter_name is None:
adapter_name = get_adapter_name(text_encoder)
is_model_cpu_offload, is_sequential_cpu_offload = _func_optionally_disable_offloading(_pipeline)
# inject LoRA layers and load the state dict
# in transformers we automatically check whether the adapter name is already in use or not
text_encoder.load_adapter(
adapter_name=adapter_name,
adapter_state_dict=text_encoder_lora_state_dict,
peft_config=lora_config,
**peft_kwargs,
)
# scale LoRA layers with `lora_scale`
scale_lora_layers(text_encoder, weight=lora_scale)
text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype)
# Offload back.
if is_model_cpu_offload:
_pipeline.enable_model_cpu_offload()
elif is_sequential_cpu_offload:
_pipeline.enable_sequential_cpu_offload()
# Unsafe code />
def _func_optionally_disable_offloading(_pipeline):
is_model_cpu_offload = False
is_sequential_cpu_offload = False
if _pipeline is not None and _pipeline.hf_device_map is None:
for _, component in _pipeline.components.items():
if isinstance(component, nn.Module) and hasattr(component, "_hf_hook"):
if not is_model_cpu_offload:
is_model_cpu_offload = isinstance(component._hf_hook, CpuOffload)
if not is_sequential_cpu_offload:
is_sequential_cpu_offload = (
isinstance(component._hf_hook, AlignDevicesHook)
or hasattr(component._hf_hook, "hooks")
and isinstance(component._hf_hook.hooks[0], AlignDevicesHook)
)
logger.info(
"Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
)
remove_hook_from_module(component, recurse=is_sequential_cpu_offload)
return (is_model_cpu_offload, is_sequential_cpu_offload)
class LoraBaseMixin:
"""Utility class for handling LoRAs."""
_lora_loadable_modules = []
num_fused_loras = 0
def load_lora_weights(self, **kwargs):
raise NotImplementedError("`load_lora_weights()` is not implemented.")
@classmethod
def save_lora_weights(cls, **kwargs):
raise NotImplementedError("`save_lora_weights()` not implemented.")
@classmethod
def lora_state_dict(cls, **kwargs):
raise NotImplementedError("`lora_state_dict()` is not implemented.")
@classmethod
def _optionally_disable_offloading(cls, _pipeline):
"""
Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU.
Args:
_pipeline (`DiffusionPipeline`):
The pipeline to disable offloading for.
Returns:
tuple:
A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True.
"""
return _func_optionally_disable_offloading(_pipeline=_pipeline)
@classmethod
def _fetch_state_dict(cls, *args, **kwargs):
deprecation_message = f"Using the `_fetch_state_dict()` method from {cls} has been deprecated and will be removed in a future version. Please use `from diffusers.loaders.lora_base import _fetch_state_dict`."
deprecate("_fetch_state_dict", "0.35.0", deprecation_message)
return _fetch_state_dict(*args, **kwargs)
@classmethod
def _best_guess_weight_name(cls, *args, **kwargs):
deprecation_message = f"Using the `_best_guess_weight_name()` method from {cls} has been deprecated and will be removed in a future version. Please use `from diffusers.loaders.lora_base import _best_guess_weight_name`."
deprecate("_best_guess_weight_name", "0.35.0", deprecation_message)
return _best_guess_weight_name(*args, **kwargs)
def unload_lora_weights(self):
"""
Unloads the LoRA parameters.
Examples:
```python
>>> # Assuming `pipeline` is already loaded with the LoRA parameters.
>>> pipeline.unload_lora_weights()
>>> ...
```
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
for component in self._lora_loadable_modules:
model = getattr(self, component, None)
if model is not None:
if issubclass(model.__class__, ModelMixin):
model.unload_lora()
elif issubclass(model.__class__, PreTrainedModel):
_remove_text_encoder_monkey_patch(model)
def fuse_lora(
self,
components: List[str] = [],
lora_scale: float = 1.0,
safe_fusing: bool = False,
adapter_names: Optional[List[str]] = None,
**kwargs,
):
r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks.
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
            components (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
lora_scale (`float`, defaults to 1.0):
Controls how much to influence the outputs with the LoRA parameters.
safe_fusing (`bool`, defaults to `False`):
Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
adapter_names (`List[str]`, *optional*):
Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.
Example:
```py
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.fuse_lora(lora_scale=0.7)
```
"""
if "fuse_unet" in kwargs:
depr_message = "Passing `fuse_unet` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_unet` will be removed in a future version."
deprecate(
"fuse_unet",
"1.0.0",
depr_message,
)
if "fuse_transformer" in kwargs:
depr_message = "Passing `fuse_transformer` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_transformer` will be removed in a future version."
deprecate(
"fuse_transformer",
"1.0.0",
depr_message,
)
if "fuse_text_encoder" in kwargs:
depr_message = "Passing `fuse_text_encoder` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_text_encoder` will be removed in a future version."
deprecate(
"fuse_text_encoder",
"1.0.0",
depr_message,
)
if len(components) == 0:
raise ValueError("`components` cannot be an empty list.")
for fuse_component in components:
if fuse_component not in self._lora_loadable_modules:
raise ValueError(f"{fuse_component} is not found in {self._lora_loadable_modules=}.")
model = getattr(self, fuse_component, None)
if model is not None:
# check if diffusers model
if issubclass(model.__class__, ModelMixin):
model.fuse_lora(lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names)
# handle transformers models.
if issubclass(model.__class__, PreTrainedModel):
fuse_text_encoder_lora(
model, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names
)
self.num_fused_loras += 1
def unfuse_lora(self, components: List[str] = [], **kwargs):
r"""
Reverses the effect of
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).
<Tip warning={true}>
This is an experimental API.
</Tip>
Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
unfuse_text_encoder (`bool`, defaults to `True`):
Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
LoRA parameters then it won't have any effect.
"""
if "unfuse_unet" in kwargs:
depr_message = "Passing `unfuse_unet` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_unet` will be removed in a future version."
deprecate(
"unfuse_unet",
"1.0.0",
depr_message,
)
if "unfuse_transformer" in kwargs:
depr_message = "Passing `unfuse_transformer` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_transformer` will be removed in a future version."
deprecate(
"unfuse_transformer",
"1.0.0",
depr_message,
)
if "unfuse_text_encoder" in kwargs:
depr_message = "Passing `unfuse_text_encoder` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_text_encoder` will be removed in a future version."
deprecate(
"unfuse_text_encoder",
"1.0.0",
depr_message,
)
if len(components) == 0:
raise ValueError("`components` cannot be an empty list.")
for fuse_component in components:
if fuse_component not in self._lora_loadable_modules:
raise ValueError(f"{fuse_component} is not found in {self._lora_loadable_modules=}.")
model = getattr(self, fuse_component, None)
if model is not None:
if issubclass(model.__class__, (ModelMixin, PreTrainedModel)):
for module in model.modules():
if isinstance(module, BaseTunerLayer):
module.unmerge()
self.num_fused_loras -= 1
def set_adapters(
self,
adapter_names: Union[List[str], str],
adapter_weights: Optional[Union[float, Dict, List[float], List[Dict]]] = None,
):
adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
adapter_weights = copy.deepcopy(adapter_weights)
# Expand weights into a list, one entry per adapter
if not isinstance(adapter_weights, list):
adapter_weights = [adapter_weights] * len(adapter_names)
if len(adapter_names) != len(adapter_weights):
raise ValueError(
f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(adapter_weights)}"
)
list_adapters = self.get_list_adapters() # eg {"unet": ["adapter1", "adapter2"], "text_encoder": ["adapter2"]}
# eg ["adapter1", "adapter2"]
all_adapters = {adapter for adapters in list_adapters.values() for adapter in adapters}
missing_adapters = set(adapter_names) - all_adapters
if len(missing_adapters) > 0:
raise ValueError(
f"Adapter name(s) {missing_adapters} not in the list of present adapters: {all_adapters}."
)
# eg {"adapter1": ["unet"], "adapter2": ["unet", "text_encoder"]}
invert_list_adapters = {
adapter: [part for part, adapters in list_adapters.items() if adapter in adapters]
for adapter in all_adapters
}
# Decompose weights into weights for denoiser and text encoders.
_component_adapter_weights = {}
for component in self._lora_loadable_modules:
model = getattr(self, component)
for adapter_name, weights in zip(adapter_names, adapter_weights):
if isinstance(weights, dict):
component_adapter_weights = weights.pop(component, None)
if component_adapter_weights is not None and not hasattr(self, component):
logger.warning(
f"Lora weight dict contains {component} weights but will be ignored because pipeline does not have {component}."
)
if component_adapter_weights is not None and component not in invert_list_adapters[adapter_name]:
logger.warning(
(
f"Lora weight dict for adapter '{adapter_name}' contains {component},"
f"but this will be ignored because {adapter_name} does not contain weights for {component}."
f"Valid parts for {adapter_name} are: {invert_list_adapters[adapter_name]}."
)
)
else:
component_adapter_weights = weights
_component_adapter_weights.setdefault(component, [])
_component_adapter_weights[component].append(component_adapter_weights)
if issubclass(model.__class__, ModelMixin):
model.set_adapters(adapter_names, _component_adapter_weights[component])
elif issubclass(model.__class__, PreTrainedModel):
set_adapters_for_text_encoder(adapter_names, model, _component_adapter_weights[component])
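    # Illustrative usage (assumes adapters named "pixel" and "toy" were already loaded on the pipeline):
    #   pipe.set_adapters(["pixel", "toy"], adapter_weights=[0.7, 0.3])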
def disable_lora(self):
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
for component in self._lora_loadable_modules:
model = getattr(self, component, None)
if model is not None:
if issubclass(model.__class__, ModelMixin):
model.disable_lora()
elif issubclass(model.__class__, PreTrainedModel):
disable_lora_for_text_encoder(model)
def enable_lora(self):
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
for component in self._lora_loadable_modules:
model = getattr(self, component, None)
if model is not None:
if issubclass(model.__class__, ModelMixin):
model.enable_lora()
elif issubclass(model.__class__, PreTrainedModel):
enable_lora_for_text_encoder(model)
def delete_adapters(self, adapter_names: Union[List[str], str]):
"""
        Deletes the LoRA layers of `adapter_names` for the unet and text-encoder(s).
        Args:
            adapter_names (`Union[List[str], str]`):
                The names of the adapters to delete. Can be a single string or a list of strings.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
if isinstance(adapter_names, str):
adapter_names = [adapter_names]
for component in self._lora_loadable_modules:
model = getattr(self, component, None)
if model is not None:
if issubclass(model.__class__, ModelMixin):
model.delete_adapters(adapter_names)
elif issubclass(model.__class__, PreTrainedModel):
for adapter_name in adapter_names:
delete_adapter_layers(model, adapter_name)
def get_active_adapters(self) -> List[str]:
"""
Gets the list of the current active adapters.
Example:
```python
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
).to("cuda")
pipeline.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
pipeline.get_active_adapters()
```
"""
if not USE_PEFT_BACKEND:
raise ValueError(
"PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
)
active_adapters = []
for component in self._lora_loadable_modules:
model = getattr(self, component, None)
if model is not None and issubclass(model.__class__, ModelMixin):
for module in model.modules():
if isinstance(module, BaseTunerLayer):
active_adapters = module.active_adapters
break
return active_adapters
def get_list_adapters(self) -> Dict[str, List[str]]:
"""
Gets the current list of all available adapters in the pipeline.
"""
if not USE_PEFT_BACKEND:
raise ValueError(
"PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
)
set_adapters = {}
for component in self._lora_loadable_modules:
model = getattr(self, component, None)
if (
model is not None
and issubclass(model.__class__, (ModelMixin, PreTrainedModel))
and hasattr(model, "peft_config")
):
set_adapters[component] = list(model.peft_config.keys())
return set_adapters
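    # Example return value (hypothetical adapters): {"unet": ["pixel"], "text_encoder": ["pixel", "toy"]}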
def set_lora_device(self, adapter_names: List[str], device: Union[torch.device, str, int]) -> None:
"""
Moves the LoRAs listed in `adapter_names` to a target device. Useful for offloading the LoRA to the CPU in case
you want to load multiple adapters and free some GPU memory.
Args:
adapter_names (`List[str]`):
                List of adapters to send to the device.
device (`Union[torch.device, str, int]`):
Device to send the adapters to. Can be either a torch device, a str or an integer.
"""
if not USE_PEFT_BACKEND:
raise ValueError("PEFT backend is required for this method.")
for component in self._lora_loadable_modules:
model = getattr(self, component, None)
if model is not None:
for module in model.modules():
if isinstance(module, BaseTunerLayer):
for adapter_name in adapter_names:
module.lora_A[adapter_name].to(device)
module.lora_B[adapter_name].to(device)
# this is a param, not a module, so device placement is not in-place -> re-assign
if hasattr(module, "lora_magnitude_vector") and module.lora_magnitude_vector is not None:
if adapter_name in module.lora_magnitude_vector:
module.lora_magnitude_vector[adapter_name] = module.lora_magnitude_vector[
adapter_name
].to(device)
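    # Illustrative usage (assumes an adapter named "pixel" is loaded): move its LoRA weights to the CPU to
    # free GPU memory, e.g. pipe.set_lora_device(["pixel"], device="cpu").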
@staticmethod
def pack_weights(layers, prefix):
layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
return layers_state_dict
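    # Example (hypothetical key): pack_weights({"linear.lora_A.weight": tensor}, "unet") returns
    # {"unet.linear.lora_A.weight": tensor}.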
@staticmethod
def write_lora_layers(
state_dict: Dict[str, torch.Tensor],
save_directory: str,
is_main_process: bool,
weight_name: str,
save_function: Callable,
safe_serialization: bool,
):
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
if save_function is None:
if safe_serialization:
def save_function(weights, filename):
return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})
else:
save_function = torch.save
os.makedirs(save_directory, exist_ok=True)
if weight_name is None:
if safe_serialization:
weight_name = LORA_WEIGHT_NAME_SAFE
else:
weight_name = LORA_WEIGHT_NAME
save_path = Path(save_directory, weight_name).as_posix()
save_function(state_dict, save_path)
logger.info(f"Model weights saved in {save_path}")
@property
def lora_scale(self) -> float:
# property function that returns the lora scale which can be set at run time by the pipeline.
# if _lora_scale has not been set, return 1
return self._lora_scale if hasattr(self, "_lora_scale") else 1.0
| diffusers/src/diffusers/loaders/lora_base.py/0 | {
"file_path": "diffusers/src/diffusers/loaders/lora_base.py",
"repo_id": "diffusers",
"token_count": 17466
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Callable, List, Optional, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import logging
from .modeling_utils import ModelMixin
logger = logging.get_logger(__name__)
class MultiAdapter(ModelMixin):
r"""
MultiAdapter is a wrapper model that contains multiple adapter models and merges their outputs according to
user-assigned weighting.
This model inherits from [`ModelMixin`]. Check the superclass documentation for common methods such as downloading
or saving.
Args:
        adapters (`List[T2IAdapter]`):
A list of `T2IAdapter` model instances.
"""
def __init__(self, adapters: List["T2IAdapter"]):
super(MultiAdapter, self).__init__()
self.num_adapter = len(adapters)
self.adapters = nn.ModuleList(adapters)
if len(adapters) == 0:
raise ValueError("Expecting at least one adapter")
if len(adapters) == 1:
raise ValueError("For a single adapter, please use the `T2IAdapter` class instead of `MultiAdapter`")
# The outputs from each adapter are added together with a weight.
# This means that the change in dimensions from downsampling must
# be the same for all adapters. Inductively, it also means the
# downscale_factor and total_downscale_factor must be the same for all
# adapters.
first_adapter_total_downscale_factor = adapters[0].total_downscale_factor
first_adapter_downscale_factor = adapters[0].downscale_factor
for idx in range(1, len(adapters)):
if (
adapters[idx].total_downscale_factor != first_adapter_total_downscale_factor
or adapters[idx].downscale_factor != first_adapter_downscale_factor
):
raise ValueError(
f"Expecting all adapters to have the same downscaling behavior, but got:\n"
f"adapters[0].total_downscale_factor={first_adapter_total_downscale_factor}\n"
f"adapters[0].downscale_factor={first_adapter_downscale_factor}\n"
f"adapter[`{idx}`].total_downscale_factor={adapters[idx].total_downscale_factor}\n"
f"adapter[`{idx}`].downscale_factor={adapters[idx].downscale_factor}"
)
self.total_downscale_factor = first_adapter_total_downscale_factor
self.downscale_factor = first_adapter_downscale_factor
def forward(self, xs: torch.Tensor, adapter_weights: Optional[List[float]] = None) -> List[torch.Tensor]:
r"""
Args:
xs (`torch.Tensor`):
A tensor of shape (batch, channel, height, width) representing input images for multiple adapter
models, concatenated along dimension 1(channel dimension). The `channel` dimension should be equal to
`num_adapter` * number of channel per image.
adapter_weights (`List[float]`, *optional*, defaults to None):
A list of floats representing the weights which will be multiplied by each adapter's output before
summing them together. If `None`, equal weights will be used for all adapters.
"""
if adapter_weights is None:
adapter_weights = torch.tensor([1 / self.num_adapter] * self.num_adapter)
else:
adapter_weights = torch.tensor(adapter_weights)
accume_state = None
for x, w, adapter in zip(xs, adapter_weights, self.adapters):
features = adapter(x)
if accume_state is None:
accume_state = features
for i in range(len(accume_state)):
accume_state[i] = w * accume_state[i]
else:
for i in range(len(features)):
accume_state[i] += w * features[i]
return accume_state
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
is_main_process: bool = True,
save_function: Callable = None,
safe_serialization: bool = True,
variant: Optional[str] = None,
):
"""
Save a model and its configuration file to a specified directory, allowing it to be re-loaded with the
        [`~models.adapter.MultiAdapter.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
The directory where the model will be saved. If the directory does not exist, it will be created.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training (e.g.,
                on TPUs) when you need to call this function on all processes. In this case, set `is_main_process=True`
                only for the main process to avoid race conditions.
            save_function (`Callable`):
                The function used to save the state dictionary. Useful for distributed training (e.g., TPUs) to replace
                `torch.save` with another method. Can also be configured using the `DIFFUSERS_SAVE_MODE` environment
                variable.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                If `True`, save the model using `safetensors`. If `False`, save the model with `pickle`.
variant (`str`, *optional*):
If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
"""
idx = 0
model_path_to_save = save_directory
for adapter in self.adapters:
adapter.save_pretrained(
model_path_to_save,
is_main_process=is_main_process,
save_function=save_function,
safe_serialization=safe_serialization,
variant=variant,
)
idx += 1
model_path_to_save = model_path_to_save + f"_{idx}"
@classmethod
def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
r"""
Instantiate a pretrained `MultiAdapter` model from multiple pre-trained adapter models.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, set it back to training mode using `model.train()`.
Warnings:
*Weights from XXX not initialized from pretrained model* means that the weights of XXX are not pretrained
with the rest of the model. It is up to you to train those weights with a downstream fine-tuning. *Weights
from XXX not used in YYY* means that the layer XXX is not used by YYY, so those weights are discarded.
Args:
pretrained_model_path (`os.PathLike`):
A path to a *directory* containing model weights saved using
[`~diffusers.models.adapter.MultiAdapter.save_pretrained`], e.g., `./my_model_directory/adapter`.
torch_dtype (`str` or `torch.dtype`, *optional*):
Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype
will be automatically derived from the model's weights.
output_loading_info(`bool`, *optional*, defaults to `False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
A map that specifies where each submodule should go. It doesn't need to be refined to each
parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
same device.
To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
more information about each option see [designing a device
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
max_memory (`Dict`, *optional*):
A dictionary mapping device identifiers to their maximum memory. Default to the maximum memory
available for each GPU and the available CPU RAM if unset.
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
setting this argument to `True` will raise an error.
variant (`str`, *optional*):
If specified, load weights from a `variant` file (*e.g.* pytorch_model.<variant>.bin). `variant` will
be ignored when using `from_flax`.
use_safetensors (`bool`, *optional*, defaults to `None`):
                If `None`, the `safetensors` weights will be downloaded if available **and** if the `safetensors`
                library is installed. If `True`, the model will be forcibly loaded from the `safetensors` weights. If `False`,
`safetensors` is not used.
"""
idx = 0
adapters = []
# load adapter and append to list until no adapter directory exists anymore
# first adapter has to be saved under `./mydirectory/adapter` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... adapters have to be saved under `./mydirectory/adapter_1`, `./mydirectory/adapter_2`, ...
model_path_to_load = pretrained_model_path
while os.path.isdir(model_path_to_load):
adapter = T2IAdapter.from_pretrained(model_path_to_load, **kwargs)
adapters.append(adapter)
idx += 1
model_path_to_load = pretrained_model_path + f"_{idx}"
logger.info(f"{len(adapters)} adapters loaded from {pretrained_model_path}.")
if len(adapters) == 0:
raise ValueError(
f"No T2IAdapters found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
)
return cls(adapters)
class T2IAdapter(ModelMixin, ConfigMixin):
r"""
A simple ResNet-like model that accepts images containing control signals such as keyposes and depth. The model
generates multiple feature maps that are used as additional conditioning in [`UNet2DConditionModel`]. The model's
architecture follows the original implementation of
[Adapter](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L97)
and
[AdapterLight](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L235).
This model inherits from [`ModelMixin`]. Check the superclass documentation for the common methods, such as
downloading or saving.
Args:
in_channels (`int`, *optional*, defaults to `3`):
The number of channels in the adapter's input (*control image*). Set it to 1 if you're using a gray scale
image.
channels (`List[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The number of channels in each downsample block's output hidden state. The `len(block_out_channels)`
determines the number of downsample blocks in the adapter.
num_res_blocks (`int`, *optional*, defaults to `2`):
Number of ResNet blocks in each downsample block.
downscale_factor (`int`, *optional*, defaults to `8`):
A factor that determines the total downscale factor of the Adapter.
adapter_type (`str`, *optional*, defaults to `full_adapter`):
Adapter type (`full_adapter` or `full_adapter_xl` or `light_adapter`) to use.
"""
@register_to_config
def __init__(
self,
in_channels: int = 3,
channels: List[int] = [320, 640, 1280, 1280],
num_res_blocks: int = 2,
downscale_factor: int = 8,
adapter_type: str = "full_adapter",
):
super().__init__()
if adapter_type == "full_adapter":
self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor)
elif adapter_type == "full_adapter_xl":
self.adapter = FullAdapterXL(in_channels, channels, num_res_blocks, downscale_factor)
elif adapter_type == "light_adapter":
self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor)
else:
raise ValueError(
f"Unsupported adapter_type: '{adapter_type}'. Choose either 'full_adapter' or "
"'full_adapter_xl' or 'light_adapter'."
)
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
r"""
This function processes the input tensor `x` through the adapter model and returns a list of feature tensors,
each representing information extracted at a different scale from the input. The length of the list is
determined by the number of downsample blocks in the Adapter, as specified by the `channels` and
`num_res_blocks` parameters during initialization.
"""
return self.adapter(x)
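    # Minimal usage sketch (assumes the default "full_adapter" configuration and a random control image):
    #   adapter = T2IAdapter()
    #   features = adapter(torch.randn(1, 3, 512, 512))  # list of 4 feature maps, from 64x64 down to 8x8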
@property
def total_downscale_factor(self):
return self.adapter.total_downscale_factor
@property
def downscale_factor(self):
"""The downscale factor applied in the T2I-Adapter's initial pixel unshuffle operation. If an input image's dimensions are
not evenly divisible by the downscale_factor then an exception will be raised.
"""
return self.adapter.unshuffle.downscale_factor
# full adapter
class FullAdapter(nn.Module):
r"""
See [`T2IAdapter`] for more information.
"""
def __init__(
self,
in_channels: int = 3,
channels: List[int] = [320, 640, 1280, 1280],
num_res_blocks: int = 2,
downscale_factor: int = 8,
):
super().__init__()
in_channels = in_channels * downscale_factor**2
self.unshuffle = nn.PixelUnshuffle(downscale_factor)
self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1)
self.body = nn.ModuleList(
[
AdapterBlock(channels[0], channels[0], num_res_blocks),
*[
AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True)
for i in range(1, len(channels))
],
]
)
self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1)
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
r"""
This method processes the input tensor `x` through the FullAdapter model and performs operations including
pixel unshuffling, convolution, and a stack of AdapterBlocks. It returns a list of feature tensors, each
capturing information at a different stage of processing within the FullAdapter model. The number of feature
tensors in the list is determined by the number of downsample blocks specified during initialization.
"""
x = self.unshuffle(x)
x = self.conv_in(x)
features = []
for block in self.body:
x = block(x)
features.append(x)
return features
class FullAdapterXL(nn.Module):
r"""
See [`T2IAdapter`] for more information.
"""
def __init__(
self,
in_channels: int = 3,
channels: List[int] = [320, 640, 1280, 1280],
num_res_blocks: int = 2,
downscale_factor: int = 16,
):
super().__init__()
in_channels = in_channels * downscale_factor**2
self.unshuffle = nn.PixelUnshuffle(downscale_factor)
self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1)
self.body = []
# blocks to extract XL features with dimensions of [320, 64, 64], [640, 64, 64], [1280, 32, 32], [1280, 32, 32]
for i in range(len(channels)):
if i == 1:
self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks))
elif i == 2:
self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True))
else:
self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks))
self.body = nn.ModuleList(self.body)
# XL has only one downsampling AdapterBlock.
self.total_downscale_factor = downscale_factor * 2
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
r"""
        This method takes the tensor x as input and processes it through the FullAdapterXL model. It performs a pixel
        unshuffle, applies the input convolution, and then passes the result through each block, appending every
        block's output to a list of feature tensors.
"""
x = self.unshuffle(x)
x = self.conv_in(x)
features = []
for block in self.body:
x = block(x)
features.append(x)
return features
class AdapterBlock(nn.Module):
r"""
An AdapterBlock is a helper model that contains multiple ResNet-like blocks. It is used in the `FullAdapter` and
`FullAdapterXL` models.
Args:
in_channels (`int`):
Number of channels of AdapterBlock's input.
out_channels (`int`):
Number of channels of AdapterBlock's output.
num_res_blocks (`int`):
Number of ResNet blocks in the AdapterBlock.
down (`bool`, *optional*, defaults to `False`):
If `True`, perform downsampling on AdapterBlock's input.
"""
def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
super().__init__()
self.downsample = None
if down:
self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.in_conv = None
if in_channels != out_channels:
self.in_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
self.resnets = nn.Sequential(
*[AdapterResnetBlock(out_channels) for _ in range(num_res_blocks)],
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""
        This method takes tensor x as input and, when they are defined, applies the downsampling and input
        convolution layers (`self.downsample` and `self.in_conv`) of the AdapterBlock. It then applies a series of
        residual blocks to the tensor and returns the result.
"""
if self.downsample is not None:
x = self.downsample(x)
if self.in_conv is not None:
x = self.in_conv(x)
x = self.resnets(x)
return x
class AdapterResnetBlock(nn.Module):
r"""
An `AdapterResnetBlock` is a helper model that implements a ResNet-like block.
Args:
channels (`int`):
Number of channels of AdapterResnetBlock's input and output.
"""
def __init__(self, channels: int):
super().__init__()
self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.act = nn.ReLU()
self.block2 = nn.Conv2d(channels, channels, kernel_size=1)
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""
        Applies a 3x3 convolution, a ReLU activation, and a 1x1 convolution to the input tensor `x`, then returns the
        result added to `x` (a residual connection).
"""
h = self.act(self.block1(x))
h = self.block2(h)
return h + x
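# Editor's sketch (hypothetical helper, not part of the upstream diffusers file): the residual block keeps
# the input shape, so the skip connection `h + x` is always well defined.
def _sketch_adapter_resnet_block():
    block = AdapterResnetBlock(channels=320)
    x = torch.randn(1, 320, 32, 32)
    y = block(x)
    assert y.shape == x.shape
    return tuple(y.shape)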
# light adapter
class LightAdapter(nn.Module):
r"""
See [`T2IAdapter`] for more information.
"""
def __init__(
self,
in_channels: int = 3,
channels: List[int] = [320, 640, 1280],
num_res_blocks: int = 4,
downscale_factor: int = 8,
):
super().__init__()
in_channels = in_channels * downscale_factor**2
self.unshuffle = nn.PixelUnshuffle(downscale_factor)
self.body = nn.ModuleList(
[
LightAdapterBlock(in_channels, channels[0], num_res_blocks),
*[
LightAdapterBlock(channels[i], channels[i + 1], num_res_blocks, down=True)
for i in range(len(channels) - 1)
],
LightAdapterBlock(channels[-1], channels[-1], num_res_blocks, down=True),
]
)
self.total_downscale_factor = downscale_factor * (2 ** len(channels))
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
r"""
        Pixel-unshuffles the input tensor `x` and passes it through the adapter blocks, collecting the output of each
        block. Each returned feature tensor corresponds to a different resolution level of the LightAdapter.
"""
x = self.unshuffle(x)
features = []
for block in self.body:
x = block(x)
features.append(x)
return features
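# Editor's sketch (hypothetical helper, not part of the upstream diffusers file): with the defaults above
# (channels=[320, 640, 1280], downscale_factor=8) and a 512x512 input, the light adapter returns four
# feature maps, because the extra final block downsamples once more.
def _sketch_light_adapter_shapes():
    adapter = LightAdapter()
    x = torch.randn(1, 3, 512, 512)
    features = adapter(x)
    # Expected shapes:
    #   features[0]: [1, 320, 64, 64]
    #   features[1]: [1, 640, 32, 32]
    #   features[2]: [1, 1280, 16, 16]
    #   features[3]: [1, 1280, 8, 8]
    return [tuple(f.shape) for f in features]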
class LightAdapterBlock(nn.Module):
r"""
A `LightAdapterBlock` is a helper model that contains multiple `LightAdapterResnetBlocks`. It is used in the
`LightAdapter` model.
Args:
in_channels (`int`):
Number of channels of LightAdapterBlock's input.
out_channels (`int`):
Number of channels of LightAdapterBlock's output.
num_res_blocks (`int`):
Number of LightAdapterResnetBlocks in the LightAdapterBlock.
down (`bool`, *optional*, defaults to `False`):
If `True`, perform downsampling on LightAdapterBlock's input.
"""
def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
super().__init__()
mid_channels = out_channels // 4
self.downsample = None
if down:
self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.in_conv = nn.Conv2d(in_channels, mid_channels, kernel_size=1)
self.resnets = nn.Sequential(*[LightAdapterResnetBlock(mid_channels) for _ in range(num_res_blocks)])
self.out_conv = nn.Conv2d(mid_channels, out_channels, kernel_size=1)
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""
        Optionally downsamples the input tensor `x`, then applies the input 1x1 convolution, a sequence of residual
        blocks, and the output 1x1 convolution.
"""
if self.downsample is not None:
x = self.downsample(x)
x = self.in_conv(x)
x = self.resnets(x)
x = self.out_conv(x)
return x
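# Editor's sketch (hypothetical helper, not part of the upstream diffusers file): a LightAdapterBlock
# squeezes the channels to `out_channels // 4` for its residual blocks and expands back with a 1x1
# convolution.
def _sketch_light_adapter_block():
    block = LightAdapterBlock(in_channels=320, out_channels=640, num_res_blocks=4, down=True)
    x = torch.randn(1, 320, 64, 64)
    y = block(x)
    # AvgPool2d(2) -> [1, 320, 32, 32]; in_conv -> [1, 160, 32, 32] (640 // 4 mid channels);
    # resnets keep the shape; out_conv -> [1, 640, 32, 32].
    return tuple(y.shape)  # (1, 640, 32, 32)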
class LightAdapterResnetBlock(nn.Module):
"""
A `LightAdapterResnetBlock` is a helper model that implements a ResNet-like block with a slightly different
architecture than `AdapterResnetBlock`.
Args:
channels (`int`):
Number of channels of LightAdapterResnetBlock's input and output.
"""
def __init__(self, channels: int):
super().__init__()
self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.act = nn.ReLU()
self.block2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""
        Applies a 3x3 convolution, a ReLU activation, and a second 3x3 convolution to the input tensor `x`, then
        returns the result added to `x` (a residual connection).
"""
h = self.act(self.block1(x))
h = self.block2(h)
return h + x
| diffusers/src/diffusers/models/adapter.py/0 | {
"file_path": "diffusers/src/diffusers/models/adapter.py",
"repo_id": "diffusers",
"token_count": 10132
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...schedulers import ConsistencyDecoderScheduler
from ...utils import BaseOutput
from ...utils.accelerate_utils import apply_forward_hook
from ...utils.torch_utils import randn_tensor
from ..attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
)
from ..modeling_utils import ModelMixin
from ..unets.unet_2d import UNet2DModel
from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class ConsistencyDecoderVAEOutput(BaseOutput):
"""
Output of encoding method.
Args:
latent_dist (`DiagonalGaussianDistribution`):
Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
`DiagonalGaussianDistribution` allows for sampling latents from the distribution.
"""
latent_dist: "DiagonalGaussianDistribution"
class ConsistencyDecoderVAE(ModelMixin, ConfigMixin):
r"""
The consistency decoder used with DALL-E 3.
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionPipeline, ConsistencyDecoderVAE
>>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
>>> pipe = StableDiffusionPipeline.from_pretrained(
... "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
... ).to("cuda")
>>> image = pipe("horse", generator=torch.manual_seed(0)).images[0]
>>> image
```
"""
@register_to_config
def __init__(
self,
scaling_factor: float = 0.18215,
latent_channels: int = 4,
sample_size: int = 32,
encoder_act_fn: str = "silu",
encoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
encoder_double_z: bool = True,
encoder_down_block_types: Tuple[str, ...] = (
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
),
encoder_in_channels: int = 3,
encoder_layers_per_block: int = 2,
encoder_norm_num_groups: int = 32,
encoder_out_channels: int = 4,
decoder_add_attention: bool = False,
decoder_block_out_channels: Tuple[int, ...] = (320, 640, 1024, 1024),
decoder_down_block_types: Tuple[str, ...] = (
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
),
decoder_downsample_padding: int = 1,
decoder_in_channels: int = 7,
decoder_layers_per_block: int = 3,
decoder_norm_eps: float = 1e-05,
decoder_norm_num_groups: int = 32,
decoder_num_train_timesteps: int = 1024,
decoder_out_channels: int = 6,
decoder_resnet_time_scale_shift: str = "scale_shift",
decoder_time_embedding_type: str = "learned",
decoder_up_block_types: Tuple[str, ...] = (
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
),
):
super().__init__()
self.encoder = Encoder(
act_fn=encoder_act_fn,
block_out_channels=encoder_block_out_channels,
double_z=encoder_double_z,
down_block_types=encoder_down_block_types,
in_channels=encoder_in_channels,
layers_per_block=encoder_layers_per_block,
norm_num_groups=encoder_norm_num_groups,
out_channels=encoder_out_channels,
)
self.decoder_unet = UNet2DModel(
add_attention=decoder_add_attention,
block_out_channels=decoder_block_out_channels,
down_block_types=decoder_down_block_types,
downsample_padding=decoder_downsample_padding,
in_channels=decoder_in_channels,
layers_per_block=decoder_layers_per_block,
norm_eps=decoder_norm_eps,
norm_num_groups=decoder_norm_num_groups,
num_train_timesteps=decoder_num_train_timesteps,
out_channels=decoder_out_channels,
resnet_time_scale_shift=decoder_resnet_time_scale_shift,
time_embedding_type=decoder_time_embedding_type,
up_block_types=decoder_up_block_types,
)
self.decoder_scheduler = ConsistencyDecoderScheduler()
self.register_to_config(block_out_channels=encoder_block_out_channels)
self.register_to_config(force_upcast=False)
self.register_buffer(
"means",
torch.tensor([0.38862467, 0.02253063, 0.07381133, -0.0171294])[None, :, None, None],
persistent=False,
)
self.register_buffer(
"stds", torch.tensor([0.9654121, 1.0440036, 0.76147926, 0.77022034])[None, :, None, None], persistent=False
)
self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
self.use_slicing = False
self.use_tiling = False
# only relevant if vae tiling is enabled
self.tile_sample_min_size = self.config.sample_size
sample_size = (
self.config.sample_size[0]
if isinstance(self.config.sample_size, (list, tuple))
else self.config.sample_size
)
self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
self.tile_overlap_factor = 0.25
# Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_tiling
def enable_tiling(self, use_tiling: bool = True):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and for
        processing larger images.
"""
self.use_tiling = use_tiling
# Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_tiling
def disable_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
decoding in one step.
"""
self.enable_tiling(False)
# Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_slicing
def enable_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.use_slicing = True
# Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_slicing
def disable_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
decoding in one step.
"""
self.use_slicing = False
@property
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor()
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnAddedKVProcessor()
elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnProcessor()
else:
raise ValueError(
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
)
self.set_attn_processor(processor)
@apply_forward_hook
def encode(
self, x: torch.Tensor, return_dict: bool = True
) -> Union[ConsistencyDecoderVAEOutput, Tuple[DiagonalGaussianDistribution]]:
"""
Encode a batch of images into latents.
Args:
x (`torch.Tensor`): Input batch of images.
return_dict (`bool`, *optional*, defaults to `True`):
Whether to return a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
instead of a plain tuple.
Returns:
The latent representations of the encoded images. If `return_dict` is True, a
[`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, otherwise a
plain `tuple` is returned.
"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(x, return_dict=return_dict)
if self.use_slicing and x.shape[0] > 1:
encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
h = torch.cat(encoded_slices)
else:
h = self.encoder(x)
moments = self.quant_conv(h)
posterior = DiagonalGaussianDistribution(moments)
if not return_dict:
return (posterior,)
return ConsistencyDecoderVAEOutput(latent_dist=posterior)
@apply_forward_hook
def decode(
self,
z: torch.Tensor,
generator: Optional[torch.Generator] = None,
return_dict: bool = True,
num_inference_steps: int = 2,
) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
"""
Decodes the input latent vector `z` using the consistency decoder VAE model.
Args:
z (torch.Tensor): The input latent vector.
generator (Optional[torch.Generator]): The random number generator. Default is None.
return_dict (bool): Whether to return the output as a dictionary. Default is True.
num_inference_steps (int): The number of inference steps. Default is 2.
Returns:
Union[DecoderOutput, Tuple[torch.Tensor]]: The decoded output.
"""
z = (z * self.config.scaling_factor - self.means) / self.stds
scale_factor = 2 ** (len(self.config.block_out_channels) - 1)
z = F.interpolate(z, mode="nearest", scale_factor=scale_factor)
batch_size, _, height, width = z.shape
self.decoder_scheduler.set_timesteps(num_inference_steps, device=self.device)
x_t = self.decoder_scheduler.init_noise_sigma * randn_tensor(
(batch_size, 3, height, width), generator=generator, dtype=z.dtype, device=z.device
)
for t in self.decoder_scheduler.timesteps:
model_input = torch.concat([self.decoder_scheduler.scale_model_input(x_t, t), z], dim=1)
model_output = self.decoder_unet(model_input, t).sample[:, :3, :, :]
prev_sample = self.decoder_scheduler.step(model_output, t, x_t, generator).prev_sample
x_t = prev_sample
x_0 = x_t
if not return_dict:
return (x_0,)
return DecoderOutput(sample=x_0)
# Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_v
def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
blend_extent = min(a.shape[2], b.shape[2], blend_extent)
for y in range(blend_extent):
b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
# Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_h
def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
blend_extent = min(a.shape[3], b.shape[3], blend_extent)
for x in range(blend_extent):
b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
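    # Worked example (editor's note, not part of the upstream file): with `blend_extent=4`, the left/top
    # tile `a` and the current tile `b` are mixed linearly over the overlapping rows or columns. In
    # `blend_h`, column x of the result uses weight (1 - x/4) for `a` and x/4 for `b`, i.e. 1.0/0.0,
    # 0.75/0.25, 0.5/0.5, 0.25/0.75 for x = 0..3, so the seam fades smoothly from `a` into `b`.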
def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> Union[ConsistencyDecoderVAEOutput, Tuple]:
r"""Encode a batch of images using a tiled encoder.
When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is
        different from non-tiled encoding because each tile is encoded independently. To avoid tiling artifacts, the
tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
output, but they should be much less noticeable.
Args:
x (`torch.Tensor`): Input batch of images.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
instead of a plain tuple.
Returns:
[`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] or `tuple`:
If return_dict is True, a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
is returned, otherwise a plain `tuple` is returned.
"""
overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into overlapping tiles of size `tile_sample_min_size` and encode them separately.
rows = []
for i in range(0, x.shape[2], overlap_size):
row = []
for j in range(0, x.shape[3], overlap_size):
tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
tile = self.encoder(tile)
tile = self.quant_conv(tile)
row.append(tile)
rows.append(row)
result_rows = []
for i, row in enumerate(rows):
result_row = []
for j, tile in enumerate(row):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
if j > 0:
tile = self.blend_h(row[j - 1], tile, blend_extent)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(result_row, dim=3))
moments = torch.cat(result_rows, dim=2)
posterior = DiagonalGaussianDistribution(moments)
if not return_dict:
return (posterior,)
return ConsistencyDecoderVAEOutput(latent_dist=posterior)
def forward(
self,
sample: torch.Tensor,
sample_posterior: bool = False,
return_dict: bool = True,
generator: Optional[torch.Generator] = None,
) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
r"""
Args:
sample (`torch.Tensor`): Input sample.
sample_posterior (`bool`, *optional*, defaults to `False`):
Whether to sample from the posterior.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
generator (`torch.Generator`, *optional*, defaults to `None`):
Generator to use for sampling.
Returns:
[`DecoderOutput`] or `tuple`:
If return_dict is True, a [`DecoderOutput`] is returned, otherwise a plain `tuple` is returned.
"""
x = sample
posterior = self.encode(x).latent_dist
if sample_posterior:
z = posterior.sample(generator=generator)
else:
z = posterior.mode()
dec = self.decode(z, generator=generator).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec)
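# Editor's sketch (hypothetical helper, not part of the upstream diffusers file): a minimal encode/decode
# roundtrip with randomly initialised weights, only to show the tensor shapes involved. With the default
# config (4 latent channels, three downsampling encoder blocks), a 256x256 image maps to a 32x32 latent,
# and `decode` runs `num_inference_steps` consistency steps to produce a 3-channel image.
def _sketch_consistency_decoder_vae_roundtrip():
    vae = ConsistencyDecoderVAE()
    generator = torch.Generator().manual_seed(0)
    image = torch.randn(1, 3, 256, 256)
    posterior = vae.encode(image).latent_dist
    latents = posterior.sample(generator=generator)  # [1, 4, 32, 32]
    decoded = vae.decode(latents, generator=generator, num_inference_steps=2).sample
    return tuple(latents.shape), tuple(decoded.shape)  # ((1, 4, 32, 32), (1, 3, 256, 256))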
| diffusers/src/diffusers/models/autoencoders/consistency_decoder_vae.py/0 | {
"file_path": "diffusers/src/diffusers/models/autoencoders/consistency_decoder_vae.py",
"repo_id": "diffusers",
"token_count": 8595
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from math import gcd
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...utils import BaseOutput, logging
from ...utils.torch_utils import apply_freeu
from ..attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
Attention,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
FusedAttnProcessor2_0,
)
from ..embeddings import TimestepEmbedding, Timesteps
from ..modeling_utils import ModelMixin
from ..unets.unet_2d_blocks import (
CrossAttnDownBlock2D,
CrossAttnUpBlock2D,
Downsample2D,
ResnetBlock2D,
Transformer2DModel,
UNetMidBlock2DCrossAttn,
Upsample2D,
)
from ..unets.unet_2d_condition import UNet2DConditionModel
from .controlnet import ControlNetConditioningEmbedding
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class ControlNetXSOutput(BaseOutput):
"""
The output of [`UNetControlNetXSModel`].
Args:
sample (`Tensor` of shape `(batch_size, num_channels, height, width)`):
The output of the `UNetControlNetXSModel`. Unlike `ControlNetOutput` this is NOT to be added to the base
model output, but is already the final output.
"""
sample: Tensor = None
class DownBlockControlNetXSAdapter(nn.Module):
"""Components that together with corresponding components from the base model will form a
`ControlNetXSCrossAttnDownBlock2D`"""
def __init__(
self,
resnets: nn.ModuleList,
base_to_ctrl: nn.ModuleList,
ctrl_to_base: nn.ModuleList,
attentions: Optional[nn.ModuleList] = None,
downsampler: Optional[nn.Conv2d] = None,
):
super().__init__()
self.resnets = resnets
self.base_to_ctrl = base_to_ctrl
self.ctrl_to_base = ctrl_to_base
self.attentions = attentions
self.downsamplers = downsampler
class MidBlockControlNetXSAdapter(nn.Module):
"""Components that together with corresponding components from the base model will form a
`ControlNetXSCrossAttnMidBlock2D`"""
def __init__(self, midblock: UNetMidBlock2DCrossAttn, base_to_ctrl: nn.ModuleList, ctrl_to_base: nn.ModuleList):
super().__init__()
self.midblock = midblock
self.base_to_ctrl = base_to_ctrl
self.ctrl_to_base = ctrl_to_base
class UpBlockControlNetXSAdapter(nn.Module):
"""Components that together with corresponding components from the base model will form a `ControlNetXSCrossAttnUpBlock2D`"""
def __init__(self, ctrl_to_base: nn.ModuleList):
super().__init__()
self.ctrl_to_base = ctrl_to_base
def get_down_block_adapter(
base_in_channels: int,
base_out_channels: int,
ctrl_in_channels: int,
ctrl_out_channels: int,
temb_channels: int,
max_norm_num_groups: Optional[int] = 32,
has_crossattn=True,
transformer_layers_per_block: Optional[Union[int, Tuple[int]]] = 1,
num_attention_heads: Optional[int] = 1,
cross_attention_dim: Optional[int] = 1024,
add_downsample: bool = True,
upcast_attention: Optional[bool] = False,
use_linear_projection: Optional[bool] = True,
):
num_layers = 2 # only support sd + sdxl
resnets = []
attentions = []
ctrl_to_base = []
base_to_ctrl = []
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * num_layers
for i in range(num_layers):
base_in_channels = base_in_channels if i == 0 else base_out_channels
ctrl_in_channels = ctrl_in_channels if i == 0 else ctrl_out_channels
# Before the resnet/attention application, information is concatted from base to control.
# Concat doesn't require change in number of channels
base_to_ctrl.append(make_zero_conv(base_in_channels, base_in_channels))
resnets.append(
ResnetBlock2D(
in_channels=ctrl_in_channels + base_in_channels, # information from base is concatted to ctrl
out_channels=ctrl_out_channels,
temb_channels=temb_channels,
groups=find_largest_factor(ctrl_in_channels + base_in_channels, max_factor=max_norm_num_groups),
groups_out=find_largest_factor(ctrl_out_channels, max_factor=max_norm_num_groups),
eps=1e-5,
)
)
if has_crossattn:
attentions.append(
Transformer2DModel(
num_attention_heads,
ctrl_out_channels // num_attention_heads,
in_channels=ctrl_out_channels,
num_layers=transformer_layers_per_block[i],
cross_attention_dim=cross_attention_dim,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
norm_num_groups=find_largest_factor(ctrl_out_channels, max_factor=max_norm_num_groups),
)
)
# After the resnet/attention application, information is added from control to base
# Addition requires change in number of channels
ctrl_to_base.append(make_zero_conv(ctrl_out_channels, base_out_channels))
if add_downsample:
# Before the downsampler application, information is concatted from base to control
# Concat doesn't require change in number of channels
base_to_ctrl.append(make_zero_conv(base_out_channels, base_out_channels))
downsamplers = Downsample2D(
ctrl_out_channels + base_out_channels, use_conv=True, out_channels=ctrl_out_channels, name="op"
)
# After the downsampler application, information is added from control to base
# Addition requires change in number of channels
ctrl_to_base.append(make_zero_conv(ctrl_out_channels, base_out_channels))
else:
downsamplers = None
down_block_components = DownBlockControlNetXSAdapter(
resnets=nn.ModuleList(resnets),
base_to_ctrl=nn.ModuleList(base_to_ctrl),
ctrl_to_base=nn.ModuleList(ctrl_to_base),
)
if has_crossattn:
down_block_components.attentions = nn.ModuleList(attentions)
if downsamplers is not None:
down_block_components.downsamplers = downsamplers
return down_block_components
def get_mid_block_adapter(
base_channels: int,
ctrl_channels: int,
temb_channels: Optional[int] = None,
max_norm_num_groups: Optional[int] = 32,
transformer_layers_per_block: int = 1,
num_attention_heads: Optional[int] = 1,
cross_attention_dim: Optional[int] = 1024,
upcast_attention: bool = False,
use_linear_projection: bool = True,
):
# Before the midblock application, information is concatted from base to control.
# Concat doesn't require change in number of channels
base_to_ctrl = make_zero_conv(base_channels, base_channels)
midblock = UNetMidBlock2DCrossAttn(
transformer_layers_per_block=transformer_layers_per_block,
in_channels=ctrl_channels + base_channels,
out_channels=ctrl_channels,
temb_channels=temb_channels,
        # number of norm groups must divide both in_channels and out_channels
resnet_groups=find_largest_factor(gcd(ctrl_channels, ctrl_channels + base_channels), max_norm_num_groups),
cross_attention_dim=cross_attention_dim,
num_attention_heads=num_attention_heads,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
)
# After the midblock application, information is added from control to base
# Addition requires change in number of channels
ctrl_to_base = make_zero_conv(ctrl_channels, base_channels)
return MidBlockControlNetXSAdapter(base_to_ctrl=base_to_ctrl, midblock=midblock, ctrl_to_base=ctrl_to_base)
def get_up_block_adapter(
out_channels: int,
prev_output_channel: int,
ctrl_skip_channels: List[int],
):
ctrl_to_base = []
num_layers = 3 # only support sd + sdxl
for i in range(num_layers):
resnet_in_channels = prev_output_channel if i == 0 else out_channels
ctrl_to_base.append(make_zero_conv(ctrl_skip_channels[i], resnet_in_channels))
return UpBlockControlNetXSAdapter(ctrl_to_base=nn.ModuleList(ctrl_to_base))
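# Editor's sketch (hypothetical helper, not part of the upstream diffusers file): the adapters above connect
# the control and base streams with 1x1 "zero convolutions" (see `make_zero_conv` further down in this file).
# Assuming the usual ControlNet convention that these start with all-zero weights, the control branch adds
# nothing to the base branch right after initialisation, so the fused model initially behaves like the plain
# base UNet.
def _sketch_zero_conv_is_silent_at_init():
    conn = make_zero_conv(16, 320)  # maps 16 control channels onto 320 base channels
    ctrl_features = torch.randn(1, 16, 32, 32)
    added_to_base = conn(ctrl_features)
    return bool(torch.all(added_to_base == 0))  # expected to be True right after initialisation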
class ControlNetXSAdapter(ModelMixin, ConfigMixin):
r"""
A `ControlNetXSAdapter` model. To use it, pass it into a `UNetControlNetXSModel` (together with a
`UNet2DConditionModel` base model).
    This model inherits from [`ModelMixin`] and [`ConfigMixin`]. Check the superclass documentation for its generic
    methods implemented for all models (such as downloading or saving).
    Like `UNetControlNetXSModel`, `ControlNetXSAdapter` is compatible with StableDiffusion and StableDiffusion-XL. Its
    default parameters are compatible with StableDiffusion.
Parameters:
conditioning_channels (`int`, defaults to 3):
Number of channels of conditioning input (e.g. an image)
conditioning_channel_order (`str`, defaults to `"rgb"`):
The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
conditioning_embedding_out_channels (`tuple[int]`, defaults to `(16, 32, 96, 256)`):
The tuple of output channels for each block in the `controlnet_cond_embedding` layer.
time_embedding_mix (`float`, defaults to 1.0):
            If 0, then only the control adapter's time embedding is used. If 1, then only the base unet's time
embedding is used. Otherwise, both are combined.
learn_time_embedding (`bool`, defaults to `False`):
Whether a time embedding should be learned. If yes, `UNetControlNetXSModel` will combine the time
embeddings of the base model and the control adapter. If no, `UNetControlNetXSModel` will use the base
model's time embedding.
num_attention_heads (`list[int]`, defaults to `[4]`):
The number of attention heads.
block_out_channels (`list[int]`, defaults to `[4, 8, 16, 16]`):
The tuple of output channels for each block.
base_block_out_channels (`list[int]`, defaults to `[320, 640, 1280, 1280]`):
The tuple of output channels for each block in the base unet.
cross_attention_dim (`int`, defaults to 1024):
The dimension of the cross attention features.
down_block_types (`list[str]`, defaults to `["CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D"]`):
The tuple of downsample blocks to use.
sample_size (`int`, defaults to 96):
Height and width of input/output sample.
transformer_layers_per_block (`Union[int, Tuple[int]]`, defaults to 1):
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
[`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
upcast_attention (`bool`, defaults to `True`):
Whether the attention computation should always be upcasted.
max_norm_num_groups (`int`, defaults to 32):
            Maximum number of groups in group normalization. The actual number will be the largest divisor of the respective
channels, that is <= max_norm_num_groups.
"""
@register_to_config
def __init__(
self,
conditioning_channels: int = 3,
conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
time_embedding_mix: float = 1.0,
learn_time_embedding: bool = False,
num_attention_heads: Union[int, Tuple[int]] = 4,
block_out_channels: Tuple[int] = (4, 8, 16, 16),
base_block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
cross_attention_dim: int = 1024,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
sample_size: Optional[int] = 96,
transformer_layers_per_block: Union[int, Tuple[int]] = 1,
upcast_attention: bool = True,
max_norm_num_groups: int = 32,
use_linear_projection: bool = True,
):
super().__init__()
time_embedding_input_dim = base_block_out_channels[0]
time_embedding_dim = base_block_out_channels[0] * 4
# Check inputs
if conditioning_channel_order not in ["rgb", "bgr"]:
raise ValueError(f"unknown `conditioning_channel_order`: {conditioning_channel_order}")
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(transformer_layers_per_block, (list, tuple)):
transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
if not isinstance(cross_attention_dim, (list, tuple)):
cross_attention_dim = [cross_attention_dim] * len(down_block_types)
# see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why `ControlNetXSAdapter` takes `num_attention_heads` instead of `attention_head_dim`
if not isinstance(num_attention_heads, (list, tuple)):
num_attention_heads = [num_attention_heads] * len(down_block_types)
if len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
# 5 - Create conditioning hint embedding
self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0],
block_out_channels=conditioning_embedding_out_channels,
conditioning_channels=conditioning_channels,
)
# time
if learn_time_embedding:
self.time_embedding = TimestepEmbedding(time_embedding_input_dim, time_embedding_dim)
else:
self.time_embedding = None
self.down_blocks = nn.ModuleList([])
self.up_connections = nn.ModuleList([])
# input
self.conv_in = nn.Conv2d(4, block_out_channels[0], kernel_size=3, padding=1)
self.control_to_base_for_conv_in = make_zero_conv(block_out_channels[0], base_block_out_channels[0])
# down
base_out_channels = base_block_out_channels[0]
ctrl_out_channels = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
base_in_channels = base_out_channels
base_out_channels = base_block_out_channels[i]
ctrl_in_channels = ctrl_out_channels
ctrl_out_channels = block_out_channels[i]
has_crossattn = "CrossAttn" in down_block_type
is_final_block = i == len(down_block_types) - 1
self.down_blocks.append(
get_down_block_adapter(
base_in_channels=base_in_channels,
base_out_channels=base_out_channels,
ctrl_in_channels=ctrl_in_channels,
ctrl_out_channels=ctrl_out_channels,
temb_channels=time_embedding_dim,
max_norm_num_groups=max_norm_num_groups,
has_crossattn=has_crossattn,
transformer_layers_per_block=transformer_layers_per_block[i],
num_attention_heads=num_attention_heads[i],
cross_attention_dim=cross_attention_dim[i],
add_downsample=not is_final_block,
upcast_attention=upcast_attention,
use_linear_projection=use_linear_projection,
)
)
# mid
self.mid_block = get_mid_block_adapter(
base_channels=base_block_out_channels[-1],
ctrl_channels=block_out_channels[-1],
temb_channels=time_embedding_dim,
transformer_layers_per_block=transformer_layers_per_block[-1],
num_attention_heads=num_attention_heads[-1],
cross_attention_dim=cross_attention_dim[-1],
upcast_attention=upcast_attention,
use_linear_projection=use_linear_projection,
)
# up
# The skip connection channels are the output of the conv_in and of all the down subblocks
ctrl_skip_channels = [block_out_channels[0]]
for i, out_channels in enumerate(block_out_channels):
number_of_subblocks = (
3 if i < len(block_out_channels) - 1 else 2
) # every block has 3 subblocks, except last one, which has 2 as it has no downsampler
ctrl_skip_channels.extend([out_channels] * number_of_subblocks)
reversed_base_block_out_channels = list(reversed(base_block_out_channels))
base_out_channels = reversed_base_block_out_channels[0]
for i in range(len(down_block_types)):
prev_base_output_channel = base_out_channels
base_out_channels = reversed_base_block_out_channels[i]
ctrl_skip_channels_ = [ctrl_skip_channels.pop() for _ in range(3)]
self.up_connections.append(
get_up_block_adapter(
out_channels=base_out_channels,
prev_output_channel=prev_base_output_channel,
ctrl_skip_channels=ctrl_skip_channels_,
)
)
@classmethod
def from_unet(
cls,
unet: UNet2DConditionModel,
size_ratio: Optional[float] = None,
block_out_channels: Optional[List[int]] = None,
num_attention_heads: Optional[List[int]] = None,
learn_time_embedding: bool = False,
time_embedding_mix: int = 1.0,
conditioning_channels: int = 3,
conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
):
r"""
Instantiate a [`ControlNetXSAdapter`] from a [`UNet2DConditionModel`].
Parameters:
unet (`UNet2DConditionModel`):
The UNet model we want to control. The dimensions of the ControlNetXSAdapter will be adapted to it.
size_ratio (float, *optional*, defaults to `None`):
When given, block_out_channels is set to a fraction of the base model's block_out_channels. Either this
or `block_out_channels` must be given.
block_out_channels (`List[int]`, *optional*, defaults to `None`):
Down blocks output channels in control model. Either this or `size_ratio` must be given.
num_attention_heads (`List[int]`, *optional*, defaults to `None`):
                The number of attention heads. The naming seems a bit confusing and it is, see
https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why.
learn_time_embedding (`bool`, defaults to `False`):
Whether the `ControlNetXSAdapter` should learn a time embedding.
time_embedding_mix (`float`, defaults to 1.0):
If 0, then only the control adapter's time embedding is used. If 1, then only the base unet's time
embedding is used. Otherwise, both are combined.
conditioning_channels (`int`, defaults to 3):
Number of channels of conditioning input (e.g. an image)
conditioning_channel_order (`str`, defaults to `"rgb"`):
The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
conditioning_embedding_out_channels (`Tuple[int]`, defaults to `(16, 32, 96, 256)`):
                The tuple of output channels for each block in the `controlnet_cond_embedding` layer.
"""
# Check input
fixed_size = block_out_channels is not None
relative_size = size_ratio is not None
if not (fixed_size ^ relative_size):
raise ValueError(
"Pass exactly one of `block_out_channels` (for absolute sizing) or `size_ratio` (for relative sizing)."
)
# Create model
block_out_channels = block_out_channels or [int(b * size_ratio) for b in unet.config.block_out_channels]
if num_attention_heads is None:
# The naming seems a bit confusing and it is, see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why.
num_attention_heads = unet.config.attention_head_dim
model = cls(
conditioning_channels=conditioning_channels,
conditioning_channel_order=conditioning_channel_order,
conditioning_embedding_out_channels=conditioning_embedding_out_channels,
time_embedding_mix=time_embedding_mix,
learn_time_embedding=learn_time_embedding,
num_attention_heads=num_attention_heads,
block_out_channels=block_out_channels,
base_block_out_channels=unet.config.block_out_channels,
cross_attention_dim=unet.config.cross_attention_dim,
down_block_types=unet.config.down_block_types,
sample_size=unet.config.sample_size,
transformer_layers_per_block=unet.config.transformer_layers_per_block,
upcast_attention=unet.config.upcast_attention,
max_norm_num_groups=unet.config.norm_num_groups,
use_linear_projection=unet.config.use_linear_projection,
)
# ensure that the ControlNetXSAdapter is the same dtype as the UNet2DConditionModel
model.to(unet.dtype)
return model
def forward(self, *args, **kwargs):
raise ValueError(
"A ControlNetXSAdapter cannot be run by itself. Use it together with a UNet2DConditionModel to instantiate a UNetControlNetXSModel."
)
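# Editor's sketch (hypothetical helper, not part of the upstream diffusers file): a ControlNetXSAdapter is
# usually built from the base UNet so that its block layout and cross-attention dimensions match. The
# checkpoint id below is a standard Stable Diffusion 1.5 repository and serves only as an illustration.
def _sketch_controlnet_xs_adapter_from_unet():
    unet = UNet2DConditionModel.from_pretrained(
        "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet"
    )
    # `size_ratio=0.1` makes the control branch roughly a tenth as wide as the base UNet.
    adapter = ControlNetXSAdapter.from_unet(unet, size_ratio=0.1, learn_time_embedding=True)
    return adapter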
class UNetControlNetXSModel(ModelMixin, ConfigMixin):
r"""
    A UNet fused with a ControlNet-XS adapter model.
    This model inherits from [`ModelMixin`] and [`ConfigMixin`]. Check the superclass documentation for its generic
    methods implemented for all models (such as downloading or saving).
    `UNetControlNetXSModel` is compatible with StableDiffusion and StableDiffusion-XL. Its default parameters are
    compatible with StableDiffusion.
    Its parameters are either passed to the underlying `UNet2DConditionModel` or used exactly like in
    `ControlNetXSAdapter`. See their documentation for details.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
# unet configs
sample_size: Optional[int] = 96,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
norm_num_groups: Optional[int] = 32,
cross_attention_dim: Union[int, Tuple[int]] = 1024,
transformer_layers_per_block: Union[int, Tuple[int]] = 1,
num_attention_heads: Union[int, Tuple[int]] = 8,
addition_embed_type: Optional[str] = None,
addition_time_embed_dim: Optional[int] = None,
upcast_attention: bool = True,
use_linear_projection: bool = True,
time_cond_proj_dim: Optional[int] = None,
projection_class_embeddings_input_dim: Optional[int] = None,
# additional controlnet configs
time_embedding_mix: float = 1.0,
ctrl_conditioning_channels: int = 3,
ctrl_conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
ctrl_conditioning_channel_order: str = "rgb",
ctrl_learn_time_embedding: bool = False,
ctrl_block_out_channels: Tuple[int] = (4, 8, 16, 16),
ctrl_num_attention_heads: Union[int, Tuple[int]] = 4,
ctrl_max_norm_num_groups: int = 32,
):
super().__init__()
if time_embedding_mix < 0 or time_embedding_mix > 1:
raise ValueError("`time_embedding_mix` needs to be between 0 and 1.")
if time_embedding_mix < 1 and not ctrl_learn_time_embedding:
raise ValueError("To use `time_embedding_mix` < 1, `ctrl_learn_time_embedding` must be `True`")
if addition_embed_type is not None and addition_embed_type != "text_time":
raise ValueError(
"As `UNetControlNetXSModel` currently only supports StableDiffusion and StableDiffusion-XL, `addition_embed_type` must be `None` or `'text_time'`."
)
if not isinstance(transformer_layers_per_block, (list, tuple)):
transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
if not isinstance(cross_attention_dim, (list, tuple)):
cross_attention_dim = [cross_attention_dim] * len(down_block_types)
if not isinstance(num_attention_heads, (list, tuple)):
num_attention_heads = [num_attention_heads] * len(down_block_types)
if not isinstance(ctrl_num_attention_heads, (list, tuple)):
ctrl_num_attention_heads = [ctrl_num_attention_heads] * len(down_block_types)
base_num_attention_heads = num_attention_heads
self.in_channels = 4
# # Input
self.base_conv_in = nn.Conv2d(4, block_out_channels[0], kernel_size=3, padding=1)
self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
conditioning_embedding_channels=ctrl_block_out_channels[0],
block_out_channels=ctrl_conditioning_embedding_out_channels,
conditioning_channels=ctrl_conditioning_channels,
)
self.ctrl_conv_in = nn.Conv2d(4, ctrl_block_out_channels[0], kernel_size=3, padding=1)
self.control_to_base_for_conv_in = make_zero_conv(ctrl_block_out_channels[0], block_out_channels[0])
# # Time
time_embed_input_dim = block_out_channels[0]
time_embed_dim = block_out_channels[0] * 4
self.base_time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos=True, downscale_freq_shift=0)
self.base_time_embedding = TimestepEmbedding(
time_embed_input_dim,
time_embed_dim,
cond_proj_dim=time_cond_proj_dim,
)
if ctrl_learn_time_embedding:
self.ctrl_time_embedding = TimestepEmbedding(
in_channels=time_embed_input_dim, time_embed_dim=time_embed_dim
)
else:
self.ctrl_time_embedding = None
if addition_embed_type is None:
self.base_add_time_proj = None
self.base_add_embedding = None
else:
self.base_add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos=True, downscale_freq_shift=0)
self.base_add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
# # Create down blocks
down_blocks = []
base_out_channels = block_out_channels[0]
ctrl_out_channels = ctrl_block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
base_in_channels = base_out_channels
base_out_channels = block_out_channels[i]
ctrl_in_channels = ctrl_out_channels
ctrl_out_channels = ctrl_block_out_channels[i]
has_crossattn = "CrossAttn" in down_block_type
is_final_block = i == len(down_block_types) - 1
down_blocks.append(
ControlNetXSCrossAttnDownBlock2D(
base_in_channels=base_in_channels,
base_out_channels=base_out_channels,
ctrl_in_channels=ctrl_in_channels,
ctrl_out_channels=ctrl_out_channels,
temb_channels=time_embed_dim,
norm_num_groups=norm_num_groups,
ctrl_max_norm_num_groups=ctrl_max_norm_num_groups,
has_crossattn=has_crossattn,
transformer_layers_per_block=transformer_layers_per_block[i],
base_num_attention_heads=base_num_attention_heads[i],
ctrl_num_attention_heads=ctrl_num_attention_heads[i],
cross_attention_dim=cross_attention_dim[i],
add_downsample=not is_final_block,
upcast_attention=upcast_attention,
use_linear_projection=use_linear_projection,
)
)
# # Create mid block
self.mid_block = ControlNetXSCrossAttnMidBlock2D(
base_channels=block_out_channels[-1],
ctrl_channels=ctrl_block_out_channels[-1],
temb_channels=time_embed_dim,
norm_num_groups=norm_num_groups,
ctrl_max_norm_num_groups=ctrl_max_norm_num_groups,
transformer_layers_per_block=transformer_layers_per_block[-1],
base_num_attention_heads=base_num_attention_heads[-1],
ctrl_num_attention_heads=ctrl_num_attention_heads[-1],
cross_attention_dim=cross_attention_dim[-1],
upcast_attention=upcast_attention,
use_linear_projection=use_linear_projection,
)
# # Create up blocks
up_blocks = []
rev_transformer_layers_per_block = list(reversed(transformer_layers_per_block))
rev_num_attention_heads = list(reversed(base_num_attention_heads))
rev_cross_attention_dim = list(reversed(cross_attention_dim))
# The skip connection channels are the output of the conv_in and of all the down subblocks
ctrl_skip_channels = [ctrl_block_out_channels[0]]
for i, out_channels in enumerate(ctrl_block_out_channels):
number_of_subblocks = (
3 if i < len(ctrl_block_out_channels) - 1 else 2
) # every block has 3 subblocks, except last one, which has 2 as it has no downsampler
ctrl_skip_channels.extend([out_channels] * number_of_subblocks)
reversed_block_out_channels = list(reversed(block_out_channels))
out_channels = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
prev_output_channel = out_channels
out_channels = reversed_block_out_channels[i]
in_channels = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
ctrl_skip_channels_ = [ctrl_skip_channels.pop() for _ in range(3)]
has_crossattn = "CrossAttn" in up_block_type
is_final_block = i == len(block_out_channels) - 1
up_blocks.append(
ControlNetXSCrossAttnUpBlock2D(
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
ctrl_skip_channels=ctrl_skip_channels_,
temb_channels=time_embed_dim,
resolution_idx=i,
has_crossattn=has_crossattn,
transformer_layers_per_block=rev_transformer_layers_per_block[i],
num_attention_heads=rev_num_attention_heads[i],
cross_attention_dim=rev_cross_attention_dim[i],
add_upsample=not is_final_block,
upcast_attention=upcast_attention,
norm_num_groups=norm_num_groups,
use_linear_projection=use_linear_projection,
)
)
self.down_blocks = nn.ModuleList(down_blocks)
self.up_blocks = nn.ModuleList(up_blocks)
self.base_conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups)
self.base_conv_act = nn.SiLU()
self.base_conv_out = nn.Conv2d(block_out_channels[0], 4, kernel_size=3, padding=1)
@classmethod
def from_unet(
cls,
unet: UNet2DConditionModel,
controlnet: Optional[ControlNetXSAdapter] = None,
size_ratio: Optional[float] = None,
ctrl_block_out_channels: Optional[List[float]] = None,
time_embedding_mix: Optional[float] = None,
ctrl_optional_kwargs: Optional[Dict] = None,
):
r"""
        Instantiate a [`UNetControlNetXSModel`] from a [`UNet2DConditionModel`] and an optional [`ControlNetXSAdapter`].
Parameters:
unet (`UNet2DConditionModel`):
The UNet model we want to control.
controlnet (`ControlNetXSAdapter`):
                The ControlNet-XS adapter with which the UNet will be fused. If none is given, a new ControlNet-XS
adapter will be created.
size_ratio (float, *optional*, defaults to `None`):
                Used to construct the controlnet if none is given. See [`ControlNetXSAdapter.from_unet`] for details.
ctrl_block_out_channels (`List[int]`, *optional*, defaults to `None`):
                Used to construct the controlnet if none is given. See [`ControlNetXSAdapter.from_unet`] for details,
where this parameter is called `block_out_channels`.
time_embedding_mix (`float`, *optional*, defaults to None):
                Used to construct the controlnet if none is given. See [`ControlNetXSAdapter.from_unet`] for details.
ctrl_optional_kwargs (`Dict`, *optional*, defaults to `None`):
                Passed to the `init` of the new controlnet if no controlnet was given.
"""
if controlnet is None:
controlnet = ControlNetXSAdapter.from_unet(
unet, size_ratio, ctrl_block_out_channels, **ctrl_optional_kwargs
)
else:
if any(
o is not None for o in (size_ratio, ctrl_block_out_channels, time_embedding_mix, ctrl_optional_kwargs)
):
raise ValueError(
"When a controlnet is passed, none of these parameters should be passed: size_ratio, ctrl_block_out_channels, time_embedding_mix, ctrl_optional_kwargs."
)
# # get params
params_for_unet = [
"sample_size",
"down_block_types",
"up_block_types",
"block_out_channels",
"norm_num_groups",
"cross_attention_dim",
"transformer_layers_per_block",
"addition_embed_type",
"addition_time_embed_dim",
"upcast_attention",
"use_linear_projection",
"time_cond_proj_dim",
"projection_class_embeddings_input_dim",
]
params_for_unet = {k: v for k, v in unet.config.items() if k in params_for_unet}
# The naming seems a bit confusing and it is, see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why.
params_for_unet["num_attention_heads"] = unet.config.attention_head_dim
params_for_controlnet = [
"conditioning_channels",
"conditioning_embedding_out_channels",
"conditioning_channel_order",
"learn_time_embedding",
"block_out_channels",
"num_attention_heads",
"max_norm_num_groups",
]
params_for_controlnet = {"ctrl_" + k: v for k, v in controlnet.config.items() if k in params_for_controlnet}
params_for_controlnet["time_embedding_mix"] = controlnet.config.time_embedding_mix
# # create model
model = cls.from_config({**params_for_unet, **params_for_controlnet})
# # load weights
# from unet
modules_from_unet = [
"time_embedding",
"conv_in",
"conv_norm_out",
"conv_out",
]
for m in modules_from_unet:
getattr(model, "base_" + m).load_state_dict(getattr(unet, m).state_dict())
optional_modules_from_unet = [
"add_time_proj",
"add_embedding",
]
for m in optional_modules_from_unet:
if hasattr(unet, m) and getattr(unet, m) is not None:
getattr(model, "base_" + m).load_state_dict(getattr(unet, m).state_dict())
# from controlnet
model.controlnet_cond_embedding.load_state_dict(controlnet.controlnet_cond_embedding.state_dict())
model.ctrl_conv_in.load_state_dict(controlnet.conv_in.state_dict())
if controlnet.time_embedding is not None:
model.ctrl_time_embedding.load_state_dict(controlnet.time_embedding.state_dict())
model.control_to_base_for_conv_in.load_state_dict(controlnet.control_to_base_for_conv_in.state_dict())
# from both
model.down_blocks = nn.ModuleList(
ControlNetXSCrossAttnDownBlock2D.from_modules(b, c)
for b, c in zip(unet.down_blocks, controlnet.down_blocks)
)
model.mid_block = ControlNetXSCrossAttnMidBlock2D.from_modules(unet.mid_block, controlnet.mid_block)
model.up_blocks = nn.ModuleList(
ControlNetXSCrossAttnUpBlock2D.from_modules(b, c)
for b, c in zip(unet.up_blocks, controlnet.up_connections)
)
# ensure that the UNetControlNetXSModel is the same dtype as the UNet2DConditionModel
model.to(unet.dtype)
return model
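    # Editor's note (illustrative, not part of the upstream file): a typical way to obtain a fused model is
    #
    #     unet = UNet2DConditionModel.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet")
    #     controlnet = ControlNetXSAdapter.from_unet(unet, size_ratio=0.1, learn_time_embedding=True)
    #     model = UNetControlNetXSModel.from_unet(unet, controlnet)
    #
    # after which the pretrained UNet weights and the freshly initialised adapter weights live in one module.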
def freeze_unet_params(self) -> None:
"""Freeze the weights of the parts belonging to the base UNet2DConditionModel, and leave everything else unfrozen for fine
tuning."""
        # First make every parameter trainable
for param in self.parameters():
param.requires_grad = True
        # Then freeze the parts that belong to the base UNet2DConditionModel
base_parts = [
"base_time_proj",
"base_time_embedding",
"base_add_time_proj",
"base_add_embedding",
"base_conv_in",
"base_conv_norm_out",
"base_conv_act",
"base_conv_out",
]
base_parts = [getattr(self, part) for part in base_parts if getattr(self, part) is not None]
for part in base_parts:
for param in part.parameters():
param.requires_grad = False
for d in self.down_blocks:
d.freeze_base_params()
self.mid_block.freeze_base_params()
for u in self.up_blocks:
u.freeze_base_params()
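    # Editor's note (illustrative, not part of the upstream file): after `model.freeze_unet_params()`, only
    # the ControlNet-XS parts should require gradients, which can be checked with something like
    #
    #     trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    #     total = sum(p.numel() for p in model.parameters())
    #     print(f"training {trainable / total:.1%} of the fused model's parameters")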
@property
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor()
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnAddedKVProcessor()
elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnProcessor()
else:
raise ValueError(
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
)
self.set_attn_processor(processor)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.enable_freeu
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
The suffixes after the scaling factors represent the stage blocks where they are being applied.
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
Args:
s1 (`float`):
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
mitigate the "oversmoothing effect" in the enhanced denoising process.
s2 (`float`):
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
mitigate the "oversmoothing effect" in the enhanced denoising process.
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
"""
for i, upsample_block in enumerate(self.up_blocks):
setattr(upsample_block, "s1", s1)
setattr(upsample_block, "s2", s2)
setattr(upsample_block, "b1", b1)
setattr(upsample_block, "b2", b2)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.disable_freeu
def disable_freeu(self):
"""Disables the FreeU mechanism."""
freeu_keys = {"s1", "s2", "b1", "b2"}
for i, upsample_block in enumerate(self.up_blocks):
for k in freeu_keys:
if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
setattr(upsample_block, k, None)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
def fuse_qkv_projections(self):
"""
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
self.original_attn_processors = None
for _, attn_processor in self.attn_processors.items():
if "Added" in str(attn_processor.__class__.__name__):
raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
self.original_attn_processors = self.attn_processors
for module in self.modules():
if isinstance(module, Attention):
module.fuse_projections(fuse=True)
self.set_attn_processor(FusedAttnProcessor2_0())
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
if self.original_attn_processors is not None:
self.set_attn_processor(self.original_attn_processors)
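# A minimal usage sketch (assumption: inference code for `model` exists elsewhere):
#     model.fuse_qkv_projections()    # fuse q/k/v projections before running inference
#     ...                             # run inference
#     model.unfuse_qkv_projections()  # restore the original, unfused attention processors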
def forward(
self,
sample: Tensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
controlnet_cond: Optional[torch.Tensor] = None,
conditioning_scale: Optional[float] = 1.0,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
return_dict: bool = True,
apply_control: bool = True,
) -> Union[ControlNetXSOutput, Tuple]:
"""
The [`ControlNetXSModel`] forward method.
Args:
sample (`Tensor`):
The noisy input tensor.
timestep (`Union[torch.Tensor, float, int]`):
The current diffusion timestep at which to denoise the input.
encoder_hidden_states (`torch.Tensor`):
The encoder hidden states.
controlnet_cond (`Tensor`):
The conditioning image tensor, typically of shape `(batch_size, 3, height, width)`.
conditioning_scale (`float`, defaults to `1.0`):
How much the control model affects the base model outputs.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
added_cond_kwargs (`dict`):
Additional conditions for the Stable Diffusion XL UNet.
return_dict (`bool`, defaults to `True`):
Whether or not to return a [`~models.controlnetxs.ControlNetXSOutput`] instead of a plain
tuple.
apply_control (`bool`, defaults to `True`):
If `False`, the input is run only through the base model.
Returns:
[`~models.controlnetxs.ControlNetXSOutput`] **or** `tuple`:
If `return_dict` is `True`, a [`~models.controlnetxs.ControlNetXSOutput`] is returned, otherwise a
tuple is returned where the first element is the sample tensor.
"""
# check channel order
if self.config.ctrl_conditioning_channel_order == "bgr":
controlnet_cond = torch.flip(controlnet_cond, dims=[1])
# prepare attention_mask
if attention_mask is not None:
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
is_npu = sample.device.type == "npu"
if isinstance(timestep, float):
dtype = torch.float32 if (is_mps or is_npu) else torch.float64
else:
dtype = torch.int32 if (is_mps or is_npu) else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.base_time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=sample.dtype)
if self.config.ctrl_learn_time_embedding and apply_control:
ctrl_temb = self.ctrl_time_embedding(t_emb, timestep_cond)
base_temb = self.base_time_embedding(t_emb, timestep_cond)
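# Blend the control and base time embeddings. Because the exponent 0.3 is < 1, small values of
# `time_embedding_mix` still give the learned control embedding more weight than a plain linear
# interpolation would.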
interpolation_param = self.config.time_embedding_mix**0.3
temb = ctrl_temb * interpolation_param + base_temb * (1 - interpolation_param)
else:
temb = self.base_time_embedding(t_emb)
# added time & text embeddings
aug_emb = None
if self.config.addition_embed_type is None:
pass
elif self.config.addition_embed_type == "text_time":
# SDXL - style
if "text_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
)
text_embeds = added_cond_kwargs.get("text_embeds")
if "time_ids" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
)
time_ids = added_cond_kwargs.get("time_ids")
time_embeds = self.base_add_time_proj(time_ids.flatten())
time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
add_embeds = add_embeds.to(temb.dtype)
aug_emb = self.base_add_embedding(add_embeds)
else:
raise ValueError(
f"ControlNet-XS currently only supports StableDiffusion and StableDiffusion-XL, so addition_embed_type = {self.config.addition_embed_type} is currently not supported."
)
temb = temb + aug_emb if aug_emb is not None else temb
# text embeddings
cemb = encoder_hidden_states
# Preparation
h_ctrl = h_base = sample
hs_base, hs_ctrl = [], []
# Cross Control
guided_hint = self.controlnet_cond_embedding(controlnet_cond)
# 1 - conv in & down
h_base = self.base_conv_in(h_base)
h_ctrl = self.ctrl_conv_in(h_ctrl)
if guided_hint is not None:
h_ctrl += guided_hint
if apply_control:
h_base = h_base + self.control_to_base_for_conv_in(h_ctrl) * conditioning_scale # add ctrl -> base
hs_base.append(h_base)
hs_ctrl.append(h_ctrl)
for down in self.down_blocks:
h_base, h_ctrl, residual_hb, residual_hc = down(
hidden_states_base=h_base,
hidden_states_ctrl=h_ctrl,
temb=temb,
encoder_hidden_states=cemb,
conditioning_scale=conditioning_scale,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
apply_control=apply_control,
)
hs_base.extend(residual_hb)
hs_ctrl.extend(residual_hc)
# 2 - mid
h_base, h_ctrl = self.mid_block(
hidden_states_base=h_base,
hidden_states_ctrl=h_ctrl,
temb=temb,
encoder_hidden_states=cemb,
conditioning_scale=conditioning_scale,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
apply_control=apply_control,
)
# 3 - up
for up in self.up_blocks:
n_resnets = len(up.resnets)
skips_hb = hs_base[-n_resnets:]
skips_hc = hs_ctrl[-n_resnets:]
hs_base = hs_base[:-n_resnets]
hs_ctrl = hs_ctrl[:-n_resnets]
h_base = up(
hidden_states=h_base,
res_hidden_states_tuple_base=skips_hb,
res_hidden_states_tuple_ctrl=skips_hc,
temb=temb,
encoder_hidden_states=cemb,
conditioning_scale=conditioning_scale,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
apply_control=apply_control,
)
# 4 - conv out
h_base = self.base_conv_norm_out(h_base)
h_base = self.base_conv_act(h_base)
h_base = self.base_conv_out(h_base)
if not return_dict:
return (h_base,)
return ControlNetXSOutput(sample=h_base)
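# A minimal call sketch for the forward pass above (shapes are illustrative assumptions for an
# SD-style setup, not taken from this file):
#     out = model(
#         sample=latents,                     # (batch, 4, height // 8, width // 8) noisy latents
#         timestep=t,                         # python number or (batch,) tensor
#         encoder_hidden_states=text_embeds,  # (batch, seq_len, cross_attention_dim)
#         controlnet_cond=hint_image,         # (batch, 3, height, width) conditioning image
#         conditioning_scale=1.0,
#     )
#     prediction = out.sample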
class ControlNetXSCrossAttnDownBlock2D(nn.Module):
def __init__(
self,
base_in_channels: int,
base_out_channels: int,
ctrl_in_channels: int,
ctrl_out_channels: int,
temb_channels: int,
norm_num_groups: int = 32,
ctrl_max_norm_num_groups: int = 32,
has_crossattn=True,
transformer_layers_per_block: Optional[Union[int, Tuple[int]]] = 1,
base_num_attention_heads: Optional[int] = 1,
ctrl_num_attention_heads: Optional[int] = 1,
cross_attention_dim: Optional[int] = 1024,
add_downsample: bool = True,
upcast_attention: Optional[bool] = False,
use_linear_projection: Optional[bool] = True,
):
super().__init__()
base_resnets = []
base_attentions = []
ctrl_resnets = []
ctrl_attentions = []
ctrl_to_base = []
base_to_ctrl = []
num_layers = 2 # only support sd + sdxl
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * num_layers
for i in range(num_layers):
base_in_channels = base_in_channels if i == 0 else base_out_channels
ctrl_in_channels = ctrl_in_channels if i == 0 else ctrl_out_channels
# Before the resnet/attention application, information is concatted from base to control.
# Concat doesn't require change in number of channels
base_to_ctrl.append(make_zero_conv(base_in_channels, base_in_channels))
base_resnets.append(
ResnetBlock2D(
in_channels=base_in_channels,
out_channels=base_out_channels,
temb_channels=temb_channels,
groups=norm_num_groups,
)
)
ctrl_resnets.append(
ResnetBlock2D(
in_channels=ctrl_in_channels + base_in_channels, # information from base is concatted to ctrl
out_channels=ctrl_out_channels,
temb_channels=temb_channels,
groups=find_largest_factor(
ctrl_in_channels + base_in_channels, max_factor=ctrl_max_norm_num_groups
),
groups_out=find_largest_factor(ctrl_out_channels, max_factor=ctrl_max_norm_num_groups),
eps=1e-5,
)
)
if has_crossattn:
base_attentions.append(
Transformer2DModel(
base_num_attention_heads,
base_out_channels // base_num_attention_heads,
in_channels=base_out_channels,
num_layers=transformer_layers_per_block[i],
cross_attention_dim=cross_attention_dim,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
norm_num_groups=norm_num_groups,
)
)
ctrl_attentions.append(
Transformer2DModel(
ctrl_num_attention_heads,
ctrl_out_channels // ctrl_num_attention_heads,
in_channels=ctrl_out_channels,
num_layers=transformer_layers_per_block[i],
cross_attention_dim=cross_attention_dim,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
norm_num_groups=find_largest_factor(ctrl_out_channels, max_factor=ctrl_max_norm_num_groups),
)
)
# After the resnet/attention application, information is added from control to base
# Addition requires change in number of channels
ctrl_to_base.append(make_zero_conv(ctrl_out_channels, base_out_channels))
if add_downsample:
# Before the downsampler application, information is concatted from base to control
# Concat doesn't require change in number of channels
base_to_ctrl.append(make_zero_conv(base_out_channels, base_out_channels))
self.base_downsamplers = Downsample2D(
base_out_channels, use_conv=True, out_channels=base_out_channels, name="op"
)
self.ctrl_downsamplers = Downsample2D(
ctrl_out_channels + base_out_channels, use_conv=True, out_channels=ctrl_out_channels, name="op"
)
# After the downsampler application, information is added from control to base
# Addition requires change in number of channels
ctrl_to_base.append(make_zero_conv(ctrl_out_channels, base_out_channels))
else:
self.base_downsamplers = None
self.ctrl_downsamplers = None
self.base_resnets = nn.ModuleList(base_resnets)
self.ctrl_resnets = nn.ModuleList(ctrl_resnets)
self.base_attentions = nn.ModuleList(base_attentions) if has_crossattn else [None] * num_layers
self.ctrl_attentions = nn.ModuleList(ctrl_attentions) if has_crossattn else [None] * num_layers
self.base_to_ctrl = nn.ModuleList(base_to_ctrl)
self.ctrl_to_base = nn.ModuleList(ctrl_to_base)
self.gradient_checkpointing = False
@classmethod
def from_modules(cls, base_downblock: CrossAttnDownBlock2D, ctrl_downblock: DownBlockControlNetXSAdapter):
# get params
def get_first_cross_attention(block):
return block.attentions[0].transformer_blocks[0].attn2
base_in_channels = base_downblock.resnets[0].in_channels
base_out_channels = base_downblock.resnets[0].out_channels
ctrl_in_channels = (
ctrl_downblock.resnets[0].in_channels - base_in_channels
) # base channels are concatted to ctrl channels in init
ctrl_out_channels = ctrl_downblock.resnets[0].out_channels
temb_channels = base_downblock.resnets[0].time_emb_proj.in_features
num_groups = base_downblock.resnets[0].norm1.num_groups
ctrl_num_groups = ctrl_downblock.resnets[0].norm1.num_groups
if hasattr(base_downblock, "attentions"):
has_crossattn = True
transformer_layers_per_block = len(base_downblock.attentions[0].transformer_blocks)
base_num_attention_heads = get_first_cross_attention(base_downblock).heads
ctrl_num_attention_heads = get_first_cross_attention(ctrl_downblock).heads
cross_attention_dim = get_first_cross_attention(base_downblock).cross_attention_dim
upcast_attention = get_first_cross_attention(base_downblock).upcast_attention
use_linear_projection = base_downblock.attentions[0].use_linear_projection
else:
has_crossattn = False
transformer_layers_per_block = None
base_num_attention_heads = None
ctrl_num_attention_heads = None
cross_attention_dim = None
upcast_attention = None
use_linear_projection = None
add_downsample = base_downblock.downsamplers is not None
# create model
model = cls(
base_in_channels=base_in_channels,
base_out_channels=base_out_channels,
ctrl_in_channels=ctrl_in_channels,
ctrl_out_channels=ctrl_out_channels,
temb_channels=temb_channels,
norm_num_groups=num_groups,
ctrl_max_norm_num_groups=ctrl_num_groups,
has_crossattn=has_crossattn,
transformer_layers_per_block=transformer_layers_per_block,
base_num_attention_heads=base_num_attention_heads,
ctrl_num_attention_heads=ctrl_num_attention_heads,
cross_attention_dim=cross_attention_dim,
add_downsample=add_downsample,
upcast_attention=upcast_attention,
use_linear_projection=use_linear_projection,
)
# load weights
model.base_resnets.load_state_dict(base_downblock.resnets.state_dict())
model.ctrl_resnets.load_state_dict(ctrl_downblock.resnets.state_dict())
if has_crossattn:
model.base_attentions.load_state_dict(base_downblock.attentions.state_dict())
model.ctrl_attentions.load_state_dict(ctrl_downblock.attentions.state_dict())
if add_downsample:
model.base_downsamplers.load_state_dict(base_downblock.downsamplers[0].state_dict())
model.ctrl_downsamplers.load_state_dict(ctrl_downblock.downsamplers.state_dict())
model.base_to_ctrl.load_state_dict(ctrl_downblock.base_to_ctrl.state_dict())
model.ctrl_to_base.load_state_dict(ctrl_downblock.ctrl_to_base.state_dict())
return model
def freeze_base_params(self) -> None:
"""Freeze the weights of the parts belonging to the base UNet2DConditionModel, and leave everything else unfrozen for fine
tuning."""
# Unfreeze everything
for param in self.parameters():
param.requires_grad = True
# Freeze base part
base_parts = [self.base_resnets]
if isinstance(self.base_attentions, nn.ModuleList): # attentions can be a list of Nones
base_parts.append(self.base_attentions)
if self.base_downsamplers is not None:
base_parts.append(self.base_downsamplers)
for part in base_parts:
for param in part.parameters():
param.requires_grad = False
def forward(
self,
hidden_states_base: Tensor,
temb: Tensor,
encoder_hidden_states: Optional[Tensor] = None,
hidden_states_ctrl: Optional[Tensor] = None,
conditioning_scale: Optional[float] = 1.0,
attention_mask: Optional[Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[Tensor] = None,
apply_control: bool = True,
) -> Tuple[Tensor, Tensor, Tuple[Tensor, ...], Tuple[Tensor, ...]]:
if cross_attention_kwargs is not None:
if cross_attention_kwargs.get("scale", None) is not None:
logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
h_base = hidden_states_base
h_ctrl = hidden_states_ctrl
base_output_states = ()
ctrl_output_states = ()
base_blocks = list(zip(self.base_resnets, self.base_attentions))
ctrl_blocks = list(zip(self.ctrl_resnets, self.ctrl_attentions))
for (b_res, b_attn), (c_res, c_attn), b2c, c2b in zip(
base_blocks, ctrl_blocks, self.base_to_ctrl, self.ctrl_to_base
):
# concat base -> ctrl
if apply_control:
h_ctrl = torch.cat([h_ctrl, b2c(h_base)], dim=1)
# apply base subblock
if torch.is_grad_enabled() and self.gradient_checkpointing:
h_base = self._gradient_checkpointing_func(b_res, h_base, temb)
else:
h_base = b_res(h_base, temb)
if b_attn is not None:
h_base = b_attn(
h_base,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
# apply ctrl subblock
if apply_control:
if torch.is_grad_enabled() and self.gradient_checkpointing:
h_ctrl = self._gradient_checkpointing_func(c_res, h_ctrl, temb)
else:
h_ctrl = c_res(h_ctrl, temb)
if c_attn is not None:
h_ctrl = c_attn(
h_ctrl,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
# add ctrl -> base
if apply_control:
h_base = h_base + c2b(h_ctrl) * conditioning_scale
base_output_states = base_output_states + (h_base,)
ctrl_output_states = ctrl_output_states + (h_ctrl,)
if self.base_downsamplers is not None: # if we have a base_downsampler, then also a ctrl_downsampler
b2c = self.base_to_ctrl[-1]
c2b = self.ctrl_to_base[-1]
# concat base -> ctrl
if apply_control:
h_ctrl = torch.cat([h_ctrl, b2c(h_base)], dim=1)
# apply base subblock
h_base = self.base_downsamplers(h_base)
# apply ctrl subblock
if apply_control:
h_ctrl = self.ctrl_downsamplers(h_ctrl)
# add ctrl -> base
if apply_control:
h_base = h_base + c2b(h_ctrl) * conditioning_scale
base_output_states = base_output_states + (h_base,)
ctrl_output_states = ctrl_output_states + (h_ctrl,)
return h_base, h_ctrl, base_output_states, ctrl_output_states
class ControlNetXSCrossAttnMidBlock2D(nn.Module):
def __init__(
self,
base_channels: int,
ctrl_channels: int,
temb_channels: Optional[int] = None,
norm_num_groups: int = 32,
ctrl_max_norm_num_groups: int = 32,
transformer_layers_per_block: int = 1,
base_num_attention_heads: Optional[int] = 1,
ctrl_num_attention_heads: Optional[int] = 1,
cross_attention_dim: Optional[int] = 1024,
upcast_attention: bool = False,
use_linear_projection: Optional[bool] = True,
):
super().__init__()
# Before the midblock application, information is concatted from base to control.
# Concat doesn't require change in number of channels
self.base_to_ctrl = make_zero_conv(base_channels, base_channels)
self.base_midblock = UNetMidBlock2DCrossAttn(
transformer_layers_per_block=transformer_layers_per_block,
in_channels=base_channels,
temb_channels=temb_channels,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
num_attention_heads=base_num_attention_heads,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
)
self.ctrl_midblock = UNetMidBlock2DCrossAttn(
transformer_layers_per_block=transformer_layers_per_block,
in_channels=ctrl_channels + base_channels,
out_channels=ctrl_channels,
temb_channels=temb_channels,
# the number of norm groups must divide both in_channels and out_channels
resnet_groups=find_largest_factor(
gcd(ctrl_channels, ctrl_channels + base_channels), ctrl_max_norm_num_groups
),
cross_attention_dim=cross_attention_dim,
num_attention_heads=ctrl_num_attention_heads,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
)
# After the midblock application, information is added from control to base
# Addition requires change in number of channels
self.ctrl_to_base = make_zero_conv(ctrl_channels, base_channels)
self.gradient_checkpointing = False
@classmethod
def from_modules(
cls,
base_midblock: UNetMidBlock2DCrossAttn,
ctrl_midblock: MidBlockControlNetXSAdapter,
):
base_to_ctrl = ctrl_midblock.base_to_ctrl
ctrl_to_base = ctrl_midblock.ctrl_to_base
ctrl_midblock = ctrl_midblock.midblock
# get params
def get_first_cross_attention(midblock):
return midblock.attentions[0].transformer_blocks[0].attn2
base_channels = ctrl_to_base.out_channels
ctrl_channels = ctrl_to_base.in_channels
transformer_layers_per_block = len(base_midblock.attentions[0].transformer_blocks)
temb_channels = base_midblock.resnets[0].time_emb_proj.in_features
num_groups = base_midblock.resnets[0].norm1.num_groups
ctrl_num_groups = ctrl_midblock.resnets[0].norm1.num_groups
base_num_attention_heads = get_first_cross_attention(base_midblock).heads
ctrl_num_attention_heads = get_first_cross_attention(ctrl_midblock).heads
cross_attention_dim = get_first_cross_attention(base_midblock).cross_attention_dim
upcast_attention = get_first_cross_attention(base_midblock).upcast_attention
use_linear_projection = base_midblock.attentions[0].use_linear_projection
# create model
model = cls(
base_channels=base_channels,
ctrl_channels=ctrl_channels,
temb_channels=temb_channels,
norm_num_groups=num_groups,
ctrl_max_norm_num_groups=ctrl_num_groups,
transformer_layers_per_block=transformer_layers_per_block,
base_num_attention_heads=base_num_attention_heads,
ctrl_num_attention_heads=ctrl_num_attention_heads,
cross_attention_dim=cross_attention_dim,
upcast_attention=upcast_attention,
use_linear_projection=use_linear_projection,
)
# load weights
model.base_to_ctrl.load_state_dict(base_to_ctrl.state_dict())
model.base_midblock.load_state_dict(base_midblock.state_dict())
model.ctrl_midblock.load_state_dict(ctrl_midblock.state_dict())
model.ctrl_to_base.load_state_dict(ctrl_to_base.state_dict())
return model
def freeze_base_params(self) -> None:
"""Freeze the weights of the parts belonging to the base UNet2DConditionModel, and leave everything else unfrozen for fine
tuning."""
# Unfreeze everything
for param in self.parameters():
param.requires_grad = True
# Freeze base part
for param in self.base_midblock.parameters():
param.requires_grad = False
def forward(
self,
hidden_states_base: Tensor,
temb: Tensor,
encoder_hidden_states: Tensor,
hidden_states_ctrl: Optional[Tensor] = None,
conditioning_scale: Optional[float] = 1.0,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
attention_mask: Optional[Tensor] = None,
encoder_attention_mask: Optional[Tensor] = None,
apply_control: bool = True,
) -> Tuple[Tensor, Tensor]:
if cross_attention_kwargs is not None:
if cross_attention_kwargs.get("scale", None) is not None:
logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
h_base = hidden_states_base
h_ctrl = hidden_states_ctrl
joint_args = {
"temb": temb,
"encoder_hidden_states": encoder_hidden_states,
"attention_mask": attention_mask,
"cross_attention_kwargs": cross_attention_kwargs,
"encoder_attention_mask": encoder_attention_mask,
}
if apply_control:
h_ctrl = torch.cat([h_ctrl, self.base_to_ctrl(h_base)], dim=1) # concat base -> ctrl
h_base = self.base_midblock(h_base, **joint_args) # apply base mid block
if apply_control:
h_ctrl = self.ctrl_midblock(h_ctrl, **joint_args) # apply ctrl mid block
h_base = h_base + self.ctrl_to_base(h_ctrl) * conditioning_scale # add ctrl -> base
return h_base, h_ctrl
class ControlNetXSCrossAttnUpBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
ctrl_skip_channels: List[int],
temb_channels: int,
norm_num_groups: int = 32,
resolution_idx: Optional[int] = None,
has_crossattn=True,
transformer_layers_per_block: int = 1,
num_attention_heads: int = 1,
cross_attention_dim: int = 1024,
add_upsample: bool = True,
upcast_attention: bool = False,
use_linear_projection: Optional[bool] = True,
):
super().__init__()
resnets = []
attentions = []
ctrl_to_base = []
num_layers = 3 # only support sd + sdxl
self.has_cross_attention = has_crossattn
self.num_attention_heads = num_attention_heads
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * num_layers
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
ctrl_to_base.append(make_zero_conv(ctrl_skip_channels[i], resnet_in_channels))
resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
groups=norm_num_groups,
)
)
if has_crossattn:
attentions.append(
Transformer2DModel(
num_attention_heads,
out_channels // num_attention_heads,
in_channels=out_channels,
num_layers=transformer_layers_per_block[i],
cross_attention_dim=cross_attention_dim,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
norm_num_groups=norm_num_groups,
)
)
self.resnets = nn.ModuleList(resnets)
self.attentions = nn.ModuleList(attentions) if has_crossattn else [None] * num_layers
self.ctrl_to_base = nn.ModuleList(ctrl_to_base)
if add_upsample:
self.upsamplers = Upsample2D(out_channels, use_conv=True, out_channels=out_channels)
else:
self.upsamplers = None
self.gradient_checkpointing = False
self.resolution_idx = resolution_idx
@classmethod
def from_modules(cls, base_upblock: CrossAttnUpBlock2D, ctrl_upblock: UpBlockControlNetXSAdapter):
ctrl_to_base_skip_connections = ctrl_upblock.ctrl_to_base
# get params
def get_first_cross_attention(block):
return block.attentions[0].transformer_blocks[0].attn2
out_channels = base_upblock.resnets[0].out_channels
in_channels = base_upblock.resnets[-1].in_channels - out_channels
prev_output_channels = base_upblock.resnets[0].in_channels - out_channels
ctrl_skip_channels = [c.in_channels for c in ctrl_to_base_skip_connections]
temb_channels = base_upblock.resnets[0].time_emb_proj.in_features
num_groups = base_upblock.resnets[0].norm1.num_groups
resolution_idx = base_upblock.resolution_idx
if hasattr(base_upblock, "attentions"):
has_crossattn = True
transformer_layers_per_block = len(base_upblock.attentions[0].transformer_blocks)
num_attention_heads = get_first_cross_attention(base_upblock).heads
cross_attention_dim = get_first_cross_attention(base_upblock).cross_attention_dim
upcast_attention = get_first_cross_attention(base_upblock).upcast_attention
use_linear_projection = base_upblock.attentions[0].use_linear_projection
else:
has_crossattn = False
transformer_layers_per_block = None
num_attention_heads = None
cross_attention_dim = None
upcast_attention = None
use_linear_projection = None
add_upsample = base_upblock.upsamplers is not None
# create model
model = cls(
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channels,
ctrl_skip_channels=ctrl_skip_channels,
temb_channels=temb_channels,
norm_num_groups=num_groups,
resolution_idx=resolution_idx,
has_crossattn=has_crossattn,
transformer_layers_per_block=transformer_layers_per_block,
num_attention_heads=num_attention_heads,
cross_attention_dim=cross_attention_dim,
add_upsample=add_upsample,
upcast_attention=upcast_attention,
use_linear_projection=use_linear_projection,
)
# load weights
model.resnets.load_state_dict(base_upblock.resnets.state_dict())
if has_crossattn:
model.attentions.load_state_dict(base_upblock.attentions.state_dict())
if add_upsample:
model.upsamplers.load_state_dict(base_upblock.upsamplers[0].state_dict())
model.ctrl_to_base.load_state_dict(ctrl_to_base_skip_connections.state_dict())
return model
def freeze_base_params(self) -> None:
"""Freeze the weights of the parts belonging to the base UNet2DConditionModel, and leave everything else unfrozen for fine
tuning."""
# Unfreeze everything
for param in self.parameters():
param.requires_grad = True
# Freeze base part
base_parts = [self.resnets]
if isinstance(self.attentions, nn.ModuleList): # attentions can be a list of Nones
base_parts.append(self.attentions)
if self.upsamplers is not None:
base_parts.append(self.upsamplers)
for part in base_parts:
for param in part.parameters():
param.requires_grad = False
def forward(
self,
hidden_states: Tensor,
res_hidden_states_tuple_base: Tuple[Tensor, ...],
res_hidden_states_tuple_ctrl: Tuple[Tensor, ...],
temb: Tensor,
encoder_hidden_states: Optional[Tensor] = None,
conditioning_scale: Optional[float] = 1.0,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
attention_mask: Optional[Tensor] = None,
upsample_size: Optional[int] = None,
encoder_attention_mask: Optional[Tensor] = None,
apply_control: bool = True,
) -> Tensor:
if cross_attention_kwargs is not None:
if cross_attention_kwargs.get("scale", None) is not None:
logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
is_freeu_enabled = (
getattr(self, "s1", None)
and getattr(self, "s2", None)
and getattr(self, "b1", None)
and getattr(self, "b2", None)
)
def maybe_apply_freeu_to_subblock(hidden_states, res_h_base):
# FreeU: Only operate on the first two stages
if is_freeu_enabled:
return apply_freeu(
self.resolution_idx,
hidden_states,
res_h_base,
s1=self.s1,
s2=self.s2,
b1=self.b1,
b2=self.b2,
)
else:
return hidden_states, res_h_base
for resnet, attn, c2b, res_h_base, res_h_ctrl in zip(
self.resnets,
self.attentions,
self.ctrl_to_base,
reversed(res_hidden_states_tuple_base),
reversed(res_hidden_states_tuple_ctrl),
):
if apply_control:
hidden_states += c2b(res_h_ctrl) * conditioning_scale
hidden_states, res_h_base = maybe_apply_freeu_to_subblock(hidden_states, res_h_base)
hidden_states = torch.cat([hidden_states, res_h_base], dim=1)
if torch.is_grad_enabled() and self.gradient_checkpointing:
hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb)
else:
hidden_states = resnet(hidden_states, temb)
if attn is not None:
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
if self.upsamplers is not None:
hidden_states = self.upsamplers(hidden_states, upsample_size)
return hidden_states
def make_zero_conv(in_channels, out_channels=None):
return zero_module(nn.Conv2d(in_channels, out_channels, 1, padding=0))
def zero_module(module):
for p in module.parameters():
nn.init.zeros_(p)
return module
def find_largest_factor(number, max_factor):
factor = max_factor
if factor >= number:
return number
while factor != 0:
residual = number % factor
if residual == 0:
return factor
factor -= 1
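# Worked examples for find_largest_factor (illustrative):
#     find_largest_factor(960, max_factor=32) -> 32  (960 is divisible by 32)
#     find_largest_factor(60, max_factor=32)  -> 30  (32 and 31 do not divide 60, 30 does)
#     find_largest_factor(20, max_factor=32)  -> 20  (max_factor >= number, so number is returned)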
| diffusers/src/diffusers/models/controlnets/controlnet_xs.py/0 | {
"file_path": "diffusers/src/diffusers/models/controlnets/controlnet_xs.py",
"repo_id": "diffusers",
"token_count": 39504
} |
# Copyright 2024 AuraFlow Authors, The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import FromOriginalModelMixin
from ...utils import logging
from ...utils.torch_utils import maybe_allow_in_graph
from ..attention_processor import (
Attention,
AttentionProcessor,
AuraFlowAttnProcessor2_0,
FusedAuraFlowAttnProcessor2_0,
)
from ..embeddings import TimestepEmbedding, Timesteps
from ..modeling_outputs import Transformer2DModelOutput
from ..modeling_utils import ModelMixin
from ..normalization import AdaLayerNormZero, FP32LayerNorm
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
# Taken from the original aura flow inference code.
def find_multiple(n: int, k: int) -> int:
if n % k == 0:
return n
return n + k - (n % k)
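# Worked examples for find_multiple (illustrative):
#     find_multiple(768, 256) -> 768  (already a multiple of 256)
#     find_multiple(683, 256) -> 768  (683 % 256 == 171, so 683 + 256 - 171 == 768)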
# Aura Flow patch embed doesn't use convs for projections.
# Additionally, it uses learned positional embeddings.
class AuraFlowPatchEmbed(nn.Module):
def __init__(
self,
height=224,
width=224,
patch_size=16,
in_channels=3,
embed_dim=768,
pos_embed_max_size=None,
):
super().__init__()
self.num_patches = (height // patch_size) * (width // patch_size)
self.pos_embed_max_size = pos_embed_max_size
self.proj = nn.Linear(patch_size * patch_size * in_channels, embed_dim)
self.pos_embed = nn.Parameter(torch.randn(1, pos_embed_max_size, embed_dim) * 0.1)
self.patch_size = patch_size
self.height, self.width = height // patch_size, width // patch_size
self.base_size = height // patch_size
def pe_selection_index_based_on_dim(self, h, w):
# select a subset of the positional embedding based on H, W, where H, W is the size of the latent
# the PE will be viewed as a 2d grid, and an H/p x W/p block of the PE will be selected
# because the original input is in flattened format, we have to flatten this 2d grid as well.
h_p, w_p = h // self.patch_size, w // self.patch_size
original_pe_indexes = torch.arange(self.pos_embed.shape[1])
h_max, w_max = int(self.pos_embed_max_size**0.5), int(self.pos_embed_max_size**0.5)
original_pe_indexes = original_pe_indexes.view(h_max, w_max)
starth = h_max // 2 - h_p // 2
endh = starth + h_p
startw = w_max // 2 - w_p // 2
endw = startw + w_p
original_pe_indexes = original_pe_indexes[starth:endh, startw:endw]
return original_pe_indexes.flatten()
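# Worked example (illustrative): with pos_embed_max_size=1024 the positional-embedding grid is
# 32x32. For a 48x48 latent and patch_size=2, h_p = w_p = 24, so the centered 24x24 block of
# indices [4:28, 4:28] is selected and flattened.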
def forward(self, latent):
batch_size, num_channels, height, width = latent.size()
latent = latent.view(
batch_size,
num_channels,
height // self.patch_size,
self.patch_size,
width // self.patch_size,
self.patch_size,
)
latent = latent.permute(0, 2, 4, 1, 3, 5).flatten(-3).flatten(1, 2)
latent = self.proj(latent)
pe_index = self.pe_selection_index_based_on_dim(height, width)
return latent + self.pos_embed[:, pe_index]
# Taken from the original Aura flow inference code.
# Our feedforward only has GELU but Aura uses SiLU.
class AuraFlowFeedForward(nn.Module):
def __init__(self, dim, hidden_dim=None) -> None:
super().__init__()
if hidden_dim is None:
hidden_dim = 4 * dim
final_hidden_dim = int(2 * hidden_dim / 3)
final_hidden_dim = find_multiple(final_hidden_dim, 256)
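# Worked example (illustrative): for dim=3072 and hidden_dim=4*3072=12288,
# final_hidden_dim = int(2 * 12288 / 3) = 8192, which is already a multiple of 256.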
self.linear_1 = nn.Linear(dim, final_hidden_dim, bias=False)
self.linear_2 = nn.Linear(dim, final_hidden_dim, bias=False)
self.out_projection = nn.Linear(final_hidden_dim, dim, bias=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = F.silu(self.linear_1(x)) * self.linear_2(x)
x = self.out_projection(x)
return x
class AuraFlowPreFinalBlock(nn.Module):
def __init__(self, embedding_dim: int, conditioning_embedding_dim: int):
super().__init__()
self.silu = nn.SiLU()
self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=False)
def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor:
emb = self.linear(self.silu(conditioning_embedding).to(x.dtype))
scale, shift = torch.chunk(emb, 2, dim=1)
x = x * (1 + scale)[:, None, :] + shift[:, None, :]
return x
@maybe_allow_in_graph
class AuraFlowSingleTransformerBlock(nn.Module):
"""Similar to `AuraFlowJointTransformerBlock` with a single DiT instead of an MMDiT."""
def __init__(self, dim, num_attention_heads, attention_head_dim):
super().__init__()
self.norm1 = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm")
processor = AuraFlowAttnProcessor2_0()
self.attn = Attention(
query_dim=dim,
cross_attention_dim=None,
dim_head=attention_head_dim,
heads=num_attention_heads,
qk_norm="fp32_layer_norm",
out_dim=dim,
bias=False,
out_bias=False,
processor=processor,
)
self.norm2 = FP32LayerNorm(dim, elementwise_affine=False, bias=False)
self.ff = AuraFlowFeedForward(dim, dim * 4)
def forward(self, hidden_states: torch.FloatTensor, temb: torch.FloatTensor):
residual = hidden_states
# Norm + Projection.
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
# Attention.
attn_output = self.attn(hidden_states=norm_hidden_states)
# Process attention outputs for the `hidden_states`.
hidden_states = self.norm2(residual + gate_msa.unsqueeze(1) * attn_output)
hidden_states = hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
ff_output = self.ff(hidden_states)
hidden_states = gate_mlp.unsqueeze(1) * ff_output
hidden_states = residual + hidden_states
return hidden_states
@maybe_allow_in_graph
class AuraFlowJointTransformerBlock(nn.Module):
r"""
Transformer block for Aura Flow. Similar to SD3 MMDiT. Differences (non-exhaustive):
* QK Norm in the attention blocks
* No bias in the attention blocks
* Most LayerNorms are in FP32
Parameters:
dim (`int`): The number of channels in the input and output.
num_attention_heads (`int`): The number of heads to use for multi-head attention.
attention_head_dim (`int`): The number of channels in each head.
"""
def __init__(self, dim, num_attention_heads, attention_head_dim):
super().__init__()
self.norm1 = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm")
self.norm1_context = AdaLayerNormZero(dim, bias=False, norm_type="fp32_layer_norm")
processor = AuraFlowAttnProcessor2_0()
self.attn = Attention(
query_dim=dim,
cross_attention_dim=None,
added_kv_proj_dim=dim,
added_proj_bias=False,
dim_head=attention_head_dim,
heads=num_attention_heads,
qk_norm="fp32_layer_norm",
out_dim=dim,
bias=False,
out_bias=False,
processor=processor,
context_pre_only=False,
)
self.norm2 = FP32LayerNorm(dim, elementwise_affine=False, bias=False)
self.ff = AuraFlowFeedForward(dim, dim * 4)
self.norm2_context = FP32LayerNorm(dim, elementwise_affine=False, bias=False)
self.ff_context = AuraFlowFeedForward(dim, dim * 4)
def forward(
self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor, temb: torch.FloatTensor
):
residual = hidden_states
residual_context = encoder_hidden_states
# Norm + Projection.
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
encoder_hidden_states, emb=temb
)
# Attention.
attn_output, context_attn_output = self.attn(
hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states
)
# Process attention outputs for the `hidden_states`.
hidden_states = self.norm2(residual + gate_msa.unsqueeze(1) * attn_output)
hidden_states = hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
hidden_states = gate_mlp.unsqueeze(1) * self.ff(hidden_states)
hidden_states = residual + hidden_states
# Process attention outputs for the `encoder_hidden_states`.
encoder_hidden_states = self.norm2_context(residual_context + c_gate_msa.unsqueeze(1) * context_attn_output)
encoder_hidden_states = encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
encoder_hidden_states = c_gate_mlp.unsqueeze(1) * self.ff_context(encoder_hidden_states)
encoder_hidden_states = residual_context + encoder_hidden_states
return encoder_hidden_states, hidden_states
class AuraFlowTransformer2DModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
r"""
A 2D Transformer model as introduced in AuraFlow (https://blog.fal.ai/auraflow/).
Parameters:
sample_size (`int`): The width of the latent images. This is fixed during training since
it is used to learn a number of position embeddings.
patch_size (`int`): Patch size to turn the input data into small patches.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input.
num_mmdit_layers (`int`, *optional*, defaults to 4): The number of layers of MMDiT Transformer blocks to use.
num_single_dit_layers (`int`, *optional*, defaults to 32):
The number of layers of single DiT Transformer blocks to use. These blocks use concatenated image and text
representations.
attention_head_dim (`int`, *optional*, defaults to 256): The number of channels in each head.
num_attention_heads (`int`, *optional*, defaults to 12): The number of heads to use for multi-head attention.
joint_attention_dim (`int`, *optional*, defaults to 2048): The number of `encoder_hidden_states` dimensions to use.
caption_projection_dim (`int`, defaults to 3072): Number of dimensions to use when projecting the `encoder_hidden_states`.
out_channels (`int`, defaults to 4): Number of output channels.
pos_embed_max_size (`int`, defaults to 1024): Maximum positions to embed from the image latents.
"""
_no_split_modules = ["AuraFlowJointTransformerBlock", "AuraFlowSingleTransformerBlock", "AuraFlowPatchEmbed"]
_skip_layerwise_casting_patterns = ["pos_embed", "norm"]
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: int = 64,
patch_size: int = 2,
in_channels: int = 4,
num_mmdit_layers: int = 4,
num_single_dit_layers: int = 32,
attention_head_dim: int = 256,
num_attention_heads: int = 12,
joint_attention_dim: int = 2048,
caption_projection_dim: int = 3072,
out_channels: int = 4,
pos_embed_max_size: int = 1024,
):
super().__init__()
default_out_channels = in_channels
self.out_channels = out_channels if out_channels is not None else default_out_channels
self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
self.pos_embed = AuraFlowPatchEmbed(
height=self.config.sample_size,
width=self.config.sample_size,
patch_size=self.config.patch_size,
in_channels=self.config.in_channels,
embed_dim=self.inner_dim,
pos_embed_max_size=pos_embed_max_size,
)
self.context_embedder = nn.Linear(
self.config.joint_attention_dim, self.config.caption_projection_dim, bias=False
)
self.time_step_embed = Timesteps(num_channels=256, downscale_freq_shift=0, scale=1000, flip_sin_to_cos=True)
self.time_step_proj = TimestepEmbedding(in_channels=256, time_embed_dim=self.inner_dim)
self.joint_transformer_blocks = nn.ModuleList(
[
AuraFlowJointTransformerBlock(
dim=self.inner_dim,
num_attention_heads=self.config.num_attention_heads,
attention_head_dim=self.config.attention_head_dim,
)
for i in range(self.config.num_mmdit_layers)
]
)
self.single_transformer_blocks = nn.ModuleList(
[
AuraFlowSingleTransformerBlock(
dim=self.inner_dim,
num_attention_heads=self.config.num_attention_heads,
attention_head_dim=self.config.attention_head_dim,
)
for _ in range(self.config.num_single_dit_layers)
]
)
self.norm_out = AuraFlowPreFinalBlock(self.inner_dim, self.inner_dim)
self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=False)
# https://arxiv.org/abs/2309.16588
# prevents artifacts in the attention maps
self.register_tokens = nn.Parameter(torch.randn(1, 8, self.inner_dim) * 0.02)
self.gradient_checkpointing = False
@property
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight name.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor()
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedAuraFlowAttnProcessor2_0
def fuse_qkv_projections(self):
"""
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
self.original_attn_processors = None
for _, attn_processor in self.attn_processors.items():
if "Added" in str(attn_processor.__class__.__name__):
raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
self.original_attn_processors = self.attn_processors
for module in self.modules():
if isinstance(module, Attention):
module.fuse_projections(fuse=True)
self.set_attn_processor(FusedAuraFlowAttnProcessor2_0())
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
if self.original_attn_processors is not None:
self.set_attn_processor(self.original_attn_processors)
def forward(
self,
hidden_states: torch.FloatTensor,
encoder_hidden_states: torch.FloatTensor = None,
timestep: torch.LongTensor = None,
return_dict: bool = True,
) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
height, width = hidden_states.shape[-2:]
# Apply patch embedding, timestep embedding, and project the caption embeddings.
hidden_states = self.pos_embed(hidden_states) # takes care of adding positional embeddings too.
temb = self.time_step_embed(timestep).to(dtype=next(self.parameters()).dtype)
temb = self.time_step_proj(temb)
encoder_hidden_states = self.context_embedder(encoder_hidden_states)
encoder_hidden_states = torch.cat(
[self.register_tokens.repeat(encoder_hidden_states.size(0), 1, 1), encoder_hidden_states], dim=1
)
# MMDiT blocks.
for index_block, block in enumerate(self.joint_transformer_blocks):
if torch.is_grad_enabled() and self.gradient_checkpointing:
encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
block,
hidden_states,
encoder_hidden_states,
temb,
)
else:
encoder_hidden_states, hidden_states = block(
hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb
)
# Single DiT blocks that combine the `hidden_states` (image) and `encoder_hidden_states` (text)
if len(self.single_transformer_blocks) > 0:
encoder_seq_len = encoder_hidden_states.size(1)
combined_hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
for index_block, block in enumerate(self.single_transformer_blocks):
if torch.is_grad_enabled() and self.gradient_checkpointing:
combined_hidden_states = self._gradient_checkpointing_func(
block,
combined_hidden_states,
temb,
)
else:
combined_hidden_states = block(hidden_states=combined_hidden_states, temb=temb)
hidden_states = combined_hidden_states[:, encoder_seq_len:]
hidden_states = self.norm_out(hidden_states, temb)
hidden_states = self.proj_out(hidden_states)
# unpatchify
patch_size = self.config.patch_size
out_channels = self.config.out_channels
height = height // patch_size
width = width // patch_size
hidden_states = hidden_states.reshape(
shape=(hidden_states.shape[0], height, width, patch_size, patch_size, out_channels)
)
hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
output = hidden_states.reshape(
shape=(hidden_states.shape[0], out_channels, height * patch_size, width * patch_size)
)
if not return_dict:
return (output,)
return Transformer2DModelOutput(sample=output)
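# A minimal call sketch for the model above (shapes are illustrative assumptions based on the
# default config: 4 latent channels, sample_size=64, patch_size=2, joint_attention_dim=2048):
#     model = AuraFlowTransformer2DModel()
#     latents = torch.randn(1, 4, 64, 64)
#     text_embeds = torch.randn(1, 77, 2048)
#     t = torch.tensor([500])
#     out = model(hidden_states=latents, encoder_hidden_states=text_embeds, timestep=t).sample
#     # `out` has shape (1, 4, 64, 64)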
| diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py/0 | {
"file_path": "diffusers/src/diffusers/models/transformers/auraflow_transformer_2d.py",
"repo_id": "diffusers",
"token_count": 9262
} |
# Copyright 2024 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin
from ...models.attention import FeedForward
from ...models.attention_processor import (
Attention,
AttentionProcessor,
FluxAttnProcessor2_0,
FluxAttnProcessor2_0_NPU,
FusedFluxAttnProcessor2_0,
)
from ...models.modeling_utils import ModelMixin
from ...models.normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle
from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
from ...utils.import_utils import is_torch_npu_available
from ...utils.torch_utils import maybe_allow_in_graph
from ..cache_utils import CacheMixin
from ..embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed
from ..modeling_outputs import Transformer2DModelOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@maybe_allow_in_graph
class FluxSingleTransformerBlock(nn.Module):
r"""
A Transformer block following the MMDiT architecture, introduced in Stable Diffusion 3.
Reference: https://arxiv.org/abs/2403.03206
Parameters:
dim (`int`): The number of channels in the input and output.
num_attention_heads (`int`): The number of heads to use for multi-head attention.
attention_head_dim (`int`): The number of channels in each head.
mlp_ratio (`float`, defaults to `4.0`): The ratio of the feed-forward hidden dimension to `dim`.
"""
def __init__(self, dim, num_attention_heads, attention_head_dim, mlp_ratio=4.0):
super().__init__()
self.mlp_hidden_dim = int(dim * mlp_ratio)
self.norm = AdaLayerNormZeroSingle(dim)
self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim)
self.act_mlp = nn.GELU(approximate="tanh")
self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim)
if is_torch_npu_available():
processor = FluxAttnProcessor2_0_NPU()
else:
processor = FluxAttnProcessor2_0()
self.attn = Attention(
query_dim=dim,
cross_attention_dim=None,
dim_head=attention_head_dim,
heads=num_attention_heads,
out_dim=dim,
bias=True,
processor=processor,
qk_norm="rms_norm",
eps=1e-6,
pre_only=True,
)
def forward(
self,
hidden_states: torch.Tensor,
temb: torch.Tensor,
image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
joint_attention_kwargs: Optional[Dict[str, Any]] = None,
) -> torch.Tensor:
residual = hidden_states
norm_hidden_states, gate = self.norm(hidden_states, emb=temb)
mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))
joint_attention_kwargs = joint_attention_kwargs or {}
attn_output = self.attn(
hidden_states=norm_hidden_states,
image_rotary_emb=image_rotary_emb,
**joint_attention_kwargs,
)
hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
gate = gate.unsqueeze(1)
hidden_states = gate * self.proj_out(hidden_states)
hidden_states = residual + hidden_states
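# 65504 is the largest finite value representable in float16; clipping prevents overflow to +/-inf.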
if hidden_states.dtype == torch.float16:
hidden_states = hidden_states.clip(-65504, 65504)
return hidden_states
@maybe_allow_in_graph
class FluxTransformerBlock(nn.Module):
r"""
A Transformer block following the MMDiT architecture, introduced in Stable Diffusion 3.
Reference: https://arxiv.org/abs/2403.03206
Args:
dim (`int`):
The embedding dimension of the block.
num_attention_heads (`int`):
The number of attention heads to use.
attention_head_dim (`int`):
The number of dimensions to use for each attention head.
qk_norm (`str`, defaults to `"rms_norm"`):
The normalization to use for the query and key tensors.
eps (`float`, defaults to `1e-6`):
The epsilon value to use for the normalization.
"""
def __init__(
self, dim: int, num_attention_heads: int, attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6
):
super().__init__()
self.norm1 = AdaLayerNormZero(dim)
self.norm1_context = AdaLayerNormZero(dim)
if hasattr(F, "scaled_dot_product_attention"):
processor = FluxAttnProcessor2_0()
else:
raise ValueError(
"The current PyTorch version does not support the `scaled_dot_product_attention` function."
)
self.attn = Attention(
query_dim=dim,
cross_attention_dim=None,
added_kv_proj_dim=dim,
dim_head=attention_head_dim,
heads=num_attention_heads,
out_dim=dim,
context_pre_only=False,
bias=True,
processor=processor,
qk_norm=qk_norm,
eps=eps,
)
self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
# let chunk size default to None
self._chunk_size = None
self._chunk_dim = 0
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: torch.Tensor,
temb: torch.Tensor,
image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
joint_attention_kwargs: Optional[Dict[str, Any]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
encoder_hidden_states, emb=temb
)
joint_attention_kwargs = joint_attention_kwargs or {}
# Attention.
attention_outputs = self.attn(
hidden_states=norm_hidden_states,
encoder_hidden_states=norm_encoder_hidden_states,
image_rotary_emb=image_rotary_emb,
**joint_attention_kwargs,
)
if len(attention_outputs) == 2:
attn_output, context_attn_output = attention_outputs
elif len(attention_outputs) == 3:
attn_output, context_attn_output, ip_attn_output = attention_outputs
# Process attention outputs for the `hidden_states`.
attn_output = gate_msa.unsqueeze(1) * attn_output
hidden_states = hidden_states + attn_output
norm_hidden_states = self.norm2(hidden_states)
norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
ff_output = self.ff(norm_hidden_states)
ff_output = gate_mlp.unsqueeze(1) * ff_output
hidden_states = hidden_states + ff_output
if len(attention_outputs) == 3:
hidden_states = hidden_states + ip_attn_output
# Process attention outputs for the `encoder_hidden_states`.
context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output
encoder_hidden_states = encoder_hidden_states + context_attn_output
norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
context_ff_output = self.ff_context(norm_encoder_hidden_states)
encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output
if encoder_hidden_states.dtype == torch.float16:
encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504)
return encoder_hidden_states, hidden_states
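# A minimal, hedged sketch of the dual-stream block above; the import path follows
# this file's location and the small dimensions are placeholders. Both streams keep
# their shapes, and rotary embeddings default to None.
import torch
from diffusers.models.transformers.transformer_flux import FluxTransformerBlock
block = FluxTransformerBlock(dim=64, num_attention_heads=2, attention_head_dim=32)
hidden_states = torch.randn(2, 16, 64)          # image-stream tokens
encoder_hidden_states = torch.randn(2, 8, 64)   # text-stream tokens
temb = torch.randn(2, 64)                       # pooled time/text conditioning
encoder_hidden_states, hidden_states = block(
    hidden_states=hidden_states,
    encoder_hidden_states=encoder_hidden_states,
    temb=temb,
)   # -> (2, 8, 64) and (2, 16, 64)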
class FluxTransformer2DModel(
ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, FluxTransformer2DLoadersMixin, CacheMixin
):
"""
The Transformer model introduced in Flux.
Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
Args:
patch_size (`int`, defaults to `1`):
Patch size to turn the input data into small patches.
in_channels (`int`, defaults to `64`):
The number of channels in the input.
out_channels (`int`, *optional*, defaults to `None`):
The number of channels in the output. If not specified, it defaults to `in_channels`.
num_layers (`int`, defaults to `19`):
The number of layers of dual stream DiT blocks to use.
num_single_layers (`int`, defaults to `38`):
The number of layers of single stream DiT blocks to use.
attention_head_dim (`int`, defaults to `128`):
The number of dimensions to use for each attention head.
num_attention_heads (`int`, defaults to `24`):
The number of attention heads to use.
joint_attention_dim (`int`, defaults to `4096`):
The number of dimensions to use for the joint attention (embedding/channel dimension of
`encoder_hidden_states`).
pooled_projection_dim (`int`, defaults to `768`):
The number of dimensions to use for the pooled projection.
guidance_embeds (`bool`, defaults to `False`):
Whether to use guidance embeddings for guidance-distilled variant of the model.
axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`):
The dimensions to use for the rotary positional embeddings.
"""
_supports_gradient_checkpointing = True
_no_split_modules = ["FluxTransformerBlock", "FluxSingleTransformerBlock"]
_skip_layerwise_casting_patterns = ["pos_embed", "norm"]
@register_to_config
def __init__(
self,
patch_size: int = 1,
in_channels: int = 64,
out_channels: Optional[int] = None,
num_layers: int = 19,
num_single_layers: int = 38,
attention_head_dim: int = 128,
num_attention_heads: int = 24,
joint_attention_dim: int = 4096,
pooled_projection_dim: int = 768,
guidance_embeds: bool = False,
axes_dims_rope: Tuple[int] = (16, 56, 56),
):
super().__init__()
self.out_channels = out_channels or in_channels
self.inner_dim = num_attention_heads * attention_head_dim
self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope)
text_time_guidance_cls = (
CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings
)
self.time_text_embed = text_time_guidance_cls(
embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim
)
self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim)
self.x_embedder = nn.Linear(in_channels, self.inner_dim)
self.transformer_blocks = nn.ModuleList(
[
FluxTransformerBlock(
dim=self.inner_dim,
num_attention_heads=num_attention_heads,
attention_head_dim=attention_head_dim,
)
for _ in range(num_layers)
]
)
self.single_transformer_blocks = nn.ModuleList(
[
FluxSingleTransformerBlock(
dim=self.inner_dim,
num_attention_heads=num_attention_heads,
attention_head_dim=attention_head_dim,
)
for _ in range(num_single_layers)
]
)
self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
self.gradient_checkpointing = False
@property
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor()
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedFluxAttnProcessor2_0
def fuse_qkv_projections(self):
"""
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
self.original_attn_processors = None
for _, attn_processor in self.attn_processors.items():
if "Added" in str(attn_processor.__class__.__name__):
raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
self.original_attn_processors = self.attn_processors
for module in self.modules():
if isinstance(module, Attention):
module.fuse_projections(fuse=True)
self.set_attn_processor(FusedFluxAttnProcessor2_0())
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
if self.original_attn_processors is not None:
self.set_attn_processor(self.original_attn_processors)
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: torch.Tensor = None,
pooled_projections: torch.Tensor = None,
timestep: torch.LongTensor = None,
img_ids: torch.Tensor = None,
txt_ids: torch.Tensor = None,
guidance: torch.Tensor = None,
joint_attention_kwargs: Optional[Dict[str, Any]] = None,
controlnet_block_samples=None,
controlnet_single_block_samples=None,
return_dict: bool = True,
controlnet_blocks_repeat: bool = False,
) -> Union[torch.Tensor, Transformer2DModelOutput]:
"""
The [`FluxTransformer2DModel`] forward method.
Args:
hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`):
Input `hidden_states`.
encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`):
Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
pooled_projections (`torch.Tensor` of shape `(batch_size, projection_dim)`): Embeddings projected
from the embeddings of input conditions.
timestep ( `torch.LongTensor`):
Used to indicate denoising step.
controlnet_block_samples (`list` of `torch.Tensor`, *optional*):
A list of tensors that, if specified, are added to the residuals of the dual-stream transformer blocks.
controlnet_single_block_samples (`list` of `torch.Tensor`, *optional*):
A list of tensors that, if specified, are added to the residuals of the single-stream transformer blocks.
joint_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
tuple.
Returns:
If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
`tuple` where the first element is the sample tensor.
"""
if joint_attention_kwargs is not None:
joint_attention_kwargs = joint_attention_kwargs.copy()
lora_scale = joint_attention_kwargs.pop("scale", 1.0)
else:
lora_scale = 1.0
if USE_PEFT_BACKEND:
# weight the lora layers by setting `lora_scale` for each PEFT layer
scale_lora_layers(self, lora_scale)
else:
if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
logger.warning(
"Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
)
hidden_states = self.x_embedder(hidden_states)
timestep = timestep.to(hidden_states.dtype) * 1000
if guidance is not None:
guidance = guidance.to(hidden_states.dtype) * 1000
else:
guidance = None
temb = (
self.time_text_embed(timestep, pooled_projections)
if guidance is None
else self.time_text_embed(timestep, guidance, pooled_projections)
)
encoder_hidden_states = self.context_embedder(encoder_hidden_states)
if txt_ids.ndim == 3:
logger.warning(
"Passing `txt_ids` 3d torch.Tensor is deprecated."
"Please remove the batch dimension and pass it as a 2d torch Tensor"
)
txt_ids = txt_ids[0]
if img_ids.ndim == 3:
logger.warning(
"Passing `img_ids` 3d torch.Tensor is deprecated."
"Please remove the batch dimension and pass it as a 2d torch Tensor"
)
img_ids = img_ids[0]
ids = torch.cat((txt_ids, img_ids), dim=0)
image_rotary_emb = self.pos_embed(ids)
if joint_attention_kwargs is not None and "ip_adapter_image_embeds" in joint_attention_kwargs:
ip_adapter_image_embeds = joint_attention_kwargs.pop("ip_adapter_image_embeds")
ip_hidden_states = self.encoder_hid_proj(ip_adapter_image_embeds)
joint_attention_kwargs.update({"ip_hidden_states": ip_hidden_states})
for index_block, block in enumerate(self.transformer_blocks):
if torch.is_grad_enabled() and self.gradient_checkpointing:
encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
block,
hidden_states,
encoder_hidden_states,
temb,
image_rotary_emb,
)
else:
encoder_hidden_states, hidden_states = block(
hidden_states=hidden_states,
encoder_hidden_states=encoder_hidden_states,
temb=temb,
image_rotary_emb=image_rotary_emb,
joint_attention_kwargs=joint_attention_kwargs,
)
# controlnet residual
if controlnet_block_samples is not None:
interval_control = len(self.transformer_blocks) / len(controlnet_block_samples)
interval_control = int(np.ceil(interval_control))
# For Xlabs ControlNet.
if controlnet_blocks_repeat:
hidden_states = (
hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)]
)
else:
hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control]
hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
for index_block, block in enumerate(self.single_transformer_blocks):
if torch.is_grad_enabled() and self.gradient_checkpointing:
hidden_states = self._gradient_checkpointing_func(
block,
hidden_states,
temb,
image_rotary_emb,
)
else:
hidden_states = block(
hidden_states=hidden_states,
temb=temb,
image_rotary_emb=image_rotary_emb,
joint_attention_kwargs=joint_attention_kwargs,
)
# controlnet residual
if controlnet_single_block_samples is not None:
interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples)
interval_control = int(np.ceil(interval_control))
hidden_states[:, encoder_hidden_states.shape[1] :, ...] = (
hidden_states[:, encoder_hidden_states.shape[1] :, ...]
+ controlnet_single_block_samples[index_block // interval_control]
)
hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...]
hidden_states = self.norm_out(hidden_states, temb)
output = self.proj_out(hidden_states)
if USE_PEFT_BACKEND:
# remove `lora_scale` from each PEFT layer
unscale_lora_layers(self, lora_scale)
if not return_dict:
return (output,)
return Transformer2DModelOutput(sample=output)
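# A hedged, end-to-end sketch of the forward contract above, using a deliberately
# tiny configuration (layer counts, dims, and sequence lengths are placeholders,
# not the released Flux hyperparameters) and assuming the usual top-level export.
# `axes_dims_rope` has to sum to `attention_head_dim` so the rotary embedding
# covers the full head dimension.
import torch
from diffusers import FluxTransformer2DModel
tiny = FluxTransformer2DModel(
    patch_size=1,
    in_channels=4,
    num_layers=1,
    num_single_layers=1,
    attention_head_dim=16,
    num_attention_heads=2,
    joint_attention_dim=32,
    pooled_projection_dim=32,
    guidance_embeds=False,
    axes_dims_rope=(4, 6, 6),
)
batch, img_seq, txt_seq = 1, 16, 8
sample = tiny(
    hidden_states=torch.randn(batch, img_seq, 4),
    encoder_hidden_states=torch.randn(batch, txt_seq, 32),
    pooled_projections=torch.randn(batch, 32),
    timestep=torch.tensor([1.0]),
    img_ids=torch.zeros(img_seq, 3),   # 2d ids, one row per image token
    txt_ids=torch.zeros(txt_seq, 3),   # 2d ids, one row per text token
    return_dict=False,
)[0]   # -> (batch, img_seq, patch_size**2 * in_channels)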
| diffusers/src/diffusers/models/transformers/transformer_flux.py/0 | {
"file_path": "diffusers/src/diffusers/models/transformers/transformer_flux.py",
"repo_id": "diffusers",
"token_count": 11037
} |
# Copyright 2024 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.utils.checkpoint
from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import UNet2DConditionLoadersMixin
from ...utils import logging
from ..activations import get_activation
from ..attention import Attention, FeedForward
from ..attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
FusedAttnProcessor2_0,
)
from ..embeddings import TimestepEmbedding, Timesteps
from ..modeling_utils import ModelMixin
from ..transformers.transformer_temporal import TransformerTemporalModel
from .unet_3d_blocks import (
UNetMidBlock3DCrossAttn,
get_down_block,
get_up_block,
)
from .unet_3d_condition import UNet3DConditionOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class I2VGenXLTransformerTemporalEncoder(nn.Module):
def __init__(
self,
dim: int,
num_attention_heads: int,
attention_head_dim: int,
activation_fn: str = "geglu",
upcast_attention: bool = False,
ff_inner_dim: Optional[int] = None,
dropout: int = 0.0,
):
super().__init__()
self.norm1 = nn.LayerNorm(dim, elementwise_affine=True, eps=1e-5)
self.attn1 = Attention(
query_dim=dim,
heads=num_attention_heads,
dim_head=attention_head_dim,
dropout=dropout,
bias=False,
upcast_attention=upcast_attention,
out_bias=True,
)
self.ff = FeedForward(
dim,
dropout=dropout,
activation_fn=activation_fn,
final_dropout=False,
inner_dim=ff_inner_dim,
bias=True,
)
def forward(
self,
hidden_states: torch.Tensor,
) -> torch.Tensor:
norm_hidden_states = self.norm1(hidden_states)
attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None)
hidden_states = attn_output + hidden_states
if hidden_states.ndim == 4:
hidden_states = hidden_states.squeeze(1)
ff_output = self.ff(hidden_states)
hidden_states = ff_output + hidden_states
if hidden_states.ndim == 4:
hidden_states = hidden_states.squeeze(1)
return hidden_states
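# A small, hedged sketch of the temporal encoder above: it attends over the frame
# axis for every spatial location independently, so the input is laid out as
# (batch * height * width, num_frames, channels). The dimensions below are arbitrary.
import torch
from diffusers.models.unets.unet_i2vgen_xl import I2VGenXLTransformerTemporalEncoder
temporal_encoder = I2VGenXLTransformerTemporalEncoder(
    dim=4, num_attention_heads=2, attention_head_dim=4, activation_fn="gelu", ff_inner_dim=16
)
frame_tokens = torch.randn(6, 8, 4)    # 6 spatial locations, 8 frames, 4 channels
out = temporal_encoder(frame_tokens)   # -> (6, 8, 4), same shape as the input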
class I2VGenXLUNet(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
r"""
I2VGenXL UNet. It is a conditional 3D UNet model that takes a noisy sample, conditional state, and a timestep and
returns a sample-shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
for all models (such as downloading or saving).
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, normalization and activation layers are skipped in post-processing.
cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.
attention_head_dim (`int`, *optional*, defaults to 64): Attention head dim.
num_attention_heads (`int`, *optional*): The number of attention heads.
"""
_supports_gradient_checkpointing = False
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str, ...] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
up_block_types: Tuple[str, ...] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
),
block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
norm_num_groups: Optional[int] = 32,
cross_attention_dim: int = 1024,
attention_head_dim: Union[int, Tuple[int]] = 64,
num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
):
super().__init__()
# When we first integrated the UNet into the library, we didn't have `attention_head_dim`. As a consequence
# of that, we used `num_attention_heads` for arguments that actually denote attention head dimension. This
# is why we ignore `num_attention_heads` and calculate it from `attention_head_dim` below.
# This is still an incorrect way of calculating `num_attention_heads` but we need to stick to it
# without running proper deprecation cycles for the {down,mid,up} blocks which are a
# part of the public API.
num_attention_heads = attention_head_dim
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
# input
self.conv_in = nn.Conv2d(in_channels + in_channels, block_out_channels[0], kernel_size=3, padding=1)
self.transformer_in = TransformerTemporalModel(
num_attention_heads=8,
attention_head_dim=num_attention_heads,
in_channels=block_out_channels[0],
num_layers=1,
norm_num_groups=norm_num_groups,
)
# image embedding
self.image_latents_proj_in = nn.Sequential(
nn.Conv2d(4, in_channels * 4, 3, padding=1),
nn.SiLU(),
nn.Conv2d(in_channels * 4, in_channels * 4, 3, stride=1, padding=1),
nn.SiLU(),
nn.Conv2d(in_channels * 4, in_channels, 3, stride=1, padding=1),
)
self.image_latents_temporal_encoder = I2VGenXLTransformerTemporalEncoder(
dim=in_channels,
num_attention_heads=2,
ff_inner_dim=in_channels * 4,
attention_head_dim=in_channels,
activation_fn="gelu",
)
self.image_latents_context_embedding = nn.Sequential(
nn.Conv2d(4, in_channels * 8, 3, padding=1),
nn.SiLU(),
nn.AdaptiveAvgPool2d((32, 32)),
nn.Conv2d(in_channels * 8, in_channels * 16, 3, stride=2, padding=1),
nn.SiLU(),
nn.Conv2d(in_channels * 16, cross_attention_dim, 3, stride=2, padding=1),
)
# other embeddings -- time, context, fps, etc.
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn="silu")
self.context_embedding = nn.Sequential(
nn.Linear(cross_attention_dim, time_embed_dim),
nn.SiLU(),
nn.Linear(time_embed_dim, cross_attention_dim * in_channels),
)
self.fps_embedding = nn.Sequential(
nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(), nn.Linear(time_embed_dim, time_embed_dim)
)
# blocks
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=1e-05,
resnet_act_fn="silu",
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
num_attention_heads=num_attention_heads[i],
downsample_padding=1,
dual_cross_attention=False,
)
self.down_blocks.append(down_block)
# mid
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=1e-05,
resnet_act_fn="silu",
output_scale_factor=1,
cross_attention_dim=cross_attention_dim,
num_attention_heads=num_attention_heads[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=False,
)
# count how many layers upsample the images
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_num_attention_heads = list(reversed(num_attention_heads))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=1e-05,
resnet_act_fn="silu",
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
num_attention_heads=reversed_num_attention_heads[i],
dual_cross_attention=False,
resolution_idx=i,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-05)
self.conv_act = get_activation("silu")
self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
@property
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor()
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
# Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking
def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None:
"""
Sets the attention processor to use [feed forward
chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers).
Parameters:
chunk_size (`int`, *optional*):
The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually
over each tensor of dim=`dim`.
dim (`int`, *optional*, defaults to `0`):
The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch)
or dim=1 (sequence length).
"""
if dim not in [0, 1]:
raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}")
# By default chunk size is 1
chunk_size = chunk_size or 1
def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
if hasattr(module, "set_chunk_feed_forward"):
module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)
for child in module.children():
fn_recursive_feed_forward(child, chunk_size, dim)
for module in self.children():
fn_recursive_feed_forward(module, chunk_size, dim)
# Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.disable_forward_chunking
def disable_forward_chunking(self):
def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
if hasattr(module, "set_chunk_feed_forward"):
module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)
for child in module.children():
fn_recursive_feed_forward(child, chunk_size, dim)
for module in self.children():
fn_recursive_feed_forward(module, None, 0)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnAddedKVProcessor()
elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnProcessor()
else:
raise ValueError(
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
)
self.set_attn_processor(processor)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.enable_freeu
def enable_freeu(self, s1, s2, b1, b2):
r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
The suffixes after the scaling factors represent the stage blocks where they are being applied.
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
Args:
s1 (`float`):
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
mitigate the "oversmoothing effect" in the enhanced denoising process.
s2 (`float`):
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
mitigate the "oversmoothing effect" in the enhanced denoising process.
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
"""
for i, upsample_block in enumerate(self.up_blocks):
setattr(upsample_block, "s1", s1)
setattr(upsample_block, "s2", s2)
setattr(upsample_block, "b1", b1)
setattr(upsample_block, "b2", b2)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.disable_freeu
def disable_freeu(self):
"""Disables the FreeU mechanism."""
freeu_keys = {"s1", "s2", "b1", "b2"}
for i, upsample_block in enumerate(self.up_blocks):
for k in freeu_keys:
if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
setattr(upsample_block, k, None)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
def fuse_qkv_projections(self):
"""
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
self.original_attn_processors = None
for _, attn_processor in self.attn_processors.items():
if "Added" in str(attn_processor.__class__.__name__):
raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
self.original_attn_processors = self.attn_processors
for module in self.modules():
if isinstance(module, Attention):
module.fuse_projections(fuse=True)
self.set_attn_processor(FusedAttnProcessor2_0())
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
if self.original_attn_processors is not None:
self.set_attn_processor(self.original_attn_processors)
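# A hedged usage sketch for the utilities above, kept in comment form; the tiny
# configuration and the FreeU factors are placeholders only and should be tuned
# per model:
#   unet = I2VGenXLUNet(block_out_channels=(32, 32, 32, 32), norm_num_groups=8,
#                       cross_attention_dim=32, attention_head_dim=8, layers_per_block=1)
#   unet.enable_forward_chunking(chunk_size=2, dim=1)   # chunk feed-forward over the sequence axis
#   unet.disable_forward_chunking()
#   unet.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6)   # placeholder scaling factors
#   unet.disable_freeu()
#   unet.fuse_qkv_projections()                         # swaps in FusedAttnProcessor2_0
#   unet.unfuse_qkv_projections()                       # restores the original processors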
def forward(
self,
sample: torch.Tensor,
timestep: Union[torch.Tensor, float, int],
fps: torch.Tensor,
image_latents: torch.Tensor,
image_embeddings: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
return_dict: bool = True,
) -> Union[UNet3DConditionOutput, Tuple[torch.Tensor]]:
r"""
The [`I2VGenXLUNet`] forward method.
Args:
sample (`torch.Tensor`):
The noisy input tensor with the following shape `(batch, num_frames, channel, height, width)`.
timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
fps (`torch.Tensor`): Frames per second for the video being generated. Used as a "micro-condition".
image_latents (`torch.Tensor`): Image encodings from the VAE.
image_embeddings (`torch.Tensor`):
Projection embeddings of the conditioning image computed with a vision encoder.
encoder_hidden_states (`torch.Tensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unets.unet_3d_condition.UNet3DConditionOutput`] instead of a plain
tuple.
Returns:
[`~models.unets.unet_3d_condition.UNet3DConditionOutput`] or `tuple`:
If `return_dict` is True, an [`~models.unets.unet_3d_condition.UNet3DConditionOutput`] is returned,
otherwise a `tuple` is returned where the first element is the sample tensor.
"""
batch_size, channels, num_frames, height, width = sample.shape
# By default samples have to be at least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (number of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
logger.info("Forward upsample size to force interpolation output size.")
forward_upsample_size = True
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass `timesteps` as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
is_npu = sample.device.type == "npu"
if isinstance(timesteps, float):
dtype = torch.float32 if (is_mps or is_npu) else torch.float64
else:
dtype = torch.int32 if (is_mps or is_npu) else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=self.dtype)
t_emb = self.time_embedding(t_emb, timestep_cond)
# 2. FPS
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
fps = fps.expand(fps.shape[0])
fps_emb = self.fps_embedding(self.time_proj(fps).to(dtype=self.dtype))
# 3. time + FPS embeddings.
emb = t_emb + fps_emb
emb = emb.repeat_interleave(repeats=num_frames, dim=0)
# 4. context embeddings.
# The context embeddings consist of both text embeddings from the input prompt
# AND the image embeddings from the input image. For images, both VAE encodings
# and the CLIP image embeddings are incorporated.
# So the final `context_emb` provides the keys/values for cross-attention in the UNet blocks.
context_emb = sample.new_zeros(batch_size, 0, self.config.cross_attention_dim)
context_emb = torch.cat([context_emb, encoder_hidden_states], dim=1)
image_latents_for_context_embds = image_latents[:, :, :1, :]
image_latents_context_embs = image_latents_for_context_embds.permute(0, 2, 1, 3, 4).reshape(
image_latents_for_context_embds.shape[0] * image_latents_for_context_embds.shape[2],
image_latents_for_context_embds.shape[1],
image_latents_for_context_embds.shape[3],
image_latents_for_context_embds.shape[4],
)
image_latents_context_embs = self.image_latents_context_embedding(image_latents_context_embs)
_batch_size, _channels, _height, _width = image_latents_context_embs.shape
image_latents_context_embs = image_latents_context_embs.permute(0, 2, 3, 1).reshape(
_batch_size, _height * _width, _channels
)
context_emb = torch.cat([context_emb, image_latents_context_embs], dim=1)
image_emb = self.context_embedding(image_embeddings)
image_emb = image_emb.view(-1, self.config.in_channels, self.config.cross_attention_dim)
context_emb = torch.cat([context_emb, image_emb], dim=1)
context_emb = context_emb.repeat_interleave(repeats=num_frames, dim=0)
image_latents = image_latents.permute(0, 2, 1, 3, 4).reshape(
image_latents.shape[0] * image_latents.shape[2],
image_latents.shape[1],
image_latents.shape[3],
image_latents.shape[4],
)
image_latents = self.image_latents_proj_in(image_latents)
image_latents = (
image_latents[None, :]
.reshape(batch_size, num_frames, channels, height, width)
.permute(0, 3, 4, 1, 2)
.reshape(batch_size * height * width, num_frames, channels)
)
image_latents = self.image_latents_temporal_encoder(image_latents)
image_latents = image_latents.reshape(batch_size, height, width, num_frames, channels).permute(0, 4, 3, 1, 2)
# 5. pre-process
sample = torch.cat([sample, image_latents], dim=1)
sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])
sample = self.conv_in(sample)
sample = self.transformer_in(
sample,
num_frames=num_frames,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
)[0]
# 6. down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=context_emb,
num_frames=num_frames,
cross_attention_kwargs=cross_attention_kwargs,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)
down_block_res_samples += res_samples
# 7. mid
if self.mid_block is not None:
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=context_emb,
num_frames=num_frames,
cross_attention_kwargs=cross_attention_kwargs,
)
# 8. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=context_emb,
upsample_size=upsample_size,
num_frames=num_frames,
cross_attention_kwargs=cross_attention_kwargs,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
num_frames=num_frames,
)
# 9. post-process
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
# reshape to (batch, channel, framerate, width, height)
sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)
if not return_dict:
return (sample,)
return UNet3DConditionOutput(sample=sample)
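# A hedged, end-to-end shape sketch for the forward pass above. The configuration
# is deliberately tiny (placeholder values, not the released I2VGen-XL setup); the
# point is only to show which tensors the UNet expects and what it returns.
import torch
from diffusers.models.unets.unet_i2vgen_xl import I2VGenXLUNet
tiny_unet = I2VGenXLUNet(
    sample_size=32,
    in_channels=4,
    out_channels=4,
    block_out_channels=(32, 32, 32, 32),
    layers_per_block=1,
    norm_num_groups=8,
    cross_attention_dim=32,
    attention_head_dim=8,
)
batch, frames, height, width = 1, 2, 32, 32
out = tiny_unet(
    sample=torch.randn(batch, 4, frames, height, width),
    timestep=10,
    fps=torch.tensor([8] * batch),
    image_latents=torch.randn(batch, 4, frames, height, width),
    image_embeddings=torch.randn(batch, 1, 32),        # last dim == cross_attention_dim
    encoder_hidden_states=torch.randn(batch, 6, 32),   # text tokens, dim == cross_attention_dim
    return_dict=False,
)[0]   # -> (batch, 4, frames, height, width)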
| diffusers/src/diffusers/models/unets/unet_i2vgen_xl.py/0 | {
"file_path": "diffusers/src/diffusers/models/unets/unet_i2vgen_xl.py",
"repo_id": "diffusers",
"token_count": 14547
} |
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
_LazyModule,
)
_import_structure = {"pipeline_ddpm": ["DDPMPipeline"]}
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
from .pipeline_ddpm import DDPMPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
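# A short, hedged illustration of what the lazy-module indirection above does:
# importing the subpackage is cheap, and the heavy pipeline module is only loaded
# on first attribute access (unless `DIFFUSERS_SLOW_IMPORT` forces eager imports).
import diffusers.pipelines.ddpm as ddpm
print(type(ddpm).__name__)         # "_LazyModule"
pipeline_cls = ddpm.DDPMPipeline   # attribute access triggers the real import
print(pipeline_cls.__module__)     # "diffusers.pipelines.ddpm.pipeline_ddpm"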
| diffusers/src/diffusers/pipelines/ddpm/__init__.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/ddpm/__init__.py",
"repo_id": "diffusers",
"token_count": 193
} |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
"""
Base class for text model's outputs that also contains a pooling of the last hidden states.
Args:
projection_state (`torch.Tensor` of shape `(batch_size, sequence_length, project_dim)`, *optional*):
The text embeddings obtained by applying the projection layer to the last hidden states.
last_hidden_state (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one
for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
projection_state: Optional[torch.Tensor] = None
last_hidden_state: torch.Tensor = None
hidden_states: Optional[Tuple[torch.Tensor]] = None
attentions: Optional[Tuple[torch.Tensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
def __init__(
self,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
project_dim=512,
pooler_fn="cls",
learn_encoder=False,
use_attention_mask=True,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.project_dim = project_dim
self.pooler_fn = pooler_fn
self.learn_encoder = learn_encoder
self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
base_model_prefix = "roberta"
config_class = RobertaSeriesConfig
def __init__(self, config):
super().__init__(config)
self.roberta = XLMRobertaModel(config)
self.transformation = nn.Linear(config.hidden_size, config.project_dim)
self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
if self.has_pre_transformation:
self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.post_init()
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
):
r""" """
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.base_model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
return_dict=return_dict,
)
if self.has_pre_transformation:
sequence_output2 = outputs["hidden_states"][-2]
sequence_output2 = self.pre_LN(sequence_output2)
projection_state2 = self.transformation_pre(sequence_output2)
return TransformationModelOutput(
projection_state=projection_state2,
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
else:
projection_state = self.transformation(outputs.last_hidden_state)
return TransformationModelOutput(
projection_state=projection_state,
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
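# A hedged smoke-test sketch for the wrapper above, using an intentionally small
# XLM-R configuration (all sizes below are placeholders). `projection_state`
# carries the per-token embeddings after the final linear projection.
import torch
from diffusers.pipelines.deprecated.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
config = RobertaSeriesConfig(
    vocab_size=128,
    hidden_size=32,
    num_hidden_layers=1,
    num_attention_heads=2,
    intermediate_size=64,
    max_position_embeddings=64,
    project_dim=16,
)
model = RobertaSeriesModelWithTransformation(config).eval()
input_ids = torch.randint(3, 128, (2, 8))           # avoid the special token ids 0/1/2
attention_mask = torch.ones(2, 8, dtype=torch.long)
out = model(input_ids=input_ids, attention_mask=attention_mask)
print(out.projection_state.shape)                   # torch.Size([2, 8, 16])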
| diffusers/src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py",
"repo_id": "diffusers",
"token_count": 2322
} |
# Copyright 2022 The Music Spectrogram Diffusion Authors.
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import (
T5Block,
T5Config,
T5LayerNorm,
)
from ....configuration_utils import ConfigMixin, register_to_config
from ....models import ModelMixin
class SpectrogramContEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
@register_to_config
def __init__(
self,
input_dims: int,
targets_context_length: int,
d_model: int,
dropout_rate: float,
num_layers: int,
num_heads: int,
d_kv: int,
d_ff: int,
feed_forward_proj: str,
is_decoder: bool = False,
):
super().__init__()
self.input_proj = nn.Linear(input_dims, d_model, bias=False)
self.position_encoding = nn.Embedding(targets_context_length, d_model)
self.position_encoding.weight.requires_grad = False
self.dropout_pre = nn.Dropout(p=dropout_rate)
t5config = T5Config(
d_model=d_model,
num_heads=num_heads,
d_kv=d_kv,
d_ff=d_ff,
feed_forward_proj=feed_forward_proj,
dropout_rate=dropout_rate,
is_decoder=is_decoder,
is_encoder_decoder=False,
)
self.encoders = nn.ModuleList()
for lyr_num in range(num_layers):
lyr = T5Block(t5config)
self.encoders.append(lyr)
self.layer_norm = T5LayerNorm(d_model)
self.dropout_post = nn.Dropout(p=dropout_rate)
def forward(self, encoder_inputs, encoder_inputs_mask):
x = self.input_proj(encoder_inputs)
# terminal relative positional encodings
max_positions = encoder_inputs.shape[1]
input_positions = torch.arange(max_positions, device=encoder_inputs.device)
seq_lens = encoder_inputs_mask.sum(-1)
input_positions = torch.roll(input_positions.unsqueeze(0), tuple(seq_lens.tolist()), dims=0)
x += self.position_encoding(input_positions)
x = self.dropout_pre(x)
# invert the attention mask into its extended, additive form
input_shape = encoder_inputs.size()
extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
for lyr in self.encoders:
x = lyr(x, extended_attention_mask)[0]
x = self.layer_norm(x)
return self.dropout_post(x), encoder_inputs_mask
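# A hedged shape sketch for the continuous spectrogram encoder above; the sizes
# are placeholders, and a batch of one keeps the per-sequence positional roll to
# a single shift. `feed_forward_proj="gated-gelu"` mirrors a common T5 setting.
import torch
from diffusers.pipelines.deprecated.spectrogram_diffusion.continuous_encoder import SpectrogramContEncoder
encoder = SpectrogramContEncoder(
    input_dims=8,
    targets_context_length=16,
    d_model=32,
    dropout_rate=0.1,
    num_layers=1,
    num_heads=2,
    d_kv=16,
    d_ff=64,
    feed_forward_proj="gated-gelu",
    is_decoder=False,
).eval()
spectrogram = torch.randn(1, 16, 8)    # (batch, targets_context_length, input_dims)
mask = torch.ones(1, 16, dtype=torch.long)
hidden, out_mask = encoder(spectrogram, mask)
print(hidden.shape)                    # torch.Size([1, 16, 32])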
| diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py",
"repo_id": "diffusers",
"token_count": 1329
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
import PIL.Image
import torch
import torch.utils.checkpoint
from transformers import (
CLIPImageProcessor,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionModelWithProjection,
)
from ....image_processor import VaeImageProcessor
from ....models import AutoencoderKL, DualTransformer2DModel, Transformer2DModel, UNet2DConditionModel
from ....schedulers import KarrasDiffusionSchedulers
from ....utils import deprecate, logging
from ....utils.torch_utils import randn_tensor
from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from .modeling_text_unet import UNetFlatConditionModel
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class VersatileDiffusionDualGuidedPipeline(DiffusionPipeline):
r"""
Pipeline for image-text dual-guided generation using Versatile Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Parameters:
tokenizer ([`~transformers.CLIPTokenizer`]):
A `CLIPTokenizer` to tokenize text.
image_feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to preprocess image prompts.
text_encoder ([`~transformers.CLIPTextModelWithProjection`]):
Frozen text encoder used to embed text prompts.
image_encoder ([`~transformers.CLIPVisionModelWithProjection`]):
Frozen vision encoder used to embed image prompts.
image_unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
text_unet ([`UNetFlatConditionModel`]):
A flat UNet whose transformer blocks provide the text stream for dual-guided attention.
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `image_unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
model_cpu_offload_seq = "bert->unet->vqvae"
tokenizer: CLIPTokenizer
image_feature_extractor: CLIPImageProcessor
text_encoder: CLIPTextModelWithProjection
image_encoder: CLIPVisionModelWithProjection
image_unet: UNet2DConditionModel
text_unet: UNetFlatConditionModel
vae: AutoencoderKL
scheduler: KarrasDiffusionSchedulers
_optional_components = ["text_unet"]
def __init__(
self,
tokenizer: CLIPTokenizer,
image_feature_extractor: CLIPImageProcessor,
text_encoder: CLIPTextModelWithProjection,
image_encoder: CLIPVisionModelWithProjection,
image_unet: UNet2DConditionModel,
text_unet: UNetFlatConditionModel,
vae: AutoencoderKL,
scheduler: KarrasDiffusionSchedulers,
):
super().__init__()
self.register_modules(
tokenizer=tokenizer,
image_feature_extractor=image_feature_extractor,
text_encoder=text_encoder,
image_encoder=image_encoder,
image_unet=image_unet,
text_unet=text_unet,
vae=vae,
scheduler=scheduler,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
if self.text_unet is not None and (
"dual_cross_attention" not in self.image_unet.config or not self.image_unet.config.dual_cross_attention
):
# if loading from a universal checkpoint rather than a saved dual-guided pipeline
self._convert_to_dual_attention()
def remove_unused_weights(self):
self.register_modules(text_unet=None)
def _convert_to_dual_attention(self):
"""
Replace image_unet's `Transformer2DModel` blocks with `DualTransformer2DModel` that contains transformer blocks
from both `image_unet` and `text_unet`
"""
for name, module in self.image_unet.named_modules():
if isinstance(module, Transformer2DModel):
parent_name, index = name.rsplit(".", 1)
index = int(index)
image_transformer = self.image_unet.get_submodule(parent_name)[index]
text_transformer = self.text_unet.get_submodule(parent_name)[index]
config = image_transformer.config
dual_transformer = DualTransformer2DModel(
num_attention_heads=config.num_attention_heads,
attention_head_dim=config.attention_head_dim,
in_channels=config.in_channels,
num_layers=config.num_layers,
dropout=config.dropout,
norm_num_groups=config.norm_num_groups,
cross_attention_dim=config.cross_attention_dim,
attention_bias=config.attention_bias,
sample_size=config.sample_size,
num_vector_embeds=config.num_vector_embeds,
activation_fn=config.activation_fn,
num_embeds_ada_norm=config.num_embeds_ada_norm,
)
dual_transformer.transformers[0] = image_transformer
dual_transformer.transformers[1] = text_transformer
self.image_unet.get_submodule(parent_name)[index] = dual_transformer
self.image_unet.register_to_config(dual_cross_attention=True)
def _revert_dual_attention(self):
"""
Revert the image_unet `DualTransformer2DModel` blocks back to `Transformer2DModel` with image_unet weights. Call
this function if you reuse `image_unet` in another pipeline, e.g. `VersatileDiffusionPipeline`
"""
for name, module in self.image_unet.named_modules():
if isinstance(module, DualTransformer2DModel):
parent_name, index = name.rsplit(".", 1)
index = int(index)
self.image_unet.get_submodule(parent_name)[index] = module.transformers[0]
self.image_unet.register_to_config(dual_cross_attention=False)
def _encode_text_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
"""
def normalize_embeddings(encoder_output):
embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state)
embeds_pooled = encoder_output.text_embeds
embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True)
return embeds
batch_size = len(prompt)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids
if not torch.equal(text_input_ids, untruncated_ids):
removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
prompt_embeds = self.text_encoder(
text_input_ids.to(device),
attention_mask=attention_mask,
)
prompt_embeds = normalize_embeddings(prompt_embeds)
# duplicate text embeddings for each generation per prompt, using mps friendly method
bs_embed, seq_len, _ = prompt_embeds.shape
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_tokens = [""] * batch_size
max_length = text_input_ids.shape[-1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds)
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
def _encode_image_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance):
r"""
Encodes the image prompt into image encoder hidden states.
Args:
prompt (`PIL.Image.Image` or `List[PIL.Image.Image]`):
image prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
"""
def normalize_embeddings(encoder_output):
embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state)
embeds = self.image_encoder.visual_projection(embeds)
embeds_pooled = embeds[:, 0:1]
embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True)
return embeds
batch_size = len(prompt) if isinstance(prompt, list) else 1
# get prompt text embeddings
image_input = self.image_feature_extractor(images=prompt, return_tensors="pt")
pixel_values = image_input.pixel_values.to(device).to(self.image_encoder.dtype)
image_embeddings = self.image_encoder(pixel_values)
image_embeddings = normalize_embeddings(image_embeddings)
# duplicate image embeddings for each generation per prompt, using mps friendly method
bs_embed, seq_len, _ = image_embeddings.shape
image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size
uncond_images = self.image_feature_extractor(images=uncond_images, return_tensors="pt")
pixel_values = uncond_images.pixel_values.to(device).to(self.image_encoder.dtype)
negative_prompt_embeds = self.image_encoder(pixel_values)
negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds)
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and conditional embeddings into a single batch
# to avoid doing two forward passes
image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])
return image_embeddings
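# Note on _encode_image_prompt: the unconditional branch feeds flat mid-gray images
# (np.zeros((512, 512, 3)) + 0.5) through the same CLIP vision encoder, mirroring the empty-string
# prompt used for the text branch. Assuming the usual CLIP vision tower, the token dimension is 257
# (1 class token + 256 patch tokens), which is the condition length used for the image stream in
# set_transformer_params below.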
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
def decode_latents(self, latents):
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
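# Illustration: for DDIMScheduler, whose step() accepts both `eta` and `generator`, extra_step_kwargs
# ends up as {"eta": eta, "generator": generator}; for a scheduler whose step() takes neither argument,
# the dict stays empty and the extra values are silently dropped.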
def check_inputs(self, prompt, image, height, width, callback_steps):
if not isinstance(prompt, str) and not isinstance(prompt, PIL.Image.Image) and not isinstance(prompt, list):
raise ValueError(f"`prompt` has to be of type `str` `PIL.Image` or `list` but is {type(prompt)}")
if not isinstance(image, str) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list):
raise ValueError(f"`image` has to be of type `str` `PIL.Image` or `list` but is {type(image)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (
batch_size,
num_channels_latents,
int(height) // self.vae_scale_factor,
int(width) // self.vae_scale_factor,
)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
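# Shape sketch (assuming the usual vae_scale_factor of 8): a 512x512 request with 4 latent channels
# and an effective batch of 1 produces latents of shape (1, 4, 64, 64), scaled by the scheduler's
# init_noise_sigma before the denoising loop starts.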
def set_transformer_params(self, mix_ratio: float = 0.5, condition_types: Tuple = ("text", "image")):
for name, module in self.image_unet.named_modules():
if isinstance(module, DualTransformer2DModel):
module.mix_ratio = mix_ratio
for i, type in enumerate(condition_types):
if type == "text":
module.condition_lengths[i] = self.text_encoder.config.max_position_embeddings
module.transformer_index_for_condition[i] = 1 # use the second (text) transformer
else:
module.condition_lengths[i] = 257
module.transformer_index_for_condition[i] = 0 # use the first (image) transformer
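# Sketch of the routing configured above: the UNet receives a single context made of the text
# embeddings followed by the image embeddings (257 tokens for the CLIP vision stream). Each
# DualTransformer2DModel splits that context according to condition_lengths, sends each slice to the
# inner transformer selected by transformer_index_for_condition, and blends the two branch outputs
# using mix_ratio, which __call__ sets from text_to_image_strength.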
@torch.no_grad()
def __call__(
self,
prompt: Union[PIL.Image.Image, List[PIL.Image.Image]],
image: Union[str, List[str]],
text_to_image_strength: float = 0.5,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
callback_steps: int = 1,
**kwargs,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide image generation.
height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
callback (`Callable`, *optional*):
A function called every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
Examples:
```py
>>> from diffusers import VersatileDiffusionDualGuidedPipeline
>>> import torch
>>> import requests
>>> from io import BytesIO
>>> from PIL import Image
>>> # let's download an initial image
>>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg"
>>> response = requests.get(url)
>>> image = Image.open(BytesIO(response.content)).convert("RGB")
>>> text = "a red car in the sun"
>>> pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained(
... "shi-labs/versatile-diffusion", torch_dtype=torch.float16
... )
>>> pipe.remove_unused_weights()
>>> pipe = pipe.to("cuda")
>>> generator = torch.Generator(device="cuda").manual_seed(0)
>>> text_to_image_strength = 0.75
>>> image = pipe(
... prompt=text, image=image, text_to_image_strength=text_to_image_strength, generator=generator
... ).images[0]
>>> image.save("./car_variation.png")
```
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated images.
"""
# 0. Default height and width to unet
height = height or self.image_unet.config.sample_size * self.vae_scale_factor
width = width or self.image_unet.config.sample_size * self.vae_scale_factor
# 1. Check inputs. Raise error if not correct
self.check_inputs(prompt, image, height, width, callback_steps)
# 2. Define call parameters
prompt = [prompt] if not isinstance(prompt, list) else prompt
image = [image] if not isinstance(image, list) else image
batch_size = len(prompt)
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompts
prompt_embeds = self._encode_text_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance)
image_embeddings = self._encode_image_prompt(image, device, num_images_per_prompt, do_classifier_free_guidance)
dual_prompt_embeddings = torch.cat([prompt_embeds, image_embeddings], dim=1)
prompt_types = ("text", "image")
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.image_unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
dual_prompt_embeddings.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Combine the attention blocks of the image and text UNets
self.set_transformer_params(text_to_image_strength, prompt_types)
# 8. Denoising loop
for i, t in enumerate(self.progress_bar(timesteps)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=dual_prompt_embeddings).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
else:
image = latents
image = self.image_processor.postprocess(image, output_type=output_type)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
| diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py",
"repo_id": "diffusers",
"token_count": 11632
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, List, Optional, Union
import PIL.Image
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionModelWithProjection,
XLMRobertaTokenizer,
)
from ...models import PriorTransformer, UNet2DConditionModel, VQModel
from ...schedulers import DDIMScheduler, DDPMScheduler, UnCLIPScheduler
from ...utils import (
replace_example_docstring,
)
from ..pipeline_utils import DiffusionPipeline
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline
from .text_encoder import MultilingualCLIP
TEXT2IMAGE_EXAMPLE_DOC_STRING = """
Examples:
```py
from diffusers import AutoPipelineForText2Image
import torch
pipe = AutoPipelineForText2Image.from_pretrained(
"kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
image = pipe(prompt=prompt, num_inference_steps=25).images[0]
```
"""
IMAGE2IMAGE_EXAMPLE_DOC_STRING = """
Examples:
```py
from diffusers import AutoPipelineForImage2Image
import torch
import requests
from io import BytesIO
from PIL import Image
import os
pipe = AutoPipelineForImage2Image.from_pretrained(
"kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
prompt = "A fantasy landscape, Cinematic lighting"
negative_prompt = "low quality, bad quality"
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
response = requests.get(url)
original_image = Image.open(BytesIO(response.content)).convert("RGB")
original_image.thumbnail((768, 768))
image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0]
```
"""
INPAINT_EXAMPLE_DOC_STRING = """
Examples:
```py
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image
import torch
import numpy as np
pipe = AutoPipelineForInpainting.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
prompt = "A fantasy landscape, Cinematic lighting"
negative_prompt = "low quality, bad quality"
original_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
)
mask = np.zeros((768, 768), dtype=np.float32)
# Let's mask out an area above the cat's head
mask[:250, 250:-250] = 1
image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0]
```
"""
class KandinskyCombinedPipeline(DiffusionPipeline):
"""
Combined Pipeline for text-to-image generation using Kandinsky
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
text_encoder ([`MultilingualCLIP`]):
Frozen text-encoder.
tokenizer ([`XLMRobertaTokenizer`]):
Tokenizer of class [`XLMRobertaTokenizer`].
scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
A scheduler to be used in combination with `unet` to generate image latents.
unet ([`UNet2DConditionModel`]):
Conditional U-Net architecture to denoise the image embedding.
movq ([`VQModel`]):
MoVQ Decoder to generate the image from the latents.
prior_prior ([`PriorTransformer`]):
The canonical unCLIP prior to approximate the image embedding from the text embedding.
prior_image_encoder ([`CLIPVisionModelWithProjection`]):
Frozen image-encoder.
prior_text_encoder ([`CLIPTextModelWithProjection`]):
Frozen text-encoder.
prior_tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
prior_scheduler ([`UnCLIPScheduler`]):
A scheduler to be used in combination with `prior` to generate image embedding.
"""
_load_connected_pipes = True
model_cpu_offload_seq = "text_encoder->unet->movq->prior_prior->prior_image_encoder->prior_text_encoder"
_exclude_from_cpu_offload = ["prior_prior"]
def __init__(
self,
text_encoder: MultilingualCLIP,
tokenizer: XLMRobertaTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, DDPMScheduler],
movq: VQModel,
prior_prior: PriorTransformer,
prior_image_encoder: CLIPVisionModelWithProjection,
prior_text_encoder: CLIPTextModelWithProjection,
prior_tokenizer: CLIPTokenizer,
prior_scheduler: UnCLIPScheduler,
prior_image_processor: CLIPImageProcessor,
):
super().__init__()
self.register_modules(
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
movq=movq,
prior_prior=prior_prior,
prior_image_encoder=prior_image_encoder,
prior_text_encoder=prior_text_encoder,
prior_tokenizer=prior_tokenizer,
prior_scheduler=prior_scheduler,
prior_image_processor=prior_image_processor,
)
self.prior_pipe = KandinskyPriorPipeline(
prior=prior_prior,
image_encoder=prior_image_encoder,
text_encoder=prior_text_encoder,
tokenizer=prior_tokenizer,
scheduler=prior_scheduler,
image_processor=prior_image_processor,
)
self.decoder_pipe = KandinskyPipeline(
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
movq=movq,
)
def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):
self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op)
def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"):
r"""
Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using 🤗
Accelerate, significantly reducing memory usage. Models are moved to a `torch.device('meta')` and loaded on a
GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis.
Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower.
"""
self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device)
self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device)
def progress_bar(self, iterable=None, total=None):
self.prior_pipe.progress_bar(iterable=iterable, total=total)
self.decoder_pipe.progress_bar(iterable=iterable, total=total)
self.decoder_pipe.enable_model_cpu_offload()
def set_progress_bar_config(self, **kwargs):
self.prior_pipe.set_progress_bar_config(**kwargs)
self.decoder_pipe.set_progress_bar_config(**kwargs)
@torch.no_grad()
@replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]],
negative_prompt: Optional[Union[str, List[str]]] = None,
num_inference_steps: int = 100,
guidance_scale: float = 4.0,
num_images_per_prompt: int = 1,
height: int = 512,
width: int = 512,
prior_guidance_scale: float = 4.0,
prior_num_inference_steps: int = 25,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
output_type: Optional[str] = "pil",
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
callback_steps: int = 1,
return_dict: bool = True,
):
"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
num_inference_steps (`int`, *optional*, defaults to 100):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
height (`int`, *optional*, defaults to 512):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 512):
The width in pixels of the generated image.
prior_guidance_scale (`float`, *optional*, defaults to 4.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
prior_num_inference_steps (`int`, *optional*, defaults to 25):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 4.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
callback (`Callable`, *optional*):
A function called every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Examples:
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`
"""
prior_outputs = self.prior_pipe(
prompt=prompt,
negative_prompt=negative_prompt,
num_images_per_prompt=num_images_per_prompt,
num_inference_steps=prior_num_inference_steps,
generator=generator,
latents=latents,
guidance_scale=prior_guidance_scale,
output_type="pt",
return_dict=False,
)
image_embeds = prior_outputs[0]
negative_image_embeds = prior_outputs[1]
prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
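# The prior runs with num_images_per_prompt, so image_embeds can have more rows than there are
# prompts; e.g. a single prompt with num_images_per_prompt=2 yields image_embeds.shape[0] == 2,
# and the prompt list below is tiled to match before the decoder pipeline is called.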
if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
prompt = (image_embeds.shape[0] // len(prompt)) * prompt
outputs = self.decoder_pipe(
prompt=prompt,
image_embeds=image_embeds,
negative_image_embeds=negative_image_embeds,
width=width,
height=height,
num_inference_steps=num_inference_steps,
generator=generator,
guidance_scale=guidance_scale,
output_type=output_type,
callback=callback,
callback_steps=callback_steps,
return_dict=return_dict,
)
self.maybe_free_model_hooks()
return outputs
class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline):
"""
Combined Pipeline for image-to-image generation using Kandinsky
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
text_encoder ([`MultilingualCLIP`]):
Frozen text-encoder.
tokenizer ([`XLMRobertaTokenizer`]):
Tokenizer of class [`XLMRobertaTokenizer`].
scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
A scheduler to be used in combination with `unet` to generate image latents.
unet ([`UNet2DConditionModel`]):
Conditional U-Net architecture to denoise the image embedding.
movq ([`VQModel`]):
MoVQ Decoder to generate the image from the latents.
prior_prior ([`PriorTransformer`]):
The canonical unCLIP prior to approximate the image embedding from the text embedding.
prior_image_encoder ([`CLIPVisionModelWithProjection`]):
Frozen image-encoder.
prior_text_encoder ([`CLIPTextModelWithProjection`]):
Frozen text-encoder.
prior_tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
prior_scheduler ([`UnCLIPScheduler`]):
A scheduler to be used in combination with `prior` to generate image embedding.
"""
_load_connected_pipes = True
model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->" "text_encoder->unet->movq"
_exclude_from_cpu_offload = ["prior_prior"]
def __init__(
self,
text_encoder: MultilingualCLIP,
tokenizer: XLMRobertaTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, DDPMScheduler],
movq: VQModel,
prior_prior: PriorTransformer,
prior_image_encoder: CLIPVisionModelWithProjection,
prior_text_encoder: CLIPTextModelWithProjection,
prior_tokenizer: CLIPTokenizer,
prior_scheduler: UnCLIPScheduler,
prior_image_processor: CLIPImageProcessor,
):
super().__init__()
self.register_modules(
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
movq=movq,
prior_prior=prior_prior,
prior_image_encoder=prior_image_encoder,
prior_text_encoder=prior_text_encoder,
prior_tokenizer=prior_tokenizer,
prior_scheduler=prior_scheduler,
prior_image_processor=prior_image_processor,
)
self.prior_pipe = KandinskyPriorPipeline(
prior=prior_prior,
image_encoder=prior_image_encoder,
text_encoder=prior_text_encoder,
tokenizer=prior_tokenizer,
scheduler=prior_scheduler,
image_processor=prior_image_processor,
)
self.decoder_pipe = KandinskyImg2ImgPipeline(
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
movq=movq,
)
def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):
self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op)
def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"):
r"""
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
`torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
Note that offloading happens on a submodule basis. Memory savings are higher than with
`enable_model_cpu_offload`, but performance is lower.
"""
self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device)
self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device)
def progress_bar(self, iterable=None, total=None):
self.prior_pipe.progress_bar(iterable=iterable, total=total)
self.decoder_pipe.progress_bar(iterable=iterable, total=total)
self.decoder_pipe.enable_model_cpu_offload()
def set_progress_bar_config(self, **kwargs):
self.prior_pipe.set_progress_bar_config(**kwargs)
self.decoder_pipe.set_progress_bar_config(**kwargs)
@torch.no_grad()
@replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]],
image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]],
negative_prompt: Optional[Union[str, List[str]]] = None,
num_inference_steps: int = 100,
guidance_scale: float = 4.0,
num_images_per_prompt: int = 1,
strength: float = 0.3,
height: int = 512,
width: int = 512,
prior_guidance_scale: float = 4.0,
prior_num_inference_steps: int = 25,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
output_type: Optional[str] = "pil",
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
callback_steps: int = 1,
return_dict: bool = True,
):
"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
`Image`, or tensor representing an image batch, that will be used as the starting point for the
process. Can also accept image latents as `image`; if passing latents directly, they will not be encoded
again.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
num_inference_steps (`int`, *optional*, defaults to 100):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
height (`int`, *optional*, defaults to 512):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 512):
The width in pixels of the generated image.
strength (`float`, *optional*, defaults to 0.3):
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
will be used as a starting point, adding more noise to it the larger the `strength`. The number of
denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
be maximum and the denoising process will run for the full number of iterations specified in
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
prior_guidance_scale (`float`, *optional*, defaults to 4.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
prior_num_inference_steps (`int`, *optional*, defaults to 25):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 4.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
callback (`Callable`, *optional*):
A function called every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Examples:
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`
"""
prior_outputs = self.prior_pipe(
prompt=prompt,
negative_prompt=negative_prompt,
num_images_per_prompt=num_images_per_prompt,
num_inference_steps=prior_num_inference_steps,
generator=generator,
latents=latents,
guidance_scale=prior_guidance_scale,
output_type="pt",
return_dict=False,
)
image_embeds = prior_outputs[0]
negative_image_embeds = prior_outputs[1]
prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
image = [image] if isinstance(image, PIL.Image.Image) else image
if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
prompt = (image_embeds.shape[0] // len(prompt)) * prompt
if (
isinstance(image, (list, tuple))
and len(image) < image_embeds.shape[0]
and image_embeds.shape[0] % len(image) == 0
):
image = (image_embeds.shape[0] // len(image)) * image
outputs = self.decoder_pipe(
prompt=prompt,
image=image,
image_embeds=image_embeds,
negative_image_embeds=negative_image_embeds,
strength=strength,
width=width,
height=height,
num_inference_steps=num_inference_steps,
generator=generator,
guidance_scale=guidance_scale,
output_type=output_type,
callback=callback,
callback_steps=callback_steps,
return_dict=return_dict,
)
self.maybe_free_model_hooks()
return outputs
class KandinskyInpaintCombinedPipeline(DiffusionPipeline):
"""
Combined Pipeline for inpainting generation using Kandinsky
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
text_encoder ([`MultilingualCLIP`]):
Frozen text-encoder.
tokenizer ([`XLMRobertaTokenizer`]):
Tokenizer of class [`XLMRobertaTokenizer`].
scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
A scheduler to be used in combination with `unet` to generate image latents.
unet ([`UNet2DConditionModel`]):
Conditional U-Net architecture to denoise the image embedding.
movq ([`VQModel`]):
MoVQ Decoder to generate the image from the latents.
prior_prior ([`PriorTransformer`]):
The canonical unCLIP prior to approximate the image embedding from the text embedding.
prior_image_encoder ([`CLIPVisionModelWithProjection`]):
Frozen image-encoder.
prior_text_encoder ([`CLIPTextModelWithProjection`]):
Frozen text-encoder.
prior_tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
prior_scheduler ([`UnCLIPScheduler`]):
A scheduler to be used in combination with `prior` to generate image embedding.
"""
_load_connected_pipes = True
model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->text_encoder->unet->movq"
_exclude_from_cpu_offload = ["prior_prior"]
def __init__(
self,
text_encoder: MultilingualCLIP,
tokenizer: XLMRobertaTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, DDPMScheduler],
movq: VQModel,
prior_prior: PriorTransformer,
prior_image_encoder: CLIPVisionModelWithProjection,
prior_text_encoder: CLIPTextModelWithProjection,
prior_tokenizer: CLIPTokenizer,
prior_scheduler: UnCLIPScheduler,
prior_image_processor: CLIPImageProcessor,
):
super().__init__()
self.register_modules(
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
movq=movq,
prior_prior=prior_prior,
prior_image_encoder=prior_image_encoder,
prior_text_encoder=prior_text_encoder,
prior_tokenizer=prior_tokenizer,
prior_scheduler=prior_scheduler,
prior_image_processor=prior_image_processor,
)
self.prior_pipe = KandinskyPriorPipeline(
prior=prior_prior,
image_encoder=prior_image_encoder,
text_encoder=prior_text_encoder,
tokenizer=prior_tokenizer,
scheduler=prior_scheduler,
image_processor=prior_image_processor,
)
self.decoder_pipe = KandinskyInpaintPipeline(
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
movq=movq,
)
def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):
self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op)
def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"):
r"""
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
`torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
Note that offloading happens on a submodule basis. Memory savings are higher than with
`enable_model_cpu_offload`, but performance is lower.
"""
self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device)
self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device)
def progress_bar(self, iterable=None, total=None):
self.prior_pipe.progress_bar(iterable=iterable, total=total)
self.decoder_pipe.progress_bar(iterable=iterable, total=total)
self.decoder_pipe.enable_model_cpu_offload()
def set_progress_bar_config(self, **kwargs):
self.prior_pipe.set_progress_bar_config(**kwargs)
self.decoder_pipe.set_progress_bar_config(**kwargs)
@torch.no_grad()
@replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]],
image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]],
mask_image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]],
negative_prompt: Optional[Union[str, List[str]]] = None,
num_inference_steps: int = 100,
guidance_scale: float = 4.0,
num_images_per_prompt: int = 1,
height: int = 512,
width: int = 512,
prior_guidance_scale: float = 4.0,
prior_num_inference_steps: int = 25,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
output_type: Optional[str] = "pil",
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
callback_steps: int = 1,
return_dict: bool = True,
):
"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
`Image`, or tensor representing an image batch, that will be used as the starting point for the
process. Can also accept image latents as `image`; if passing latents directly, they will not be encoded
again.
mask_image (`np.array`):
Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while
black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single
channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3,
so the expected shape would be `(B, H, W, 1)`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
num_inference_steps (`int`, *optional*, defaults to 100):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
height (`int`, *optional*, defaults to 512):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 512):
The width in pixels of the generated image.
prior_guidance_scale (`float`, *optional*, defaults to 4.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
prior_num_inference_steps (`int`, *optional*, defaults to 25):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 4.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
(`np.array`) or `"pt"` (`torch.Tensor`).
callback (`Callable`, *optional*):
A function called every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Examples:
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`
"""
prior_outputs = self.prior_pipe(
prompt=prompt,
negative_prompt=negative_prompt,
num_images_per_prompt=num_images_per_prompt,
num_inference_steps=prior_num_inference_steps,
generator=generator,
latents=latents,
guidance_scale=prior_guidance_scale,
output_type="pt",
return_dict=False,
)
image_embeds = prior_outputs[0]
negative_image_embeds = prior_outputs[1]
prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
image = [image] if isinstance(image, PIL.Image.Image) else image
mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image
if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
prompt = (image_embeds.shape[0] // len(prompt)) * prompt
if (
isinstance(image, (list, tuple))
and len(image) < image_embeds.shape[0]
and image_embeds.shape[0] % len(image) == 0
):
image = (image_embeds.shape[0] // len(image)) * image
if (
isinstance(mask_image, (list, tuple))
and len(mask_image) < image_embeds.shape[0]
and image_embeds.shape[0] % len(mask_image) == 0
):
mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image
outputs = self.decoder_pipe(
prompt=prompt,
image=image,
mask_image=mask_image,
image_embeds=image_embeds,
negative_image_embeds=negative_image_embeds,
width=width,
height=height,
num_inference_steps=num_inference_steps,
generator=generator,
guidance_scale=guidance_scale,
output_type=output_type,
callback=callback,
callback_steps=callback_steps,
return_dict=return_dict,
)
self.maybe_free_model_hooks()
return outputs
| diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py",
"repo_id": "diffusers",
"token_count": 17037
} |
from typing import Callable, Dict, List, Optional, Union
import torch
from transformers import T5EncoderModel, T5Tokenizer
from ...loaders import StableDiffusionLoraLoaderMixin
from ...models import Kandinsky3UNet, VQModel
from ...schedulers import DDPMScheduler
from ...utils import (
deprecate,
is_torch_xla_available,
logging,
replace_example_docstring,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
XLA_AVAILABLE = True
else:
XLA_AVAILABLE = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import AutoPipelineForText2Image
>>> import torch
>>> pipe = AutoPipelineForText2Image.from_pretrained(
... "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16
... )
>>> pipe.enable_model_cpu_offload()
>>> prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background."
>>> generator = torch.Generator(device="cpu").manual_seed(0)
>>> image = pipe(prompt, num_inference_steps=25, generator=generator).images[0]
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
new_height = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
new_width = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
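# Worked examples with the default scale_factor of 8: (1024, 1024) -> (128, 128) and (1000, 667) -> (128, 88).
# The returned values are the dimensions used for the latents, i.e. the requested pixel size divided by the
# MoVQ scale factor and rounded up to a multiple of scale_factor, so odd sizes are effectively rounded up
# to a multiple of 64 pixels.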
class Kandinsky3Pipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin):
model_cpu_offload_seq = "text_encoder->unet->movq"
_callback_tensor_inputs = [
"latents",
"prompt_embeds",
"negative_prompt_embeds",
"negative_attention_mask",
"attention_mask",
]
def __init__(
self,
tokenizer: T5Tokenizer,
text_encoder: T5EncoderModel,
unet: Kandinsky3UNet,
scheduler: DDPMScheduler,
movq: VQModel,
):
super().__init__()
self.register_modules(
tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq
)
def process_embeds(self, embeddings, attention_mask, cut_context):
if cut_context:
embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0])
max_seq_length = attention_mask.sum(-1).max() + 1
embeddings = embeddings[:, :max_seq_length]
attention_mask = attention_mask[:, :max_seq_length]
return embeddings, attention_mask
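# Example of the cut_context path: with inputs padded to 128 tokens and attention_mask rows summing
# to 5 and 9, max_seq_length becomes 10, so both embeddings and attention_mask are sliced to 10 columns
# after the padded positions have been zeroed out.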
@torch.no_grad()
def encode_prompt(
self,
prompt,
do_classifier_free_guidance=True,
num_images_per_prompt=1,
device=None,
negative_prompt=None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
_cut_context=False,
attention_mask: Optional[torch.Tensor] = None,
negative_attention_mask: Optional[torch.Tensor] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`, *optional*):
torch device to place the resulting embeddings on
num_images_per_prompt (`int`, *optional*, defaults to 1):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead.
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
attention_mask (`torch.Tensor`, *optional*):
Pre-generated attention mask. Must provide if passing `prompt_embeds` directly.
negative_attention_mask (`torch.Tensor`, *optional*):
Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly.
"""
if prompt is not None and negative_prompt is not None:
if type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
if device is None:
device = self._execution_device
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
max_length = 128
if prompt_embeds is None:
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids.to(device)
attention_mask = text_inputs.attention_mask.to(device)
prompt_embeds = self.text_encoder(
text_input_ids,
attention_mask=attention_mask,
)
prompt_embeds = prompt_embeds[0]
prompt_embeds, attention_mask = self.process_embeds(prompt_embeds, attention_mask, _cut_context)
prompt_embeds = prompt_embeds * attention_mask.unsqueeze(2)
if self.text_encoder is not None:
dtype = self.text_encoder.dtype
else:
dtype = None
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
attention_mask = attention_mask.repeat(num_images_per_prompt, 1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
if negative_prompt is not None:
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=128,
truncation=True,
return_attention_mask=True,
return_tensors="pt",
)
text_input_ids = uncond_input.input_ids.to(device)
negative_attention_mask = uncond_input.attention_mask.to(device)
negative_prompt_embeds = self.text_encoder(
text_input_ids,
attention_mask=negative_attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = negative_prompt_embeds[:, : prompt_embeds.shape[1]]
negative_attention_mask = negative_attention_mask[:, : prompt_embeds.shape[1]]
negative_prompt_embeds = negative_prompt_embeds * negative_attention_mask.unsqueeze(2)
else:
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
negative_attention_mask = torch.zeros_like(attention_mask)
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
if negative_prompt_embeds.shape != prompt_embeds.shape:
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
negative_attention_mask = negative_attention_mask.repeat(num_images_per_prompt, 1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
else:
negative_prompt_embeds = None
negative_attention_mask = None
return prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask
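# Rough shape sketch for encode_prompt, assuming the 128-token padding before the optional cut_context
# trimming: prompt_embeds is [batch_size * num_images_per_prompt, seq_len, hidden_dim] with padded
# positions zeroed via the attention mask; with guidance enabled the negative embeddings and mask are
# returned with matching shapes, otherwise both are None.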
def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
latents = latents.to(device)
latents = latents * scheduler.init_noise_sigma
return latents
def check_inputs(
self,
prompt,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
attention_mask=None,
negative_attention_mask=None,
):
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
if negative_prompt_embeds is not None and negative_attention_mask is None:
raise ValueError("Please provide `negative_attention_mask` along with `negative_prompt_embeds`")
if negative_prompt_embeds is not None and negative_attention_mask is not None:
if negative_prompt_embeds.shape[:2] != negative_attention_mask.shape:
raise ValueError(
"`negative_prompt_embeds` and `negative_attention_mask` must have the same batch_size and token length when passed directly, but"
f" got: `negative_prompt_embeds` {negative_prompt_embeds.shape[:2]} != `negative_attention_mask`"
f" {negative_attention_mask.shape}."
)
if prompt_embeds is not None and attention_mask is None:
raise ValueError("Please provide `attention_mask` along with `prompt_embeds`")
if prompt_embeds is not None and attention_mask is not None:
if prompt_embeds.shape[:2] != attention_mask.shape:
raise ValueError(
"`prompt_embeds` and `attention_mask` must have the same batch_size and token length when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape[:2]} != `attention_mask`"
f" {attention_mask.shape}."
)
@property
def guidance_scale(self):
return self._guidance_scale
@property
def do_classifier_free_guidance(self):
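# classifier-free guidance is only applied when the scale is strictly greater than 1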
return self._guidance_scale > 1
@property
def num_timesteps(self):
return self._num_timesteps
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
num_inference_steps: int = 25,
guidance_scale: float = 3.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
height: Optional[int] = 1024,
width: Optional[int] = 1024,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
negative_attention_mask: Optional[torch.Tensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
latents=None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
instead.
num_inference_steps (`int`, *optional*, defaults to 25):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
timesteps (`List[int]`, *optional*):
Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
timesteps are used. Must be in descending order.
guidance_scale (`float`, *optional*, defaults to 3.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2 of the [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text
`prompt`, usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
not greater than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
height (`int`, *optional*, defaults to 1024):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 1024):
The width in pixels of the generated image.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
attention_mask (`torch.Tensor`, *optional*):
Pre-generated attention mask. Must be provided when passing `prompt_embeds` directly.
negative_attention_mask (`torch.Tensor`, *optional*):
Pre-generated negative attention mask. Must be provided when passing `negative_prompt_embeds` directly.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
clean_caption (`bool`, *optional*, defaults to `True`):
Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
be installed. If the dependencies are not installed, the embeddings will be created from the raw
prompt.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
Examples:
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
cut_context = True
device = self._execution_device
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
callback_on_step_end_tensor_inputs,
attention_mask,
negative_attention_mask,
)
self._guidance_scale = guidance_scale
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# 3. Encode input prompt
prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask = self.encode_prompt(
prompt,
self.do_classifier_free_guidance,
num_images_per_prompt=num_images_per_prompt,
device=device,
negative_prompt=negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
_cut_context=cut_context,
attention_mask=attention_mask,
negative_attention_mask=negative_attention_mask,
)
if self.do_classifier_free_guidance:
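# stack the unconditional and conditional embeddings/masks so one UNet forward per step covers both CFG branches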
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
attention_mask = torch.cat([negative_attention_mask, attention_mask]).bool()
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latents
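# the MoVQ autoencoder decodes from a spatially downscaled latent grid; the factor of 8 here is
# the assumed latent scale, so the requested pixel height/width are converted to latent sizes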
height, width = downscale_height_and_width(height, width, 8)
latents = self.prepare_latents(
(batch_size * num_images_per_prompt, 4, height, width),
prompt_embeds.dtype,
device,
generator,
latents,
self.scheduler,
)
if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
self.text_encoder_offload_hook.offload()
# 7. Denoising loop
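# higher-order schedulers consume `scheduler.order` timesteps per denoising step, so any extra
# leading timesteps are counted as warmup for the progress-bar bookkeeping below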
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
self._num_timesteps = len(timesteps)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
encoder_attention_mask=attention_mask,
return_dict=False,
)[0]
if self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = (guidance_scale + 1.0) * noise_pred_text - guidance_scale * noise_pred_uncond
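# note: (guidance_scale + 1) * text - guidance_scale * uncond == uncond + (guidance_scale + 1) * (text - uncond),
# i.e. the commented-out conventional CFG update below with an effective scale of guidance_scale + 1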
# noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
noise_pred,
t,
latents,
generator=generator,
).prev_sample
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
attention_mask = callback_outputs.pop("attention_mask", attention_mask)
negative_attention_mask = callback_outputs.pop("negative_attention_mask", negative_attention_mask)
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if XLA_AVAILABLE:
xm.mark_step()
# post-processing
if output_type not in ["pt", "np", "pil", "latent"]:
raise ValueError(
f"Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}"
)
if not output_type == "latent":
image = self.movq.decode(latents, force_not_quantize=True)["sample"]
if output_type in ["np", "pil"]:
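# map the decoder output from [-1, 1] to [0, 1] before converting to numpy / PIL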
image = image * 0.5 + 0.5
image = image.clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
else:
image = latents
self.maybe_free_model_hooks()
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
| diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py",
"repo_id": "diffusers",
"token_count": 12749
} |
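A minimal usage sketch for the Kandinsky 3 pipeline above; the checkpoint id, fp16 variant and prompt are illustrative assumptions rather than anything fixed by the pipeline code itself.

import torch
from diffusers import AutoPipelineForText2Image

# load the text-to-image pipeline (checkpoint id assumed for illustration)
pipe = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # optional: trades speed for lower peak GPU memory

prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats."
generator = torch.Generator(device="cpu").manual_seed(0)

# guidance_scale > 1 turns on classifier-free guidance (see do_classifier_free_guidance above)
image = pipe(prompt, num_inference_steps=25, guidance_scale=3.0, generator=generator).images[0]
image.save("kandinsky3.png")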
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING, List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
BaseOutput,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
@dataclass
class SafetyConfig(object):
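# Presets for Safe Latent Diffusion (SLD): stronger presets start the safety guidance earlier
# (fewer warmup steps) and use larger guidance scale, threshold and momentum values.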
WEAK = {
"sld_warmup_steps": 15,
"sld_guidance_scale": 20,
"sld_threshold": 0.0,
"sld_momentum_scale": 0.0,
"sld_mom_beta": 0.0,
}
MEDIUM = {
"sld_warmup_steps": 10,
"sld_guidance_scale": 1000,
"sld_threshold": 0.01,
"sld_momentum_scale": 0.3,
"sld_mom_beta": 0.4,
}
STRONG = {
"sld_warmup_steps": 7,
"sld_guidance_scale": 2000,
"sld_threshold": 0.025,
"sld_momentum_scale": 0.5,
"sld_mom_beta": 0.7,
}
MAX = {
"sld_warmup_steps": 0,
"sld_guidance_scale": 5000,
"sld_threshold": 1.0,
"sld_momentum_scale": 0.5,
"sld_mom_beta": 0.7,
}
_dummy_objects = {}
_additional_imports = {}
_import_structure = {}
_additional_imports.update({"SafetyConfig": SafetyConfig})
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure.update(
{
"pipeline_output": ["StableDiffusionSafePipelineOutput"],
"pipeline_stable_diffusion_safe": ["StableDiffusionPipelineSafe"],
"safety_checker": ["StableDiffusionSafetyChecker"],
}
)
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_output import StableDiffusionSafePipelineOutput
from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe
from .safety_checker import SafeStableDiffusionSafetyChecker
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
for name, value in _additional_imports.items():
setattr(sys.modules[__name__], name, value)
| diffusers/src/diffusers/pipelines/stable_diffusion_safe/__init__.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_safe/__init__.py",
"repo_id": "diffusers",
"token_count": 1237
} |
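A minimal sketch of how these SafetyConfig presets might be combined with the safe pipeline exported by this module; the checkpoint id is an assumption (the SLD authors' hub organization), not something this __init__ prescribes.

import torch
from diffusers import StableDiffusionPipelineSafe
from diffusers.pipelines.stable_diffusion_safe import SafetyConfig

# load the safe Stable Diffusion pipeline (checkpoint id assumed for illustration)
pipe = StableDiffusionPipelineSafe.from_pretrained(
    "AIML-TUDA/stable-diffusion-safe", torch_dtype=torch.float16
).to("cuda")

prompt = "a portrait photo of an astronaut riding a horse"
# the ** expansion forwards sld_warmup_steps, sld_guidance_scale, etc. from the chosen preset
image = pipe(prompt=prompt, **SafetyConfig.MEDIUM).images[0]
image.save("safe_sd.png")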