use crate::{Error, Tensor};
use std::ops::{
Bound, Range, RangeBounds, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive,
};
impl Tensor {
/// Intended to be used by the trait method `.i()`
///
/// ```
/// # use candle_core::{Tensor, DType, Device, IndexOp};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
///
/// let c = a.i(0..1)?;
/// assert_eq!(c.shape().dims(), &[1, 3]);
///
/// let c = a.i(0)?;
/// assert_eq!(c.shape().dims(), &[3]);
///
/// let c = a.i((.., ..2))?;
/// assert_eq!(c.shape().dims(), &[2, 2]);
///
/// let c = a.i((.., ..=2))?;
/// assert_eq!(c.shape().dims(), &[2, 3]);
///
/// # Ok::<(), candle_core::Error>(())
/// ```
fn index(&self, indexers: &[TensorIndexer]) -> Result<Self, Error> {
let mut x = self.clone();
let dims = self.shape().dims();
let mut current_dim = 0;
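// `current_dim` tracks the dimension being indexed in the partially sliced tensor:
// `Select` squeezes its dimension away so it does not advance, whereas `Narrow` and
// `IndexSelect` keep the dimension and move on to the next one.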
for (i, indexer) in indexers.iter().enumerate() {
x = match indexer {
TensorIndexer::Select(n) => x.narrow(current_dim, *n, 1)?.squeeze(current_dim)?,
TensorIndexer::Narrow(left_bound, right_bound) => {
let start = match left_bound {
Bound::Included(n) => *n,
Bound::Excluded(n) => *n + 1,
Bound::Unbounded => 0,
};
let stop = match right_bound {
Bound::Included(n) => *n + 1,
Bound::Excluded(n) => *n,
Bound::Unbounded => dims[i],
};
let out = x.narrow(current_dim, start, stop.saturating_sub(start))?;
current_dim += 1;
out
}
TensorIndexer::IndexSelect(indexes) => {
if indexes.rank() != 1 {
crate::bail!("multi-dimensional tensor indexing is not supported")
}
let out = x.index_select(&indexes.to_device(x.device())?, current_dim)?;
current_dim += 1;
out
}
TensorIndexer::Err(e) => crate::bail!("indexing error {e:?}"),
};
}
Ok(x)
}
}
#[derive(Debug)]
/// Generic structure used to index a slice of the tensor
pub enum TensorIndexer {
/// This selects the elements for which an index has some specific value.
Select(usize),
/// This is a regular slice, purely indexing a chunk of the tensor
Narrow(Bound<usize>, Bound<usize>),
/// Indexing via a 1d tensor
IndexSelect(Tensor),
Err(Error),
}
impl From<usize> for TensorIndexer {
fn from(index: usize) -> Self {
TensorIndexer::Select(index)
}
}
impl From<&[u32]> for TensorIndexer {
fn from(index: &[u32]) -> Self {
match Tensor::new(index, &crate::Device::Cpu) {
Ok(tensor) => TensorIndexer::IndexSelect(tensor),
Err(e) => TensorIndexer::Err(e),
}
}
}
impl From<Vec<u32>> for TensorIndexer {
fn from(index: Vec<u32>) -> Self {
let len = index.len();
match Tensor::from_vec(index, len, &crate::Device::Cpu) {
Ok(tensor) => TensorIndexer::IndexSelect(tensor),
Err(e) => TensorIndexer::Err(e),
}
}
}
impl From<&Tensor> for TensorIndexer {
fn from(tensor: &Tensor) -> Self {
TensorIndexer::IndexSelect(tensor.clone())
}
}
trait RB: RangeBounds<usize> {}
impl RB for Range<usize> {}
impl RB for RangeFrom<usize> {}
impl RB for RangeFull {}
impl RB for RangeInclusive<usize> {}
impl RB for RangeTo<usize> {}
impl RB for RangeToInclusive<usize> {}
impl<T: RB> From<T> for TensorIndexer {
fn from(range: T) -> Self {
use std::ops::Bound::*;
let start = match range.start_bound() {
Included(idx) => Included(*idx),
Excluded(idx) => Excluded(*idx),
Unbounded => Unbounded,
};
let end = match range.end_bound() {
Included(idx) => Included(*idx),
Excluded(idx) => Excluded(*idx),
Unbounded => Unbounded,
};
TensorIndexer::Narrow(start, end)
}
}
/// Trait providing the `.i()` slicing method for several argument types (indices, ranges,
/// index tensors and tuples of these).
pub trait IndexOp<T> {
/// Returns the tensor slice selected by `index`, i.e. the chunk of data needed to
/// reconstruct the desired sub-tensor.
fn i(&self, index: T) -> Result<Tensor, Error>;
}
impl<T> IndexOp<T> for Tensor
where
T: Into<TensorIndexer>,
{
fn i(&self, index: T) -> Result<Tensor, Error> {
self.index(&[index.into()])
}
}
macro_rules! index_op_tuple {
($($t:ident),+) => {
#[allow(non_snake_case)]
impl<$($t),*> IndexOp<($($t,)*)> for Tensor
where
$($t: Into<TensorIndexer>,)*
{
fn i(&self, ($($t,)*): ($($t,)*)) -> Result<Tensor, Error> {
self.index(&[$($t.into(),)*])
}
}
};
}
index_op_tuple!(A);
index_op_tuple!(A, B);
index_op_tuple!(A, B, C);
index_op_tuple!(A, B, C, D);
index_op_tuple!(A, B, C, D, E);
index_op_tuple!(A, B, C, D, E, F);
index_op_tuple!(A, B, C, D, E, F, G);
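// Illustrative sketch only (not part of the upstream file): shows how the tuple impls above
// compose, mixing a `usize`, a full range and a partial range in a single call. It assumes the
// usual candle-core crate layout (`crate::Result`, `DType`, `Device`, `Tensor`).
#[cfg(test)]
mod index_op_sketch {
    use super::IndexOp;
    use crate::{DType, Device, Tensor};

    #[test]
    fn mixed_tuple_indexing() -> crate::Result<()> {
        let t = Tensor::zeros((2, 3, 4), DType::F32, &Device::Cpu)?;
        // Dim 0 is selected (and squeezed away), dim 1 is kept in full, dim 2 is sliced to 1..3.
        let s = t.i((0, .., 1..3))?;
        assert_eq!(s.shape().dims(), &[3, 2]);
        Ok(())
    }
}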
[source: candle/candle-core/src/indexer.rs]
use crate::Result;
pub(super) fn nearest_int(v: f32) -> i32 {
v.round() as i32
}
/// Validates that the input and output are the right size and returns the pairs mapping each
/// input region of `xs` to its corresponding output block in `ys`. Each input region is
/// guaranteed to be `T::BLCK_SIZE` floats long.
pub(super) fn group_for_quantization<'a, 'b, T: super::k_quants::GgmlType>(
xs: &'b [f32],
ys: &'a mut [T],
) -> Result<Vec<(&'a mut T, &'b [f32])>> {
let block_size = T::BLCK_SIZE;
let dtype = T::DTYPE;
let expected_blocks = xs.len() / block_size;
let actual_blocks = ys.len();
// Validate that the input is the right size
if expected_blocks != actual_blocks {
crate::bail!("quantize {dtype:?}: expected {expected_blocks} blocks but only {actual_blocks} were provided!")
}
Ok(ys.iter_mut().zip(xs.chunks_exact(block_size)).collect())
}
/// Validates that the input and output are the right size and returns the pairs mapping each
/// input block of `xs` to its corresponding output region in `ys`. Each output region is
/// guaranteed to be `T::BLCK_SIZE` floats long.
pub(super) fn group_for_dequantization<'a, 'b, T: super::k_quants::GgmlType>(
xs: &'a [T],
ys: &'b mut [f32],
) -> Result<Vec<(&'a T, &'b mut [f32])>> {
let block_size = T::BLCK_SIZE;
let dtype = T::DTYPE;
let actual_output_len = ys.len();
let expected_output_len = xs.len() * block_size;
// Validate that the output is the right size
if expected_output_len != actual_output_len {
crate::bail!("dequantize {dtype:?}: ys (len = {actual_output_len}) does not match the expected length of {expected_output_len}!")
}
// Zip the blocks and outputs together
Ok(xs.iter().zip(ys.chunks_exact_mut(block_size)).collect())
}
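/// Unpacks the `j`-th 6-bit (scale, min) pair from the 12-byte packed layout used by the
/// k-quants: entries 0..4 are stored directly in the low 6 bits of `q[0..8]`, while entries 4..8
/// combine the nibbles of `q[8..12]` with the spare high bits of `q[0..8]`.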
pub(super) fn get_scale_min_k4(j: usize, q: &[u8]) -> (u8, u8) {
if j < 4 {
let d = q[j] & 63;
let m = q[j + 4] & 63;
(d, m)
} else {
let d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
let m = (q[j + 4] >> 4) | ((q[j] >> 6) << 4);
(d, m)
}
}
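/// Quantizes `n` values read from `x` to signed levels in `[-nmax, nmax - 1]`, writing the
/// shifted (non-negative) levels to `ls` and returning the reconstruction scale. `rmse_type`
/// selects the error weighting and how much the scale is iteratively refined.
///
/// Safety: `x` and `ls` must both be valid for `n` elements.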
pub(super) unsafe fn make_qx_quants(
n: usize,
nmax: i32,
x: *const f32,
ls: *mut i8,
rmse_type: i32,
) -> f32 {
let mut max = 0f32;
let mut amax = 0f32;
for i in 0..n {
let x = *x.add(i);
let ax = x.abs();
if ax > amax {
amax = ax;
max = x;
}
}
if amax == 0. {
// all zero
for i in 0..n {
*ls.add(i) = 0;
}
return 0.;
}
let mut iscale = -(nmax as f32) / max;
if rmse_type == 0 {
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
*ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8;
}
return 1.0 / iscale;
}
let weight_type = rmse_type % 2;
let mut sumlx = 0f32;
let mut suml2 = 0f32;
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
let l = l.clamp(-nmax, nmax - 1);
*ls.add(i) = (l + nmax) as i8;
let w = if weight_type == 1 { x * x } else { 1.0 };
let l = l as f32;
sumlx += w * x * l;
suml2 += w * l * l;
}
let mut scale = sumlx / suml2;
let mut best = scale * sumlx;
for _itry in 0..3 {
let iscale = 1.0 / scale;
let mut slx = 0f32;
let mut sl2 = 0f32;
let mut changed = false;
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
let l = l.clamp(-nmax, nmax - 1);
if l + nmax != *ls.add(i) as i32 {
changed = true;
}
let w = if weight_type == 1 { x * x } else { 1f32 };
let l = l as f32;
slx += w * x * l;
sl2 += w * l * l;
}
if !changed || sl2 == 0.0 || slx * slx <= best * sl2 {
break;
}
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
*ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8;
}
sumlx = slx;
suml2 = sl2;
scale = sumlx / suml2;
best = scale * sumlx;
}
for _itry in 0..5 {
let mut n_changed = 0;
for i in 0..n {
let x = *x.add(i);
let w = if weight_type == 1 { x * x } else { 1. };
let l = *ls.add(i) as i32 - nmax;
let mut slx = sumlx - w * x * l as f32;
if slx > 0. {
let mut sl2 = suml2 - w * l as f32 * l as f32;
let new_l = nearest_int(x * sl2 / slx);
let new_l = new_l.clamp(-nmax, nmax - 1);
if new_l != l {
slx += w * x * new_l as f32;
sl2 += w * new_l as f32 * new_l as f32;
if sl2 > 0. && slx * slx * suml2 > sumlx * sumlx * sl2 {
*ls.add(i) = (nmax + new_l) as i8;
sumlx = slx;
suml2 = sl2;
scale = sumlx / suml2;
best = scale * sumlx;
n_changed += 1;
}
}
}
}
if n_changed == 0 {
break;
}
}
if rmse_type < 3 {
return scale;
}
for is in -4..4 {
if is == 0 {
continue;
}
iscale = -(nmax as f32 + 0.1f32 * is as f32) / max;
let mut sumlx = 0.;
let mut suml2 = 0.;
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
let l = l.clamp(-nmax, nmax - 1);
let w = if weight_type == 1 { x * x } else { 1. };
let l = l as f32;
sumlx += w * x * l;
suml2 += w * l * l;
}
if suml2 > 0. && sumlx * sumlx > best * suml2 {
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
*ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8;
}
scale = sumlx / suml2;
best = scale * sumlx;
}
}
scale
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L224
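/// Affine quantization: fits `x[i] ≈ scale * l[i] + min` with levels in `[0, nmax]`, refining
/// the fit for up to `ntry` rounds, and returns `(scale, -min)`.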
pub(super) fn make_qkx1_quants(nmax: i32, ntry: usize, x: &[f32]) -> (f32, f32) {
let n = x.len();
let mut l = vec![0; n];
// Get min/max
let min = *x
.iter()
.take(n)
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&x[0]);
let max = *x.iter().max_by(|a, b| a.total_cmp(b)).unwrap_or(&x[0]);
// If min == max, all values are the same => nothing to do here
if max == min {
return (0.0, 0.0);
}
// Ensure min <= 0.0
let mut min = min.min(0.);
// Compute scale and inverse scale
let mut iscale = nmax as f32 / (max - min);
let mut scale = 1.0 / iscale;
for _ in 0..ntry {
let mut sumlx = 0.0;
let mut suml2 = 0;
let mut did_change = false;
for (i, value) in x.iter().enumerate().take(n) {
let li = nearest_int(iscale * (value - min)).clamp(0, nmax);
let clamped_li = li as u8;
if clamped_li != l[i] {
l[i] = clamped_li;
did_change = true;
}
sumlx += (value - min) * li as f32;
suml2 += li * li;
}
scale = sumlx / suml2 as f32;
let sum: f32 = x
.iter()
.take(n)
.zip(l.iter().take(n))
.map(|(xi, &li)| xi - scale * li as f32)
.sum();
min = sum / n as f32;
if min > 0.0 {
min = 0.0;
}
iscale = 1.0 / scale;
if !did_change {
break;
}
}
(scale, -min)
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L165
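/// Computes a symmetric quantization scale for levels in `[-nmax, nmax - 1]`; when `do_rmse` is
/// set, the candidate levels are iteratively refined to minimize the x²-weighted error before
/// the scale is returned.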
pub(super) fn make_q3_quants(x: &[f32], nmax: i32, do_rmse: bool) -> f32 {
let n = x.len();
let mut l = vec![0i8; n];
let mut max = 0.0;
let mut amax = 0.0;
for &xi in x.iter().take(n) {
let ax = xi.abs();
if ax > amax {
amax = ax;
max = xi;
}
}
if amax == 0.0 {
return 0.0;
}
let iscale = -(nmax as f32) / max;
if do_rmse {
let mut sumlx = 0.0;
let mut suml2 = 0.0;
for i in 0..n {
let li = (iscale * x[i]).round() as i32;
let li = li.clamp(-nmax, nmax - 1);
l[i] = li as i8;
let w = x[i] * x[i];
sumlx += w * x[i] * li as f32;
suml2 += w * (li * li) as f32;
}
for _ in 0..5 {
let mut n_changed = 0;
for i in 0..n {
let w = x[i] * x[i];
let mut slx = sumlx - w * x[i] * l[i] as f32;
if slx > 0.0 {
let mut sl2 = suml2 - w * (l[i] as i32 * l[i] as i32) as f32;
let mut new_l = (x[i] * sl2 / slx).round() as i32;
new_l = new_l.clamp(-nmax, nmax - 1);
if new_l != l[i] as i32 {
slx += w * x[i] * new_l as f32;
sl2 += w * (new_l * new_l) as f32;
if sl2 > 0.0 && slx * slx * suml2 > sumlx * sumlx * sl2 {
l[i] = new_l as i8;
sumlx = slx;
suml2 = sl2;
n_changed += 1;
}
}
}
}
if n_changed == 0 {
break;
}
}
for li in l.iter_mut() {
*li += nmax as i8;
}
return sumlx / suml2;
}
for i in 0..n {
let li = (iscale * x[i]).round() as i32;
l[i] = (li.clamp(-nmax, nmax - 1) + nmax) as i8;
}
1.0 / iscale
}
[source: candle/candle-core/src/quantized/utils.rs]
import numpy as np
x = np.arange(10)
# Write a npy file.
np.save("test.npy", x)
# Write multiple values to a npz file.
values = { "x": x, "x_plus_one": x + 1 }
np.savez("test.npz", **values)
[source: candle/candle-core/tests/npy.py]
use candle::Tensor;
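/// An in-memory vision dataset: the train and test images/labels stored as tensors, together
/// with the number of label classes.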
pub struct Dataset {
pub train_images: Tensor,
pub train_labels: Tensor,
pub test_images: Tensor,
pub test_labels: Tensor,
pub labels: usize,
}
pub mod cifar;
pub mod mnist;
[source: candle/candle-datasets/src/vision/mod.rs]
//! DINOv2: Learning Robust Visual Features without Supervision
//! https://github.com/facebookresearch/dinov2
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use clap::Parser;
use candle::{DType, IndexOp, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::dinov2;
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
image: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let image = candle_examples::imagenet::load_image224(args.image)?;
println!("loaded image {image:?}");
let model_file = match args.model {
None => {
let api = hf_hub::api::sync::Api::new()?;
let api = api.model("lmz/candle-dino-v2".into());
api.get("dinov2_vits14.safetensors")?
}
Some(model) => model.into(),
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
let model = dinov2::vit_small(vb)?;
println!("model built");
let logits = model.forward(&image.unsqueeze(0)?)?;
let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
.i(0)?
.to_vec1::<f32>()?;
let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for &(category_idx, pr) in prs.iter().take(5) {
println!(
"{:24}: {:.2}%",
candle_examples::imagenet::CLASSES[category_idx],
100. * pr
);
}
Ok(())
}
[source: candle/candle-examples/examples/dinov2/main.rs]
# candle-marian-mt
`marian-mt` is a neural machine translation model. In this example it is used to
translate text from French to English. See the associated [model
card](https://huggingface.co/Helsinki-NLP/opus-mt-tc-big-fr-en) for details on
the model itself.
## Running an example
```bash
cargo run --example marian-mt --release -- \
--text "Demain, dès l'aube, à l'heure où blanchit la campagne, Je partirai. Vois-tu, je sais que tu m'attends. J'irai par la forêt, j'irai par la montagne. Je ne puis demeurer loin de toi plus longtemps."
```
```
<NIL> Tomorrow, at dawn, at the time when the country is whitening, I will go. See,
I know you are waiting for me. I will go through the forest, I will go through the
mountain. I cannot stay far from you any longer.</s>
```
## Generating the tokenizer.json files
You can use the following script to generate the `tokenizer.json` config files
from the hf-hub repos. This requires the `tokenizers` and `sentencepiece`
packages to be installed, and it uses the `convert_slow_tokenizer.py` script from this
directory.
```python
from convert_slow_tokenizer import MarianConverter
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en", use_fast=False)
fast_tokenizer = MarianConverter(tokenizer, index=0).converted()
fast_tokenizer.save(f"tokenizer-marian-base-fr.json")
fast_tokenizer = MarianConverter(tokenizer, index=1).converted()
fast_tokenizer.save(f"tokenizer-marian-base-en.json")
```
[source: candle/candle-examples/examples/marian-mt/README.md]
use anyhow::Result;
use candle::{Device, Tensor};
use clap::{Parser, Subcommand};
#[derive(Subcommand, Debug, Clone)]
enum Command {
Print {
#[arg(long)]
file: String,
},
SimpleEval {
#[arg(long)]
file: String,
},
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Args {
#[command(subcommand)]
command: Command,
}
pub fn main() -> Result<()> {
let args = Args::parse();
match args.command {
Command::Print { file } => {
let model = candle_onnx::read_file(file)?;
println!("{model:?}");
let graph = model.graph.unwrap();
for node in graph.node.iter() {
println!("{node:?}");
}
}
Command::SimpleEval { file } => {
let model = candle_onnx::read_file(file)?;
let graph = model.graph.as_ref().unwrap();
let constants: std::collections::HashSet<_> =
graph.initializer.iter().map(|i| i.name.as_str()).collect();
let mut inputs = std::collections::HashMap::new();
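// Build dummy all-zero inputs for every non-constant graph input, matching its dtype
// and shape; symbolic dimensions (DimParam) are arbitrarily set to 42.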
for input in graph.input.iter() {
use candle_onnx::onnx::tensor_proto::DataType;
if constants.contains(input.name.as_str()) {
continue;
}
let type_ = input.r#type.as_ref().expect("no type for input");
let type_ = type_.value.as_ref().expect("no type.value for input");
let value = match type_ {
candle_onnx::onnx::type_proto::Value::TensorType(tt) => {
let dt = match DataType::try_from(tt.elem_type) {
Ok(dt) => match candle_onnx::dtype(dt) {
Some(dt) => dt,
None => {
anyhow::bail!(
"unsupported 'value' data-type {dt:?} for {}",
input.name
)
}
},
type_ => anyhow::bail!("unsupported input type {type_:?}"),
};
let shape = tt.shape.as_ref().expect("no tensortype.shape for input");
let dims = shape
.dim
.iter()
.map(|dim| match dim.value.as_ref().expect("no dim value") {
candle_onnx::onnx::tensor_shape_proto::dimension::Value::DimValue(v) => Ok(*v as usize),
candle_onnx::onnx::tensor_shape_proto::dimension::Value::DimParam(_) => Ok(42),
})
.collect::<Result<Vec<usize>>>()?;
Tensor::zeros(dims, dt, &Device::Cpu)?
}
type_ => anyhow::bail!("unsupported input type {type_:?}"),
};
println!("input {}: {value:?}", input.name);
inputs.insert(input.name.clone(), value);
}
let outputs = candle_onnx::simple_eval(&model, inputs)?;
for (name, value) in outputs.iter() {
println!("output {name}: {value:?}")
}
}
}
Ok(())
}
[source: candle/candle-examples/examples/onnx_basics.rs]
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::{Error as E, Result};
use clap::Parser;
use candle_transformers::models::mpt::{Config, Model as M};
use candle_transformers::models::quantized_mpt::Model as Q;
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::Tokenizer;
enum Model {
M(M),
Q(Q),
}
impl Model {
fn forward(&mut self, xs: &Tensor) -> candle::Result<Tensor> {
match self {
Self::M(model) => model.forward(xs),
Self::Q(model) => model.forward(xs),
}
}
}
struct TextGeneration {
model: Model,
device: Device,
tokenizer: Tokenizer,
logits_processor: LogitsProcessor,
repeat_penalty: f32,
repeat_last_n: usize,
verbose_prompt: bool,
}
impl TextGeneration {
#[allow(clippy::too_many_arguments)]
fn new(
model: Model,
tokenizer: Tokenizer,
seed: u64,
temp: Option<f64>,
top_p: Option<f64>,
repeat_penalty: f32,
repeat_last_n: usize,
verbose_prompt: bool,
device: &Device,
) -> Self {
let logits_processor = LogitsProcessor::new(seed, temp, top_p);
Self {
model,
tokenizer,
logits_processor,
repeat_penalty,
repeat_last_n,
verbose_prompt,
device: device.clone(),
}
}
fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> {
use std::io::Write;
println!("starting the inference loop");
let tokens = self.tokenizer.encode(prompt, true).map_err(E::msg)?;
if tokens.is_empty() {
anyhow::bail!("Empty prompts are not supported in the phi model.")
}
if self.verbose_prompt {
for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) {
let token = token.replace('▁', " ").replace("<0x0A>", "\n");
println!("{id:7} -> '{token}'");
}
}
let mut tokens = tokens.get_ids().to_vec();
let mut generated_tokens = 0usize;
let eos_token = match self.tokenizer.get_vocab(true).get("<|endoftext|>") {
Some(token) => *token,
None => anyhow::bail!("cannot find the endoftext token"),
};
print!("{prompt}");
std::io::stdout().flush()?;
let start_gen = std::time::Instant::now();
for index in 0..sample_len {
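// After the first step only the newly generated token is fed back in; the model keeps
// its own KV cache of the earlier context.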
let context_size = if index > 0 { 1 } else { tokens.len() };
let ctxt = &tokens[tokens.len().saturating_sub(context_size)..];
let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?;
let logits = self.model.forward(&input)?;
let logits = logits.squeeze(0)?.to_dtype(DType::F32)?;
let logits = if self.repeat_penalty == 1. {
logits
} else {
let start_at = tokens.len().saturating_sub(self.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
self.repeat_penalty,
&tokens[start_at..],
)?
};
let next_token = self.logits_processor.sample(&logits)?;
tokens.push(next_token);
generated_tokens += 1;
if next_token == eos_token {
break;
}
let token = self.tokenizer.decode(&[next_token], true).map_err(E::msg)?;
print!("{token}");
std::io::stdout().flush()?;
}
let dt = start_gen.elapsed();
println!(
"\n{generated_tokens} tokens generated ({:.2} token/s)",
generated_tokens as f64 / dt.as_secs_f64(),
);
Ok(())
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// Display the token for the specified prompt.
#[arg(long)]
verbose_prompt: bool,
#[arg(long)]
prompt: String,
/// The temperature used to generate samples.
#[arg(long)]
temperature: Option<f64>,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// The length of the sample to generate (in tokens).
#[arg(long, short = 'n', default_value_t = 1000)]
sample_len: usize,
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
#[arg(long)]
quantized: bool,
#[arg(long)]
weight_file: Option<String>,
#[arg(long)]
tokenizer: Option<String>,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
println!(
"temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}",
args.temperature.unwrap_or(0.),
args.repeat_penalty,
args.repeat_last_n
);
let start = std::time::Instant::now();
let api = Api::new()?;
let model_id = match args.model_id {
Some(model_id) => model_id.to_string(),
None => "lmz/candle-replit-code".to_string(),
};
let revision = match args.revision {
Some(rev) => rev.to_string(),
None => "main".to_string(),
};
let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision));
let tokenizer_filename = match args.tokenizer {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("tokenizer.json")?,
};
let filename = match args.weight_file {
Some(weight_file) => std::path::PathBuf::from(weight_file),
None => {
if args.quantized {
repo.get("model-replit-code-v1_5-q4k.gguf")?
} else {
repo.get("model.safetensors")?
}
}
};
println!("retrieved the files in {:?}", start.elapsed());
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let start = std::time::Instant::now();
let device = candle_examples::device(args.cpu)?;
let config = Config::replit_code_v1_5_3b();
let model = if args.quantized {
let vb =
candle_transformers::quantized_var_builder::VarBuilder::from_gguf(&filename, &device)?;
Model::Q(Q::new(&config, vb.pp("transformer"))?)
} else {
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[filename], DType::F32, &device)? };
Model::M(M::new(&config, vb.pp("transformer"))?)
};
println!("loaded the model in {:?}", start.elapsed());
let mut pipeline = TextGeneration::new(
model,
tokenizer,
args.seed,
args.temperature,
args.top_p,
args.repeat_penalty,
args.repeat_last_n,
args.verbose_prompt,
&device,
);
pipeline.run(&args.prompt, args.sample_len)?;
Ok(())
}
[source: candle/candle-examples/examples/replit-code/main.rs]
# candle-t5
## Encoder-decoder example:
```bash
$ cargo run --example t5 --release -- --model-id "t5-small" --prompt "translate to German: A beautiful candle." --decode
...
Eine schöne Kerze.
9 tokens generated (2.42 token/s)
```
Variants such as [flan-t5](https://huggingface.co/google/flan-t5-small), [flan-ul2](https://huggingface.co/google/flan-ul2) (with `--revision "refs/pr/25"`), and [Co-EdIT](https://huggingface.co/grammarly/coedit-large) are also supported.
## Translation with [MADLAD-400](https://arxiv.org/abs/2309.04662)
MADLAD-400 is a series of multilingual machine translation T5 models trained on 250 billion tokens covering over 450 languages using publicly available data. These models are competitive with significantly larger models.
```bash
cargo run --example t5 --release -- \
--model-id "jbochi/madlad400-3b-mt" \
--prompt "<2de> How are you, my friend?" \
--decode --temperature 0
...
Wie geht es dir, mein Freund?
```
## Sentence embedding example
```bash
$ cargo run --example t5 --release -- --model-id "t5-small" --prompt "A beautiful candle."
...
[[[ 0.0515, -0.0541, -0.0761, ..., -0.0392, 0.1511, -0.0265],
[-0.0974, 0.0998, -0.1659, ..., -0.2450, 0.1738, -0.0164],
[ 0.0624, -0.1024, 0.0430, ..., -0.1388, 0.0564, -0.2962],
[-0.0389, -0.1173, 0.0026, ..., 0.1064, -0.1065, 0.0990],
[ 0.1300, 0.0027, -0.0326, ..., 0.0026, -0.0317, 0.0851]]]
Tensor[[1, 5, 512], f32]
Took 303.766583ms
```
[source: candle/candle-examples/examples/t5/README.md]
# candle-wuerstchen: Efficient Pretraining of Text-to-Image Models

The `wuerstchen` example is a port of the [diffusers
implementation](https://github.com/huggingface/diffusers/tree/19edca82f1ff194c07317369a92b470dbae97f34/src/diffusers/pipelines/wuerstchen) for Würstchen v2.
The candle implementation reproduces the same structure/files for models and
pipelines. Useful resources:
- [Official implementation](https://github.com/dome272/Wuerstchen).
- [Arxiv paper](https://arxiv.org/abs/2306.00637).
- Blog post: [Introducing Würstchen: Fast Diffusion for Image Generation](https://huggingface.co/blog/wuerstchen).
## Getting the weights
The weights are automatically downloaded for you from the [HuggingFace
Hub](https://huggingface.co/) on the first run. There are various command line
flags to use local files instead, run with `--help` to learn about them.
## Running an example
```bash
cargo run --example wuerstchen --release --features cuda,cudnn -- \
--prompt "Anthropomorphic cat dressed as a fire fighter"
```
The final image is named `sd_final.png` by default.
[source: candle/candle-examples/examples/wuerstchen/README.md]
use candle::{Device, Result, Tensor};
/// Loads an image from disk using the image crate and returns a tensor with shape
/// (3, 224, 224), with the standard ImageNet normalization applied.
pub fn load_image224<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> {
let img = image::io::Reader::open(p)?
.decode()
.map_err(candle::Error::wrap)?
.resize_to_fill(224, 224, image::imageops::FilterType::Triangle);
let img = img.to_rgb8();
let data = img.into_raw();
let data = Tensor::from_vec(data, (224, 224, 3), &Device::Cpu)?.permute((2, 0, 1))?;
let mean = Tensor::new(&[0.485f32, 0.456, 0.406], &Device::Cpu)?.reshape((3, 1, 1))?;
let std = Tensor::new(&[0.229f32, 0.224, 0.225], &Device::Cpu)?.reshape((3, 1, 1))?;
(data.to_dtype(candle::DType::F32)? / 255.)?
.broadcast_sub(&mean)?
.broadcast_div(&std)
}
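// Typical use (sketch): `unsqueeze(0)` the (3, 224, 224) tensor to add a batch dimension, run a
// classifier on it, then map the top logits back through the `CLASSES` table below.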
pub const CLASS_COUNT: i64 = 1000;
pub const CLASSES: [&str; 1000] = [
"tench, Tinca tinca",
"goldfish, Carassius auratus",
"great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias",
"tiger shark, Galeocerdo cuvieri",
"hammerhead, hammerhead shark",
"electric ray, crampfish, numbfish, torpedo",
"stingray",
"cock",
"hen",
"ostrich, Struthio camelus",
"brambling, Fringilla montifringilla",
"goldfinch, Carduelis carduelis",
"house finch, linnet, Carpodacus mexicanus",
"junco, snowbird",
"indigo bunting, indigo finch, indigo bird, Passerina cyanea",
"robin, American robin, Turdus migratorius",
"bulbul",
"jay",
"magpie",
"chickadee",
"water ouzel, dipper",
"kite",
"bald eagle, American eagle, Haliaeetus leucocephalus",
"vulture",
"great grey owl, great gray owl, Strix nebulosa",
"European fire salamander, Salamandra salamandra",
"common newt, Triturus vulgaris",
"eft",
"spotted salamander, Ambystoma maculatum",
"axolotl, mud puppy, Ambystoma mexicanum",
"bullfrog, Rana catesbeiana",
"tree frog, tree-frog",
"tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui",
"loggerhead, loggerhead turtle, Caretta caretta",
"leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea",
"mud turtle",
"terrapin",
"box turtle, box tortoise",
"banded gecko",
"common iguana, iguana, Iguana iguana",
"American chameleon, anole, Anolis carolinensis",
"whiptail, whiptail lizard",
"agama",
"frilled lizard, Chlamydosaurus kingi",
"alligator lizard",
"Gila monster, Heloderma suspectum",
"green lizard, Lacerta viridis",
"African chameleon, Chamaeleo chamaeleon",
"Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis",
"African crocodile, Nile crocodile, Crocodylus niloticus",
"American alligator, Alligator mississipiensis",
"triceratops",
"thunder snake, worm snake, Carphophis amoenus",
"ringneck snake, ring-necked snake, ring snake",
"hognose snake, puff adder, sand viper",
"green snake, grass snake",
"king snake, kingsnake",
"garter snake, grass snake",
"water snake",
"vine snake",
"night snake, Hypsiglena torquata",
"boa constrictor, Constrictor constrictor",
"rock python, rock snake, Python sebae",
"Indian cobra, Naja naja",
"green mamba",
"sea snake",
"horned viper, cerastes, sand viper, horned asp, Cerastes cornutus",
"diamondback, diamondback rattlesnake, Crotalus adamanteus",
"sidewinder, horned rattlesnake, Crotalus cerastes",
"trilobite",
"harvestman, daddy longlegs, Phalangium opilio",
"scorpion",
"black and gold garden spider, Argiope aurantia",
"barn spider, Araneus cavaticus",
"garden spider, Aranea diademata",
"black widow, Latrodectus mactans",
"tarantula",
"wolf spider, hunting spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse, partridge, Bonasa umbellus",
"prairie chicken, prairie grouse, prairie fowl",
"peacock",
"quail",
"partridge",
"African grey, African gray, Psittacus erithacus",
"macaw",
"sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"drake",
"red-breasted merganser, Mergus serrator",
"goose",
"black swan, Cygnus atratus",
"tusker",
"echidna, spiny anteater, anteater",
"platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus",
"wallaby, brush kangaroo",
"koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus",
"wombat",
"jellyfish",
"sea anemone, anemone",
"brain coral",
"flatworm, platyhelminth",
"nematode, nematode worm, roundworm",
"conch",
"snail",
"slug",
"sea slug, nudibranch",
"chiton, coat-of-mail shell, sea cradle, polyplacophore",
"chambered nautilus, pearly nautilus, nautilus",
"Dungeness crab, Cancer magister",
"rock crab, Cancer irroratus",
"fiddler crab",
"king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica",
"American lobster, Northern lobster, Maine lobster, Homarus americanus",
"spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish",
"crayfish, crawfish, crawdad, crawdaddy",
"hermit crab",
"isopod",
"white stork, Ciconia ciconia",
"black stork, Ciconia nigra",
"spoonbill",
"flamingo",
"little blue heron, Egretta caerulea",
"American egret, great white heron, Egretta albus",
"bittern",
"crane",
"limpkin, Aramus pictus",
"European gallinule, Porphyrio porphyrio",
"American coot, marsh hen, mud hen, water hen, Fulica americana",
"bustard",
"ruddy turnstone, Arenaria interpres",
"red-backed sandpiper, dunlin, Erolia alpina",
"redshank, Tringa totanus",
"dowitcher",
"oystercatcher, oyster catcher",
"pelican",
"king penguin, Aptenodytes patagonica",
"albatross, mollymawk",
"grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus",
"killer whale, killer, orca, grampus, sea wolf, Orcinus orca",
"dugong, Dugong dugon",
"sea lion",
"Chihuahua",
"Japanese spaniel",
"Maltese dog, Maltese terrier, Maltese",
"Pekinese, Pekingese, Peke",
"Shih-Tzu",
"Blenheim spaniel",
"papillon",
"toy terrier",
"Rhodesian ridgeback",
"Afghan hound, Afghan",
"basset, basset hound",
"beagle",
"bloodhound, sleuthhound",
"bluetick",
"black-and-tan coonhound",
"Walker hound, Walker foxhound",
"English foxhound",
"redbone",
"borzoi, Russian wolfhound",
"Irish wolfhound",
"Italian greyhound",
"whippet",
"Ibizan hound, Ibizan Podenco",
"Norwegian elkhound, elkhound",
"otterhound, otter hound",
"Saluki, gazelle hound",
"Scottish deerhound, deerhound",
"Weimaraner",
"Staffordshire bullterrier, Staffordshire bull terrier",
"American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier",
"Bedlington terrier",
"Border terrier",
"Kerry blue terrier",
"Irish terrier",
"Norfolk terrier",
"Norwich terrier",
"Yorkshire terrier",
"wire-haired fox terrier",
"Lakeland terrier",
"Sealyham terrier, Sealyham",
"Airedale, Airedale terrier",
"cairn, cairn terrier",
"Australian terrier",
"Dandie Dinmont, Dandie Dinmont terrier",
"Boston bull, Boston terrier",
"miniature schnauzer",
"giant schnauzer",
"standard schnauzer",
"Scotch terrier, Scottish terrier, Scottie",
"Tibetan terrier, chrysanthemum dog",
"silky terrier, Sydney silky",
"soft-coated wheaten terrier",
"West Highland white terrier",
"Lhasa, Lhasa apso",
"flat-coated retriever",
"curly-coated retriever",
"golden retriever",
"Labrador retriever",
"Chesapeake Bay retriever",
"German short-haired pointer",
"vizsla, Hungarian pointer",
"English setter",
"Irish setter, red setter",
"Gordon setter",
"Brittany spaniel",
"clumber, clumber spaniel",
"English springer, English springer spaniel",
"Welsh springer spaniel",
"cocker spaniel, English cocker spaniel, cocker",
"Sussex spaniel",
"Irish water spaniel",
"kuvasz",
"schipperke",
"groenendael",
"malinois",
"briard",
"kelpie",
"komondor",
"Old English sheepdog, bobtail",
"Shetland sheepdog, Shetland sheep dog, Shetland",
"collie",
"Border collie",
"Bouvier des Flandres, Bouviers des Flandres",
"Rottweiler",
"German shepherd, German shepherd dog, German police dog, alsatian",
"Doberman, Doberman pinscher",
"miniature pinscher",
"Greater Swiss Mountain dog",
"Bernese mountain dog",
"Appenzeller",
"EntleBucher",
"boxer",
"bull mastiff",
"Tibetan mastiff",
"French bulldog",
"Great Dane",
"Saint Bernard, St Bernard",
"Eskimo dog, husky",
"malamute, malemute, Alaskan malamute",
"Siberian husky",
"dalmatian, coach dog, carriage dog",
"affenpinscher, monkey pinscher, monkey dog",
"basenji",
"pug, pug-dog",
"Leonberg",
"Newfoundland, Newfoundland dog",
"Great Pyrenees",
"Samoyed, Samoyede",
"Pomeranian",
"chow, chow chow",
"keeshond",
"Brabancon griffon",
"Pembroke, Pembroke Welsh corgi",
"Cardigan, Cardigan Welsh corgi",
"toy poodle",
"miniature poodle",
"standard poodle",
"Mexican hairless",
"timber wolf, grey wolf, gray wolf, Canis lupus",
"white wolf, Arctic wolf, Canis lupus tundrarum",
"red wolf, maned wolf, Canis rufus, Canis niger",
"coyote, prairie wolf, brush wolf, Canis latrans",
"dingo, warrigal, warragal, Canis dingo",
"dhole, Cuon alpinus",
"African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus",
"hyena, hyaena",
"red fox, Vulpes vulpes",
"kit fox, Vulpes macrotis",
"Arctic fox, white fox, Alopex lagopus",
"grey fox, gray fox, Urocyon cinereoargenteus",
"tabby, tabby cat",
"tiger cat",
"Persian cat",
"Siamese cat, Siamese",
"Egyptian cat",
"cougar, puma, catamount, mountain lion, painter, panther, Felis concolor",
"lynx, catamount",
"leopard, Panthera pardus",
"snow leopard, ounce, Panthera uncia",
"jaguar, panther, Panthera onca, Felis onca",
"lion, king of beasts, Panthera leo",
"tiger, Panthera tigris",
"cheetah, chetah, Acinonyx jubatus",
"brown bear, bruin, Ursus arctos",
"American black bear, black bear, Ursus americanus, Euarctos americanus",
"ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus",
"sloth bear, Melursus ursinus, Ursus ursinus",
"mongoose",
"meerkat, mierkat",
"tiger beetle",
"ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle",
"ground beetle, carabid beetle",
"long-horned beetle, longicorn, longicorn beetle",
"leaf beetle, chrysomelid",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant, emmet, pismire",
"grasshopper, hopper",
"cricket",
"walking stick, walkingstick, stick insect",
"cockroach, roach",
"mantis, mantid",
"cicada, cicala",
"leafhopper",
"lacewing, lacewing fly",
"dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
"damselfly",
"admiral",
"ringlet, ringlet butterfly",
"monarch, monarch butterfly, milkweed butterfly, Danaus plexippus",
"cabbage butterfly",
"sulphur butterfly, sulfur butterfly",
"lycaenid, lycaenid butterfly",
"starfish, sea star",
"sea urchin",
"sea cucumber, holothurian",
"wood rabbit, cottontail, cottontail rabbit",
"hare",
"Angora, Angora rabbit",
"hamster",
"porcupine, hedgehog",
"fox squirrel, eastern fox squirrel, Sciurus niger",
"marmot",
"beaver",
"guinea pig, Cavia cobaya",
"sorrel",
"zebra",
"hog, pig, grunter, squealer, Sus scrofa",
"wild boar, boar, Sus scrofa",
"warthog",
"hippopotamus, hippo, river horse, Hippopotamus amphibius",
"ox",
"water buffalo, water ox, Asiatic buffalo, Bubalus bubalis",
"bison",
"ram, tup",
"bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis",
"ibex, Capra ibex",
"hartebeest",
"impala, Aepyceros melampus",
"gazelle",
"Arabian camel, dromedary, Camelus dromedarius",
"llama",
"weasel",
"mink",
"polecat, fitch, foulmart, foumart, Mustela putorius",
"black-footed ferret, ferret, Mustela nigripes",
"otter",
"skunk, polecat, wood pussy",
"badger",
"armadillo",
"three-toed sloth, ai, Bradypus tridactylus",
"orangutan, orang, orangutang, Pongo pygmaeus",
"gorilla, Gorilla gorilla",
"chimpanzee, chimp, Pan troglodytes",
"gibbon, Hylobates lar",
"siamang, Hylobates syndactylus, Symphalangus syndactylus",
"guenon, guenon monkey",
"patas, hussar monkey, Erythrocebus patas",
"baboon",
"macaque",
"langur",
"colobus, colobus monkey",
"proboscis monkey, Nasalis larvatus",
"marmoset",
"capuchin, ringtail, Cebus capucinus",
"howler monkey, howler",
"titi, titi monkey",
"spider monkey, Ateles geoffroyi",
"squirrel monkey, Saimiri sciureus",
"Madagascar cat, ring-tailed lemur, Lemur catta",
"indri, indris, Indri indri, Indri brevicaudatus",
"Indian elephant, Elephas maximus",
"African elephant, Loxodonta africana",
"lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens",
"giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca",
"barracouta, snoek",
"eel",
"coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch",
"rock beauty, Holocanthus tricolor",
"anemone fish",
"sturgeon",
"gar, garfish, garpike, billfish, Lepisosteus osseus",
"lionfish",
"puffer, pufferfish, blowfish, globefish",
"abacus",
"abaya",
"academic gown, academic robe, judge's robe",
"accordion, piano accordion, squeeze box",
"acoustic guitar",
"aircraft carrier, carrier, flattop, attack aircraft carrier",
"airliner",
"airship, dirigible",
"altar",
"ambulance",
"amphibian, amphibious vehicle",
"analog clock",
"apiary, bee house",
"apron",
"ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin",
"assault rifle, assault gun",
"backpack, back pack, knapsack, packsack, rucksack, haversack",
"bakery, bakeshop, bakehouse",
"balance beam, beam",
"balloon",
"ballpoint, ballpoint pen, ballpen, Biro",
"Band Aid",
"banjo",
"bannister, banister, balustrade, balusters, handrail",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel, cask",
"barrow, garden cart, lawn cart, wheelbarrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"bathing cap, swimming cap",
"bath towel",
"bathtub, bathing tub, bath, tub",
"beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon",
"beacon, lighthouse, beacon light, pharos",
"beaker",
"bearskin, busby, shako",
"beer bottle",
"beer glass",
"bell cote, bell cot",
"bib",
"bicycle-built-for-two, tandem bicycle, tandem",
"bikini, two-piece",
"binder, ring-binder",
"binoculars, field glasses, opera glasses",
"birdhouse",
"boathouse",
"bobsled, bobsleigh, bob",
"bolo tie, bolo, bola tie, bola",
"bonnet, poke bonnet",
"bookcase",
"bookshop, bookstore, bookstall",
"bottlecap",
"bow",
"bow tie, bow-tie, bowtie",
"brass, memorial tablet, plaque",
"brassiere, bra, bandeau",
"breakwater, groin, groyne, mole, bulwark, seawall, jetty",
"breastplate, aegis, egis",
"broom",
"bucket, pail",
"buckle",
"bulletproof vest",
"bullet train, bullet",
"butcher shop, meat market",
"cab, hack, taxi, taxicab",
"caldron, cauldron",
"candle, taper, wax light",
"cannon",
"canoe",
"can opener, tin opener",
"cardigan",
"car mirror",
"carousel, carrousel, merry-go-round, roundabout, whirligig",
"carpenter's kit, tool kit",
"carton",
"car wheel",
"cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello, violoncello",
"cellular telephone, cellular phone, cellphone, cell, mobile phone",
"chain",
"chainlink fence",
"chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour",
"chain saw, chainsaw",
"chest",
"chiffonier, commode",
"chime, bell, gong",
"china cabinet, china closet",
"Christmas stocking",
"church, church building",
"cinema, movie theater, movie theatre, movie house, picture palace",
"cleaver, meat cleaver, chopper",
"cliff dwelling",
"cloak",
"clog, geta, patten, sabot",
"cocktail shaker",
"coffee mug",
"coffeepot",
"coil, spiral, volute, whorl, helix",
"combination lock",
"computer keyboard, keypad",
"confectionery, confectionary, candy store",
"container ship, containership, container vessel",
"convertible",
"corkscrew, bottle screw",
"cornet, horn, trumpet, trump",
"cowboy boot",
"cowboy hat, ten-gallon hat",
"cradle",
"crane",
"crash helmet",
"crate",
"crib, cot",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam, dike, dyke",
"desk",
"desktop computer",
"dial telephone, dial phone",
"diaper, nappy, napkin",
"digital clock",
"digital watch",
"dining table, board",
"dishrag, dishcloth",
"dishwasher, dish washer, dishwashing machine",
"disk brake, disc brake",
"dock, dockage, docking facility",
"dogsled, dog sled, dog sleigh",
"dome",
"doormat, welcome mat",
"drilling platform, offshore rig",
"drum, membranophone, tympan",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan, blower",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso maker",
"face powder",
"feather boa, boa",
"file, file cabinet, filing cabinet",
"fireboat",
"fire engine, fire truck",
"fire screen, fireguard",
"flagpole, flagstaff",
"flute, transverse flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster",
"freight car",
"French horn, horn",
"frying pan, frypan, skillet",
"fur coat",
"garbage truck, dustcart",
"gasmask, respirator, gas helmet",
"gas pump, gasoline pump, petrol pump, island dispenser",
"goblet",
"go-kart",
"golf ball",
"golfcart, golf cart",
"gondola",
"gong, tam-tam",
"gown",
"grand piano, grand",
"greenhouse, nursery, glasshouse",
"grille, radiator grille",
"grocery store, grocery, food market, market",
"guillotine",
"hair slide",
"hair spray",
"half track",
"hammer",
"hamper",
"hand blower, blow dryer, blow drier, hair dryer, hair drier",
"hand-held computer, hand-held microcomputer",
"handkerchief, hankie, hanky, hankey",
"hard disc, hard disk, fixed disk",
"harmonica, mouth organ, harp, mouth harp",
"harp",
"harvester, reaper",
"hatchet",
"holster",
"home theater, home theatre",
"honeycomb",
"hook, claw",
"hoopskirt, crinoline",
"horizontal bar, high bar",
"horse cart, horse-cart",
"hourglass",
"iPod",
"iron, smoothing iron",
"jack-o'-lantern",
"jean, blue jean, denim",
"jeep, landrover",
"jersey, T-shirt, tee shirt",
"jigsaw puzzle",
"jinrikisha, ricksha, rickshaw",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat, laboratory coat",
"ladle",
"lampshade, lamp shade",
"laptop, laptop computer",
"lawn mower, mower",
"lens cap, lens cover",
"letter opener, paper knife, paperknife",
"library",
"lifeboat",
"lighter, light, igniter, ignitor",
"limousine, limo",
"liner, ocean liner",
"lipstick, lip rouge",
"Loafer",
"lotion",
"loudspeaker, speaker, speaker unit, loudspeaker system, speaker system",
"loupe, jeweler's loupe",
"lumbermill, sawmill",
"magnetic compass",
"mailbag, postbag",
"mailbox, letter box",
"maillot",
"maillot, tank suit",
"manhole cover",
"maraca",
"marimba, xylophone",
"mask",
"matchstick",
"maypole",
"maze, labyrinth",
"measuring cup",
"medicine chest, medicine cabinet",
"megalith, megalithic structure",
"microphone, mike",
"microwave, microwave oven",
"military uniform",
"milk can",
"minibus",
"miniskirt, mini",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home, manufactured home",
"Model T",
"modem",
"monastery",
"monitor",
"moped",
"mortar",
"mortarboard",
"mosque",
"mosquito net",
"motor scooter, scooter",
"mountain bike, all-terrain bike, off-roader",
"mountain tent",
"mouse, computer mouse",
"mousetrap",
"moving van",
"muzzle",
"nail",
"neck brace",
"necklace",
"nipple",
"notebook, notebook computer",
"obelisk",
"oboe, hautboy, hautbois",
"ocarina, sweet potato",
"odometer, hodometer, mileometer, milometer",
"oil filter",
"organ, pipe organ",
"oscilloscope, scope, cathode-ray oscilloscope, CRO",
"overskirt",
"oxcart",
"oxygen mask",
"packet",
"paddle, boat paddle",
"paddlewheel, paddle wheel",
"padlock",
"paintbrush",
"pajama, pyjama, pj's, jammies",
"palace",
"panpipe, pandean pipe, syrinx",
"paper towel",
"parachute, chute",
"parallel bars, bars",
"park bench",
"parking meter",
"passenger car, coach, carriage",
"patio, terrace",
"pay-phone, pay-station",
"pedestal, plinth, footstall",
"pencil box, pencil case",
"pencil sharpener",
"perfume, essence",
"Petri dish",
"photocopier",
"pick, plectrum, plectron",
"pickelhaube",
"picket fence, paling",
"pickup, pickup truck",
"pier",
"piggy bank, penny bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate, pirate ship",
"pitcher, ewer",
"plane, carpenter's plane, woodworking plane",
"planetarium",
"plastic bag",
"plate rack",
"plow, plough",
"plunger, plumber's helper",
"Polaroid camera, Polaroid Land camera",
"pole",
"police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria",
"poncho",
"pool table, billiard table, snooker table",
"pop bottle, soda bottle",
"pot, flowerpot",
"potter's wheel",
"power drill",
"prayer rug, prayer mat",
"printer",
"prison, prison house",
"projectile, missile",
"projector",
"puck, hockey puck",
"punching bag, punch bag, punching ball, punchball",
"purse",
"quill, quill pen",
"quilt, comforter, comfort, puff",
"racer, race car, racing car",
"racket, racquet",
"radiator",
"radio, wireless",
"radio telescope, radio reflector",
"rain barrel",
"recreational vehicle, RV, R.V.",
"reel",
"reflex camera",
"refrigerator, icebox",
"remote control, remote",
"restaurant, eating house, eating place, eatery",
"revolver, six-gun, six-shooter",
"rifle",
"rocking chair, rocker",
"rotisserie",
"rubber eraser, rubber, pencil eraser",
"rugby ball",
"rule, ruler",
"running shoe",
"safe",
"safety pin",
"saltshaker, salt shaker",
"sandal",
"sarong",
"sax, saxophone",
"scabbard",
"scale, weighing machine",
"school bus",
"schooner",
"scoreboard",
"screen, CRT screen",
"screw",
"screwdriver",
"seat belt, seatbelt",
"sewing machine",
"shield, buckler",
"shoe shop, shoe-shop, shoe store",
"shoji",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"ski mask",
"sleeping bag",
"slide rule, slipstick",
"sliding door",
"slot, one-armed bandit",
"snorkel",
"snowmobile",
"snowplow, snowplough",
"soap dispenser",
"soccer ball",
"sock",
"solar dish, solar collector, solar furnace",
"sombrero",
"soup bowl",
"space bar",
"space heater",
"space shuttle",
"spatula",
"speedboat",
"spider web, spider's web",
"spindle",
"sports car, sport car",
"spotlight, spot",
"stage",
"steam locomotive",
"steel arch bridge",
"steel drum",
"stethoscope",
"stole",
"stone wall",
"stopwatch, stop watch",
"stove",
"strainer",
"streetcar, tram, tramcar, trolley, trolley car",
"stretcher",
"studio couch, day bed",
"stupa, tope",
"submarine, pigboat, sub, U-boat",
"suit, suit of clothes",
"sundial",
"sunglass",
"sunglasses, dark glasses, shades",
"sunscreen, sunblock, sun blocker",
"suspension bridge",
"swab, swob, mop",
"sweatshirt",
"swimming trunks, bathing trunks",
"swing",
"switch, electric switch, electrical switch",
"syringe",
"table lamp",
"tank, army tank, armored combat vehicle, armoured combat vehicle",
"tape player",
"teapot",
"teddy, teddy bear",
"television, television system",
"tennis ball",
"thatch, thatched roof",
"theater curtain, theatre curtain",
"thimble",
"thresher, thrasher, threshing machine",
"throne",
"tile roof",
"toaster",
"tobacco shop, tobacconist shop, tobacconist",
"toilet seat",
"torch",
"totem pole",
"tow truck, tow car, wrecker",
"toyshop",
"tractor",
"trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi",
"tray",
"trench coat",
"tricycle, trike, velocipede",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus, trolley coach, trackless trolley",
"trombone",
"tub, vat",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle, monocycle",
"upright, upright piano",
"vacuum, vacuum cleaner",
"vase",
"vault",
"velvet",
"vending machine",
"vestment",
"viaduct",
"violin, fiddle",
"volleyball",
"waffle iron",
"wall clock",
"wallet, billfold, notecase, pocketbook",
"wardrobe, closet, press",
"warplane, military plane",
"washbasin, handbasin, washbowl, lavabo, wash-hand basin",
"washer, automatic washer, washing machine",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"wing",
"wok",
"wooden spoon",
"wool, woolen, woollen",
"worm fence, snake fence, snake-rail fence, Virginia fence",
"wreck",
"yawl",
"yurt",
"web site, website, internet site, site",
"comic book",
"crossword puzzle, crossword",
"street sign",
"traffic light, traffic signal, stoplight",
"book jacket, dust cover, dust jacket, dust wrapper",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot, hotpot",
"trifle",
"ice cream, icecream",
"ice lolly, lolly, lollipop, popsicle",
"French loaf",
"bagel, beigel",
"pretzel",
"cheeseburger",
"hotdog, hot dog, red hot",
"mashed potato",
"head cabbage",
"broccoli",
"cauliflower",
"zucchini, courgette",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber, cuke",
"artichoke, globe artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple, ananas",
"banana",
"jackfruit, jak, jack",
"custard apple",
"pomegranate",
"hay",
"carbonara",
"chocolate sauce, chocolate syrup",
"dough",
"meat loaf, meatloaf",
"pizza, pizza pie",
"potpie",
"burrito",
"red wine",
"espresso",
"cup",
"eggnog",
"alp",
"bubble",
"cliff, drop, drop-off",
"coral reef",
"geyser",
"lakeside, lakeshore",
"promontory, headland, head, foreland",
"sandbar, sand bar",
"seashore, coast, seacoast, sea-coast",
"valley, vale",
"volcano",
"ballplayer, baseball player",
"groom, bridegroom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
"corn",
"acorn",
"hip, rose hip, rosehip",
"buckeye, horse chestnut, conker",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn, carrion fungus",
"earthstar",
"hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa",
"bolete",
"ear, spike, capitulum",
"toilet tissue, toilet paper, bathroom tissue",
];
[source: candle/candle-examples/src/imagenet.rs]
// Inspired by
// https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h
// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h
#pragma once
/// @param COND - a boolean expression to switch by
/// @param CONST_NAME - a name given for the constexpr bool variable.
/// @param ... - code to execute for true and false
///
/// Usage:
/// ```
/// BOOL_SWITCH(flag, BoolConst, [&] {
/// some_function<BoolConst>(...);
/// });
/// ```
#define BOOL_SWITCH(COND, CONST_NAME, ...) \
[&] { \
if (COND) { \
constexpr static bool CONST_NAME = true; \
return __VA_ARGS__(); \
} else { \
constexpr static bool CONST_NAME = false; \
return __VA_ARGS__(); \
} \
}()
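/// Binds `elem_type` to cutlass::half_t when COND is true and to cutlass::bfloat16_t otherwise,
/// then runs the given code with that alias in scope.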
#define FP16_SWITCH(COND, ...) \
[&] { \
if (COND) { \
using elem_type = cutlass::half_t; \
return __VA_ARGS__(); \
} else { \
using elem_type = cutlass::bfloat16_t; \
return __VA_ARGS__(); \
} \
}()
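/// Rounds the runtime head dimension up to the nearest supported compile-time value
/// (32, 64, 96, 128, 160, 192, 224 or 256), bound as the constexpr `kHeadDim`.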
#define FWD_HEADDIM_SWITCH(HEADDIM, ...) \
[&] { \
if (HEADDIM <= 32) { \
constexpr static int kHeadDim = 32; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 64) { \
constexpr static int kHeadDim = 64; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 96) { \
constexpr static int kHeadDim = 96; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 128) { \
constexpr static int kHeadDim = 128; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 160) { \
constexpr static int kHeadDim = 160; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 192) { \
constexpr static int kHeadDim = 192; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 224) { \
constexpr static int kHeadDim = 224; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 256) { \
constexpr static int kHeadDim = 256; \
return __VA_ARGS__(); \
} \
}()
[source: candle/candle-flash-attn/kernels/static_switch.h]
// WARNING: THIS IS ONLY VALID ASSUMING THAT inp IS CONTIGUOUS!
// TODO: proper error reporting when ids are larger than v_size.
#include "cuda_utils.cuh"
#include<stdint.h>
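// index_select: the output is laid out as (left, ids_dim, right); each output element copies
// inp at index ids[id_i] along the selected dimension, going through get_strided_index when the
// input is not contiguous.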
template<typename T, typename I>
__device__ void index_select(
const size_t numel,
const size_t num_dims,
const size_t *info,
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t ids_dim_size,
const size_t right_size
) {
const size_t *dims = info;
const size_t *strides = info + num_dims;
bool b = is_contiguous(num_dims, dims, strides);
for (unsigned int dst_i = blockIdx.x * blockDim.x + threadIdx.x; dst_i < numel; dst_i += blockDim.x * gridDim.x) {
unsigned int left_i = dst_i / (ids_dim_size * right_size);
unsigned int id_i = dst_i / right_size % ids_dim_size;
unsigned int right_i = dst_i % right_size;
unsigned int src_i = left_i * (src_dim_size * right_size) + ids[id_i] * right_size + right_i;
unsigned strided_i = b ? src_i : get_strided_index(src_i, num_dims, dims, strides);
out[dst_i] = inp[strided_i];
}
}
#define IS_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t ids_dim_size, \
const size_t right_size \
) { index_select(numel, num_dims, info, ids, inp, out, left_size, src_dim_size, ids_dim_size, right_size); }
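// gather: the output has the same shape as `ids`; along the gathered dimension,
// out[..., i, ...] = inp[..., ids[..., i, ...], ...].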
template<typename T, typename I>
__device__ void gather(
const size_t numel,
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t ids_dim_size,
const size_t right_size
) {
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
size_t post = i % right_size;
size_t idx = ids[i];
size_t pre = i / (right_size * ids_dim_size);
size_t src_i = (pre * src_dim_size + idx) * right_size + post;
out[i] = inp[src_i];
}
}
#define GATHER_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t ids_dim_size, \
const size_t right_size \
) { gather(numel, ids, inp, out, left_size, src_dim_size, ids_dim_size, right_size); }
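// index_add accumulates selected rows into the destination: with the source
// viewed as [left_size, ids_dim_size, right_size] and the destination as
// [left_size, dst_dim_size, right_size],
//   out[pre][ids[j]][post] += inp[pre][j][post]   for j in 0..ids_dim_size.
// Unlike gather above, `ids` indexes the destination dimension, and duplicate
// ids accumulate correctly because each (pre, post) pair is handled by a single thread.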
template<typename T, typename I>
__device__ void index_add(
const I *ids,
const size_t ids_dim_size,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t dst_dim_size,
const size_t right_size
) {
const size_t numel = left_size * right_size;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
const size_t pre = i / right_size;
const size_t post = i % right_size;
for (unsigned int j = 0; j < ids_dim_size; ++j) {
const size_t idx = ids[j];
const size_t src_i = (pre * ids_dim_size + j) * right_size + post;
const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
out[dst_i] += inp[src_i];
}
}
}
#define IA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const INDEX_TYPENAME *ids, \
const size_t ids_dim_size, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t dst_dim_size, \
const size_t right_size \
) { index_add(ids, ids_dim_size, inp, out, left_size, src_dim_size, dst_dim_size, right_size); }
template<typename T, typename I>
__device__ void scatter_add(
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t dst_dim_size,
const size_t right_size
) {
const size_t numel = left_size * right_size;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
const size_t pre = i / right_size;
const size_t post = i % right_size;
for (unsigned int j = 0; j < src_dim_size; ++j) {
const size_t src_i = (pre * src_dim_size + j) * right_size + post;
const size_t idx = ids[src_i];
const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
out[dst_i] += inp[src_i];
}
}
}
#define SA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t dst_dim_size, \
const size_t right_size \
) { scatter_add(ids, inp, out, left_size, src_dim_size, dst_dim_size, right_size); }
#if __CUDA_ARCH__ >= 800
IS_OP(__nv_bfloat16, int64_t, is_i64_bf16)
IS_OP(__nv_bfloat16, uint32_t, is_u32_bf16)
IS_OP(__nv_bfloat16, uint8_t, is_u8_bf16)
GATHER_OP(__nv_bfloat16, int64_t, gather_i64_bf16)
GATHER_OP(__nv_bfloat16, uint32_t, gather_u32_bf16)
GATHER_OP(__nv_bfloat16, uint8_t, gather_u8_bf16)
IA_OP(__nv_bfloat16, int64_t, ia_i64_bf16)
IA_OP(__nv_bfloat16, uint32_t, ia_u32_bf16)
IA_OP(__nv_bfloat16, uint8_t, ia_u8_bf16)
SA_OP(__nv_bfloat16, int64_t, sa_i64_bf16)
SA_OP(__nv_bfloat16, uint32_t, sa_u32_bf16)
SA_OP(__nv_bfloat16, uint8_t, sa_u8_bf16)
#endif
#if __CUDA_ARCH__ >= 530
IS_OP(__half, int64_t, is_i64_f16)
IS_OP(__half, uint32_t, is_u32_f16)
IS_OP(__half, uint8_t, is_u8_f16)
GATHER_OP(__half, int64_t, gather_i64_f16)
GATHER_OP(__half, uint32_t, gather_u32_f16)
GATHER_OP(__half, uint8_t, gather_u8_f16)
IA_OP(__half, uint32_t, ia_u32_f16)
IA_OP(__half, uint8_t, ia_u8_f16)
SA_OP(__half, uint32_t, sa_u32_f16)
SA_OP(__half, uint8_t, sa_u8_f16)
#endif
IS_OP(float, int64_t, is_i64_f32)
IS_OP(double, int64_t, is_i64_f64)
IS_OP(uint8_t, int64_t, is_i64_u8)
IS_OP(uint32_t, int64_t, is_i64_u32)
IS_OP(int64_t, int64_t, is_i64_i64)
IS_OP(float, uint32_t, is_u32_f32)
IS_OP(double, uint32_t, is_u32_f64)
IS_OP(uint8_t, uint32_t, is_u32_u8)
IS_OP(int64_t, uint32_t, is_u32_i64)
IS_OP(uint32_t, uint32_t, is_u32_u32)
IS_OP(float, uint8_t, is_u8_f32)
IS_OP(double, uint8_t, is_u8_f64)
IS_OP(uint8_t, uint8_t, is_u8_u8)
IS_OP(uint32_t, uint8_t, is_u8_u32)
IS_OP(int64_t, uint8_t, is_u8_i64)
GATHER_OP(float, int64_t, gather_i64_f32)
GATHER_OP(double, int64_t, gather_i64_f64)
GATHER_OP(uint8_t, int64_t, gather_i64_u8)
GATHER_OP(uint32_t, int64_t, gather_i64_u32)
GATHER_OP(int64_t, int64_t, gather_i64_i64)
GATHER_OP(float, uint32_t, gather_u32_f32)
GATHER_OP(double, uint32_t, gather_u32_f64)
GATHER_OP(uint8_t, uint32_t, gather_u32_u8)
GATHER_OP(int64_t, uint32_t, gather_u32_i64)
GATHER_OP(uint32_t, uint32_t, gather_u32_u32)
GATHER_OP(float, uint8_t, gather_u8_f32)
GATHER_OP(double, uint8_t, gather_u8_f64)
GATHER_OP(uint8_t, uint8_t, gather_u8_u8)
GATHER_OP(uint32_t, uint8_t, gather_u8_u32)
GATHER_OP(int64_t, uint8_t, gather_u8_i64)
IA_OP(float, int64_t, ia_i64_f32)
IA_OP(double, int64_t, ia_i64_f64)
IA_OP(uint8_t, int64_t, ia_i64_u8)
IA_OP(int64_t, int64_t, ia_i64_i64)
IA_OP(uint32_t, int64_t, ia_i64_u32)
IA_OP(float, uint32_t, ia_u32_f32)
IA_OP(double, uint32_t, ia_u32_f64)
IA_OP(uint8_t, uint32_t, ia_u32_u8)
IA_OP(int64_t, uint32_t, ia_u32_i64)
IA_OP(uint32_t, uint32_t, ia_u32_u32)
IA_OP(float, uint8_t, ia_u8_f32)
IA_OP(double, uint8_t, ia_u8_f64)
IA_OP(uint8_t, uint8_t, ia_u8_u8)
IA_OP(uint32_t, uint8_t, ia_u8_u32)
IA_OP(int64_t, uint8_t, ia_u8_i64)
SA_OP(float, int64_t, sa_i64_f32)
SA_OP(double, int64_t, sa_i64_f64)
SA_OP(uint8_t, int64_t, sa_i64_u8)
SA_OP(int64_t, int64_t, sa_i64_i64)
SA_OP(uint32_t, int64_t, sa_i64_u32)
SA_OP(float, uint32_t, sa_u32_f32)
SA_OP(double, uint32_t, sa_u32_f64)
SA_OP(uint8_t, uint32_t, sa_u32_u8)
SA_OP(int64_t, uint32_t, sa_u32_i64)
SA_OP(uint32_t, uint32_t, sa_u32_u32)
SA_OP(float, uint8_t, sa_u8_f32)
SA_OP(double, uint8_t, sa_u8_f64)
SA_OP(uint8_t, uint8_t, sa_u8_u8)
SA_OP(uint32_t, uint8_t, sa_u8_u32)
SA_OP(int64_t, uint8_t, sa_u8_i64)
| candle/candle-kernels/src/indexing.cu/0 | {
"file_path": "candle/candle-kernels/src/indexing.cu",
"repo_id": "candle",
"token_count": 4314
} | 27 |
#include <metal_stdlib>
using namespace metal;
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
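// Maps a logical (row-major) element index to a storage offset for a possibly
// non-contiguous tensor, walking the dimensions from innermost to outermost.
// E.g. with dims = [3, 2] and strides = [1, 3] (a transposed 2x3 view),
// idx = 4 -> (4 % 2) * 3 = 0, then (2 % 3) * 1 = 2, so the storage offset is 2.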
METAL_FUNC uint get_strided_index(
uint idx,
constant size_t &num_dims,
constant size_t *dims,
constant size_t *strides
) {
uint strided_i = 0;
for (uint d = 0; d < num_dims; d++) {
uint dim_idx = num_dims - 1 - d;
strided_i += (idx % dims[dim_idx]) * strides[dim_idx];
idx /= dims[dim_idx];
}
return strided_i;
}
constant int THREADGROUP_SIZE = 2048;
#define ARGMIN(NAME, T, MAXVALUE) \
kernel void NAME( \
constant size_t &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant size_t &el_to_sum_per_block, \
device const T *src, \
device uint *dst, \
uint id [[ thread_position_in_grid ]], \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
\
threadgroup T shared_memory[THREADGROUP_SIZE]; \
threadgroup uint shared_indices[THREADGROUP_SIZE]; \
\
shared_memory[tid] = MAXVALUE; \
shared_indices[tid] = 0xFFFFFFFF; \
bool notset = true; \
/* \
// Elements summed in this block range from dst_id * el_to_sum_per_block \
// to (dst_id + 1) * el_to_sum_per_block. \
*/ \
size_t start_idx = dst_id * el_to_sum_per_block; \
size_t stop_idx = start_idx + el_to_sum_per_block; \
size_t idx = start_idx + tid; \
while (idx < stop_idx) { \
/* \
// TODO: Fast version for the contiguous case. \
*/ \
size_t strided_i = get_strided_index(idx, num_dims, dims, strides); \
if (notset || src[strided_i] < shared_memory[tid]) { \
shared_memory[tid] = src[strided_i]; \
/* Assume that the reduction takes place over the last dimension which is contiguous. */ \
shared_indices[tid] = idx % dims[num_dims - 1]; \
notset = false; \
} \
idx += block_dim; \
} \
\
threadgroup_barrier(mem_flags::mem_none); \
\
/* \
// reduction in shared memory \
*/ \
for (uint s = block_dim / 2; s > 0; s >>= 1) { \
if (tid < s && shared_memory[tid + s] < shared_memory[tid]) { \
shared_indices[tid] = shared_indices[tid + s]; \
shared_memory[tid] = shared_memory[tid + s]; \
} \
threadgroup_barrier(mem_flags::mem_none); \
} \
\
if (tid == 0){ \
dst[dst_id] = shared_indices[0]; \
} \
}
#define ARGMAX(NAME, T, MINVALUE) \
kernel void NAME( \
constant size_t &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant size_t &el_to_sum_per_block, \
device const T *src, \
device uint *dst, \
uint id [[ thread_position_in_grid ]], \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
\
threadgroup T shared_memory[THREADGROUP_SIZE]; \
threadgroup uint shared_indices[THREADGROUP_SIZE]; \
\
shared_memory[tid] = MINVALUE; \
shared_indices[tid] = 0xFFFFFFFF; \
/* \
// Elements summed in this block range from dst_id * el_to_sum_per_block \
// to (dst_id + 1) * el_to_sum_per_block. \
*/ \
size_t start_idx = dst_id * el_to_sum_per_block; \
size_t stop_idx = start_idx + el_to_sum_per_block; \
size_t idx = start_idx + tid; \
bool notset = true; \
while (idx < stop_idx) { \
/* \
// TODO: Fast version for the contiguous case. \
*/ \
size_t strided_i = get_strided_index(idx, num_dims, dims, strides); \
if (notset || shared_memory[tid] < src[strided_i]) { \
shared_memory[tid] = src[strided_i]; \
shared_indices[tid] = idx % dims[num_dims - 1]; \
notset = false; \
} \
idx += block_dim; \
} \
\
threadgroup_barrier(mem_flags::mem_none); \
\
/* \
// reduction in shared memory \
*/ \
for (uint s = block_dim / 2; s > 0; s >>= 1) { \
if (tid < s && shared_memory[tid + s] > shared_memory[tid]) { \
shared_indices[tid] = shared_indices[tid + s]; \
shared_memory[tid] = shared_memory[tid + s]; \
} \
threadgroup_barrier(mem_flags::mem_none); \
} \
\
if (tid == 0){ \
dst[dst_id] = shared_indices[0]; \
} \
}
#define REDUCE(FN, NAME, T, START) \
kernel void NAME( \
constant size_t &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant size_t &el_to_sum_per_block, \
device const T *src, \
device T *dst, \
uint id [[ thread_position_in_grid ]], \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
\
threadgroup T shared_memory[THREADGROUP_SIZE]; \
\
shared_memory[tid] = START; \
/* \
// Elements summed in this block range from dst_id * el_to_sum_per_block \
// to (dst_id + 1) * el_to_sum_per_block. \
*/ \
size_t start_idx = dst_id * el_to_sum_per_block; \
size_t stop_idx = start_idx + el_to_sum_per_block; \
size_t idx = start_idx + tid; \
while (idx < stop_idx) { \
/* \
// TODO: Fast version for the contiguous case. \
*/ \
size_t strided_i = get_strided_index(idx, num_dims, dims, strides); \
T x = shared_memory[tid]; \
T y = src[strided_i]; \
shared_memory[tid] = FN; \
idx += block_dim; \
} \
\
threadgroup_barrier(mem_flags::mem_none); \
\
/* \
// reduction in shared memory \
*/ \
for (uint s = block_dim / 2; s > 0; s >>= 1) { \
if (tid < s) { \
T x = shared_memory[tid]; \
T y = shared_memory[tid + s]; \
shared_memory[tid] = FN; \
} \
threadgroup_barrier(mem_flags::mem_none); \
} \
\
dst[dst_id] = shared_memory[0]; \
}
#define SOFTMAX(NAME, T) \
kernel void NAME( \
constant size_t &src_numel, \
constant size_t &el_to_sum_per_block, \
device const T *src, \
device T *dst, \
\
uint id [[ thread_position_in_grid ]], \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
threadgroup float shared_memory[THREADGROUP_SIZE]; \
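    /* \
    // Three passes over the row handled by this threadgroup: (1) reduce the \
    // maximum into shared memory, (2) write exp(x - max) to dst while \
    // accumulating the sum, (3) rescale dst by 1 / sum. \
    */ \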
shared_memory[tid] = -INFINITY; \
size_t start_idx = dst_id * el_to_sum_per_block; \
size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); \
size_t idx = start_idx + tid; \
\
\
float tmp = -INFINITY; \
while (idx < stop_idx) { \
tmp = MAX(tmp, float(src[idx])); \
idx += block_dim; \
} \
shared_memory[tid] = tmp; \
\
threadgroup_barrier(mem_flags::mem_threadgroup); \
\
for (uint s = block_dim / 2; s > 0; s >>= 1) { \
if (tid < s) { \
shared_memory[tid] = MAX(shared_memory[tid], shared_memory[tid + s]); \
} \
threadgroup_barrier(mem_flags::mem_threadgroup); \
} \
\
/* wait for shared_memory[0] to be filled */ \
threadgroup_barrier(mem_flags::mem_threadgroup); \
\
float _max = shared_memory[0]; \
\
/* prevent tid=0 from overwriting _max before other threads have written it */ \
threadgroup_barrier(mem_flags::mem_threadgroup); \
shared_memory[tid] = 0; \
\
idx = start_idx + tid; \
while (idx < stop_idx) { \
const float val = exp(float(src[idx]) - _max); \
dst[idx] = T(val); \
shared_memory[tid] += val; \
idx += block_dim; \
} \
threadgroup_barrier(mem_flags::mem_threadgroup); \
for (uint s = block_dim / 2; s > 0; s >>= 1) { \
if (tid < s) { \
shared_memory[tid] += shared_memory[tid + s]; \
} \
threadgroup_barrier(mem_flags::mem_threadgroup); \
} \
\
const T inv_acc = T(1.0/shared_memory[0]); \
idx = start_idx + tid; \
while (idx < stop_idx) { \
dst[idx] *= inv_acc; \
idx += block_dim; \
} \
}
REDUCE(x + y, fast_sum_f32_strided, float, 0)
REDUCE(x + y, fast_sum_u32_strided, uint, 0)
REDUCE(x + y, fast_sum_f16_strided, half, 0)
REDUCE(x + y, fast_sum_u8_strided, uint8_t, 0)
REDUCE(x * y, fast_mul_f32_strided, float, 1)
REDUCE(x * y, fast_mul_u32_strided, uint, 1)
REDUCE(x * y, fast_mul_f16_strided, half, 1)
REDUCE(MAX(x, y), fast_max_f32_strided, float, -HUGE_VALF)
REDUCE(MAX(x, y), fast_max_u32_strided, uint, 0)
REDUCE(MAX(x, y), fast_max_f16_strided, half, -HUGE_VALH)
REDUCE(MAX(x, y), fast_max_u8_strided, uint8_t, 0)
REDUCE(MIN(x, y), fast_min_f32_strided, float, HUGE_VALF)
REDUCE(MIN(x, y), fast_min_u32_strided, uint, 0xFFFFFFFF)
REDUCE(MIN(x, y), fast_min_f16_strided, half, HUGE_VALH)
REDUCE(MIN(x, y), fast_min_u8_strided, uint8_t, 0xFF)
ARGMIN(fast_argmin_f32_strided, float, HUGE_VALF)
ARGMIN(fast_argmin_f16_strided, half, HUGE_VALH)
ARGMIN(fast_argmin_u32_strided, uint, 0xFFFFFFFF)
ARGMIN(fast_argmin_u8_strided, uint8_t, 0xFF)
ARGMAX(fast_argmax_f32_strided, float, -HUGE_VALF)
ARGMAX(fast_argmax_f16_strided, half, -HUGE_VALH)
ARGMAX(fast_argmax_u32_strided, uint, 0)
ARGMAX(fast_argmax_u8_strided, uint8_t, 0)
SOFTMAX(softmax_f32, float)
SOFTMAX(softmax_f16, half)
#if __METAL_VERSION__ >= 220
REDUCE(x + y, fast_sum_i64_strided, int64_t, 0)
REDUCE(MIN(x, y), fast_min_i64_strided, int64_t, INT_MAX)
REDUCE(MAX(x, y), fast_max_i64_strided, int64_t, INT_MIN)
ARGMIN(fast_argmin_i64_strided, int64_t, INT_MAX)
ARGMAX(fast_argmax_i64_strided, int64_t, INT_MIN)
#endif
#if defined(__HAVE_BFLOAT__)
REDUCE(x + y, fast_sum_bf16, bfloat, 0)
REDUCE(x * y, fast_mul_bf16, bfloat, 1)
REDUCE(MAX(x, y), fast_max_bf16, bfloat, -HUGE_VALBF)
REDUCE(MIN(x, y), fast_min_bf16, bfloat, HUGE_VALBF)
ARGMIN(fast_argmin_bf16, bfloat, HUGE_VALBF)
ARGMAX(fast_argmax_bf16, bfloat, -HUGE_VALBF)
SOFTMAX(softmax_bf16, bfloat)
#endif
| candle/candle-metal-kernels/src/reduce.metal/0 | {
"file_path": "candle/candle-metal-kernels/src/reduce.metal",
"repo_id": "candle",
"token_count": 8032
} | 28 |
//! Encoding Utilities. (e.g., one-hot/cold encoding)
use candle::{bail, DType, Result, Tensor, WithDType};
/// One-hot/cold encoding.
///
/// Given an input tensor of indices, this function returns a tensor of the same shape as the input
/// tensor with an additional dimension of the given depth size. The values in the returned tensor are
/// all set to the `off_value` except for the positions represented by the indices, which are set to the `on_value`.
///
/// This method returns a tensor with a rank that is one rank larger than the input tensor.
///
/// As an example, the following tensor will be encoded to a one-hot matrix:
///
/// `[[0i64, 2], [1, -1]]`
///
/// with a depth of 4 will be encoded to:
///
/// `[[[1, 0, 0, 0], [0, 0, 1, 0]], [[0, 1, 0, 0], [0, 0, 0, 0]]]`
///
/// When an index value of -1 is encountered, no position is set in the corresponding one-hot vector,
/// leaving it filled entirely with the `off_value`.
///
///
/// This method supports one-cold encoding by setting `on_value` to `0` and `off_value` to `1`.
/// By default `on_value` is `1` and `off_value` is `0`.
///
/// Other encoding values can be used by setting `on_value` and `off_value` to the desired values.
///
/// # Examples
///
/// ## One-hot encoding
///
/// ```rust
/// use candle::{Shape, Tensor, Device};
/// use candle_nn::encoding::one_hot;
///
/// let device = candle::Device::Cpu;
///
/// let indices = Tensor::new(vec![vec![0i64, 2], vec![1, -1]], &device).unwrap();
/// let depth = 4;
/// let one_hot = one_hot(indices, depth, 1f32, 0f32).unwrap();
///
/// let expected_matrix = [
/// [[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]],
/// [[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
/// ];
///
/// assert_eq!(one_hot.shape(), &Shape::from((2, 2, depth)));
///
/// let matrix = one_hot.to_vec3::<f32>().unwrap();
///
/// assert_eq!(matrix, expected_matrix);
///```
/// ## One-cold Encoding
///
/// ```rust
/// use candle::{Shape, Tensor, Device};
/// use candle_nn::encoding::one_hot;
///
///
/// let device = candle::Device::Cpu;
/// let depth = 4;
/// let indices = Tensor::new(vec![vec![0u8, 2], vec![1, 3]], &device).unwrap();
/// let one_cold = one_hot(indices, depth, 0u8, 1u8).unwrap();
///
/// let expected_matrix = [[[0, 1, 1, 1], [1, 1, 0, 1]], [[1, 0, 1, 1], [1, 1, 1, 0]]];
///
/// assert_eq!(one_cold.shape(), &Shape::from((2, 2, depth)));
///
/// let matrix = one_cold.to_vec3::<u8>().unwrap();
///
/// assert_eq!(matrix, expected_matrix);
/// ```
///
///
/// # Bails
///
/// This method bails if:
/// - One of the index values is less than -1.
/// - One of the index values is greater than or equal to the depth value.
/// - The input data type is not `U8`, `U32`, or `I64`.
///
/// # API Design
///
/// The API design for this method is loosely based on the [TensorFlow One-Hot](https://www.tensorflow.org/api_docs/python/tf/one_hot) method.
pub fn one_hot<D: WithDType>(
indices: Tensor,
depth: usize,
on_value: D,
off_value: D,
) -> Result<Tensor> {
let mut target_shape = indices.dims().to_vec();
target_shape.push(depth);
let indices = indices.flatten_all()?;
let mut out = vec![off_value; depth * indices.elem_count()];
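    // `out` is a flat row-major buffer: row `i` (of length `depth`) encodes the
    // i-th flattened index and starts at offset `i * depth`.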
match indices.dtype() {
DType::U8 => {
let indices = indices.to_vec1::<u8>()?;
for (i, &index) in indices.iter().enumerate() {
set_at_index(index, i * depth, depth, &mut out, on_value)?;
}
}
DType::U32 => {
let indices = indices.to_vec1::<u32>()?;
for (i, &index) in indices.iter().enumerate() {
set_at_index(index, i * depth, depth, &mut out, on_value)?;
}
}
DType::I64 => {
let indices = indices.to_vec1::<i64>()?;
for (i, &index) in indices.iter().enumerate() {
set_at_index(index, i * depth, depth, &mut out, on_value)?;
}
}
dtype => {
bail!("one_hot: unsupported data type {dtype:?}, expected U8, U32, or I64")
}
};
Tensor::from_vec(out, target_shape, indices.device())
}
fn set_at_index<D: WithDType, I: Into<i64>>(
value: I,
offset: usize,
depth: usize,
v: &mut Vec<D>,
on_value: D,
) -> Result<()> {
let value = value.into();
    // A value of -1 skips the entire row, leaving it filled with `off_value`.
if value == -1 {
return Ok(());
}
if value < -1 {
bail!(
"one_hot: invalid negative index value {value}, expected a positive index value or -1"
);
}
let value = value as usize;
if value >= depth {
bail!("one_hot: index value {value} exceeds depth {depth}")
}
let idx = offset + value;
if idx >= v.len() {
bail!("one_hot: index out of bounds {idx}, len {}", v.len());
}
v[idx] = on_value;
Ok(())
}
| candle/candle-nn/src/encoding.rs/0 | {
"file_path": "candle/candle-nn/src/encoding.rs",
"repo_id": "candle",
"token_count": 2026
} | 29 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Result;
use candle::{test_utils, Device, Tensor};
use candle_nn::{LayerNorm, Module};
#[test]
fn layer_norm() -> Result<()> {
let device = &Device::Cpu;
let w = Tensor::new(&[3f32], device)?;
let b = Tensor::new(&[0.5f32], device)?;
let ln = LayerNorm::new(w, b, 1e-8);
let two = Tensor::new(&[[[2f32]]], device)?;
let res = ln.forward(&two)?.flatten_all()?;
assert_eq!(res.to_vec1::<f32>()?, [0.5f32]);
let inp = Tensor::new(&[[[4f32, 0f32]]], device)?;
let res = ln.forward(&inp)?;
assert_eq!(res.to_vec3::<f32>()?, [[[3.5f32, -2.5]]]);
let inp = Tensor::new(&[[[1f32, 2., 3.], [4., 5., 6.], [9., 8., 7.]]], device)?;
let res = ln.forward(&inp)?;
assert_eq!(
test_utils::to_vec3_round(&res, 4)?,
[[
[-3.1742, 0.5, 4.1742],
[-3.1742, 0.5, 4.1742],
[4.1742, 0.5, -3.1742]
]]
);
let mean = (res.sum_keepdim(2)? / 3.0)?;
// The average value should be `b`.
assert_eq!(mean.to_vec3::<f32>()?, [[[0.5], [0.5], [0.5]]]);
let std = (res.broadcast_sub(&mean)?.sqr()?.sum_keepdim(2)?.sqrt()? / 3.0)?;
// The standard deviation should be sqrt(`w`).
assert_eq!(
test_utils::to_vec3_round(&std, 4)?,
[[[1.7321], [1.7321], [1.7321]]]
);
Ok(())
}
| candle/candle-nn/tests/layer_norm.rs/0 | {
"file_path": "candle/candle-nn/tests/layer_norm.rs",
"repo_id": "candle",
"token_count": 733
} | 30 |
# Generated content DO NOT EDIT
from .. import onnx
ONNXModel = onnx.ONNXModel
ONNXTensorDescription = onnx.ONNXTensorDescription
| candle/candle-pyo3/py_src/candle/onnx/__init__.py/0 | {
"file_path": "candle/candle-pyo3/py_src/candle/onnx/__init__.py",
"repo_id": "candle",
"token_count": 46
} | 31 |
import candle
from candle import Tensor
from candle.nn import Linear
def test_linear_layer_can_be_constructed():
linear = Linear(10, 10)
assert linear is not None
def test_linear_layer_can_forward_a_singular_input():
linear = Linear(384, 1536)
input_tensor = candle.randn((8, 384))
output = linear.forward(input_tensor)
assert output.shape == (8, 1536)
def test_linear_layer_can_forward_a_batched_input():
linear = Linear(384, 1536)
input_tensor = candle.randn((16, 8, 384))
output = linear.forward(input_tensor)
assert output.shape == (16, 8, 1536)
def test_quantized_linear_layer_can_forward_a_singular_input():
linear = Linear(384, 1536)
linear.weight = linear.weight.quantize("q4_0")
input_tensor = candle.randn((8, 384))
output = linear.forward(input_tensor)
assert output.shape == (8, 1536)
def test_quantized_linear_layer_can_forward_a_batched_input():
linear = Linear(384, 1536)
linear.weight = linear.weight.quantize("q4_0")
input_tensor = candle.randn((16, 8, 384))
output = linear.forward(input_tensor)
assert output.shape == (16, 8, 1536)
| candle/candle-pyo3/tests/bindings/test_linear.py/0 | {
"file_path": "candle/candle-pyo3/tests/bindings/test_linear.py",
"repo_id": "candle",
"token_count": 431
} | 32 |
use super::with_tracing::{layer_norm, linear, LayerNorm, Linear};
use candle::{DType, Device, Result, Tensor};
use candle_nn::{Embedding, Module, VarBuilder};
use serde::Deserialize;
pub const DTYPE: DType = DType::F32;
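// Replaces entries of `on_false` with `on_true` wherever `mask` is non-zero.
// E.g. a mask of [[0u8, 1]] with `on_true = f32::NEG_INFINITY` keeps the first
// column and sets the second one to -inf; this is how masked attention scores
// are removed before the softmax below.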
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
enum HiddenAct {
Gelu,
Relu,
}
struct HiddenActLayer {
act: HiddenAct,
span: tracing::Span,
}
impl HiddenActLayer {
fn new(act: HiddenAct) -> Self {
let span = tracing::span!(tracing::Level::TRACE, "hidden-act");
Self { act, span }
}
}
impl Module for HiddenActLayer {
fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> {
let _enter = self.span.enter();
match self.act {
// https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/activations.py#L213
HiddenAct::Gelu => xs.gelu(),
HiddenAct::Relu => xs.relu(),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
enum PositionEmbeddingType {
#[default]
Absolute,
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
vocab_size: usize,
dim: usize,
n_layers: usize,
n_heads: usize,
hidden_dim: usize,
activation: HiddenAct,
max_position_embeddings: usize,
initializer_range: f64,
pad_token_id: usize,
#[serde(default)]
position_embedding_type: PositionEmbeddingType,
#[serde(default)]
use_cache: bool,
model_type: Option<String>,
}
impl Default for Config {
fn default() -> Self {
Self {
vocab_size: 30522,
dim: 768,
n_layers: 12,
n_heads: 12,
hidden_dim: 3072,
activation: HiddenAct::Gelu,
max_position_embeddings: 512,
initializer_range: 0.02,
pad_token_id: 0,
position_embedding_type: PositionEmbeddingType::Absolute,
use_cache: true,
model_type: Some("distilbert".to_string()),
}
}
}
struct Embeddings {
word_embeddings: Embedding,
position_embeddings: Embedding,
layer_norm: LayerNorm,
span: tracing::Span,
}
impl Embeddings {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let word_embeddings =
candle_nn::embedding(config.vocab_size, config.dim, vb.pp("word_embeddings"))?;
let position_embeddings = candle_nn::embedding(
config.max_position_embeddings,
config.dim,
vb.pp("position_embeddings"),
)?;
let layer_norm = layer_norm(config.dim, 1e-12, vb.pp("LayerNorm"))?;
Ok(Self {
word_embeddings,
position_embeddings,
layer_norm,
span: tracing::span!(tracing::Level::TRACE, "embeddings"),
})
}
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (_bsize, seq_len) = input_ids.dims2()?;
let input_embeddings = self.word_embeddings.forward(input_ids)?;
let position_ids = (0..seq_len as u32).collect::<Vec<_>>();
let position_ids = Tensor::new(&position_ids[..], input_ids.device())?;
let embeddings =
input_embeddings.broadcast_add(&self.position_embeddings.forward(&position_ids)?)?;
let embeddings = self.layer_norm.forward(&embeddings)?;
Ok(embeddings)
}
}
struct MultiHeadSelfAttention {
q_lin: Linear,
k_lin: Linear,
v_lin: Linear,
out_lin: Linear,
n_heads: usize,
attention_head_size: usize,
span: tracing::Span,
}
impl MultiHeadSelfAttention {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let attention_head_size = config.dim / config.n_heads;
let all_head_size = config.n_heads * attention_head_size;
let dim = config.dim;
let q_lin = linear(dim, all_head_size, vb.pp("q_lin"))?;
let v_lin = linear(dim, all_head_size, vb.pp("v_lin"))?;
let k_lin = linear(dim, all_head_size, vb.pp("k_lin"))?;
let out_lin = linear(all_head_size, dim, vb.pp("out_lin"))?;
Ok(Self {
q_lin,
k_lin,
v_lin,
out_lin,
n_heads: config.n_heads,
attention_head_size,
span: tracing::span!(tracing::Level::TRACE, "attention"),
})
}
}
impl MultiHeadSelfAttention {
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (bs, q_length, _dim) = hidden_states.dims3()?;
let dim_per_head = self.attention_head_size;
let q = self.q_lin.forward(hidden_states)?;
let k = self.k_lin.forward(hidden_states)?;
let v = self.v_lin.forward(hidden_states)?;
let q = q
.reshape((bs, q_length, self.n_heads, dim_per_head))?
.transpose(1, 2)?;
let k = k
.reshape((bs, q_length, self.n_heads, dim_per_head))?
.transpose(1, 2)?;
let v = v
.reshape((bs, q_length, self.n_heads, dim_per_head))?
.transpose(1, 2)?;
let q: Tensor = (q / (dim_per_head as f64).sqrt())?;
let scores = q.matmul(&k.transpose(2, 3)?.contiguous()?)?;
let mask = attention_mask.broadcast_as(scores.shape())?;
let scores = masked_fill(&scores.to_dtype(DType::F32)?, &mask, f32::NEG_INFINITY)?;
let weights = candle_nn::ops::softmax(&scores, candle::D::Minus1)?;
let context = weights.matmul(&v.contiguous()?)?;
let context = context
.transpose(1, 2)?
.reshape((bs, q_length, self.n_heads * dim_per_head))?
.contiguous()?;
let context = self.out_lin.forward(&context)?;
Ok(context)
}
}
#[allow(clippy::upper_case_acronyms)]
struct FFN {
lin1: Linear,
lin2: Linear,
activation: HiddenActLayer,
span: tracing::Span,
}
impl FFN {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let lin1 = linear(config.dim, config.hidden_dim, vb.pp("lin1"))?;
let lin2 = linear(config.hidden_dim, config.dim, vb.pp("lin2"))?;
Ok(Self {
lin1,
lin2,
activation: HiddenActLayer::new(config.activation),
span: tracing::span!(tracing::Level::TRACE, "ffn"),
})
}
}
impl Module for FFN {
fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
hidden_states
.apply(&self.lin1)?
.apply(&self.activation)?
.apply(&self.lin2)
}
}
struct TransformerBlock {
attention: MultiHeadSelfAttention,
sa_layer_norm: LayerNorm,
ffn: FFN,
output_layer_norm: LayerNorm,
span: tracing::Span,
}
impl TransformerBlock {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let attention = MultiHeadSelfAttention::load(vb.pp("attention"), config)?;
let sa_layer_norm = layer_norm(config.dim, 1e-12, vb.pp("sa_layer_norm"))?;
let ffn = FFN::load(vb.pp("ffn"), config)?;
let output_layer_norm = layer_norm(config.dim, 1e-12, vb.pp("output_layer_norm"))?;
Ok(Self {
attention,
sa_layer_norm,
ffn,
output_layer_norm,
span: tracing::span!(tracing::Level::TRACE, "layer"),
})
}
}
impl TransformerBlock {
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let sa_output = self.attention.forward(hidden_states, attention_mask)?;
// TODO: Support cross-attention?
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523
// TODO: Support something similar to `apply_chunking_to_forward`?
let sa_output = sa_output.broadcast_add(hidden_states)?;
let sa_output = self.sa_layer_norm.forward(&sa_output)?;
let ffn_output = self.ffn.forward(&sa_output)?;
let ffn_output = (&ffn_output + sa_output)?;
let output = self.output_layer_norm.forward(&ffn_output)?;
Ok(output)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L556
struct Transformer {
layers: Vec<TransformerBlock>,
span: tracing::Span,
}
impl Transformer {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let layers = (0..config.n_layers)
.map(|index| TransformerBlock::load(vb.pp(&format!("layer.{index}")), config))
.collect::<Result<Vec<_>>>()?;
let span = tracing::span!(tracing::Level::TRACE, "encoder");
Ok(Transformer { layers, span })
}
}
impl Transformer {
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut hidden_states = hidden_states.clone();
// Use a loop rather than a fold as it's easier to modify when adding debug/...
for layer in self.layers.iter() {
hidden_states = layer.forward(&hidden_states, attention_mask)?;
}
Ok(hidden_states)
}
}
pub struct DistilBertModel {
embeddings: Embeddings,
transformer: Transformer,
pub device: Device,
span: tracing::Span,
}
impl DistilBertModel {
pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let (embeddings, transformer) = match (
Embeddings::load(vb.pp("embeddings"), config),
Transformer::load(vb.pp("transformer"), config),
) {
(Ok(embeddings), Ok(encoder)) => (embeddings, encoder),
(Err(err), _) | (_, Err(err)) => {
if let Some(model_type) = &config.model_type {
if let (Ok(embeddings), Ok(encoder)) = (
Embeddings::load(vb.pp(&format!("{model_type}.embeddings")), config),
Transformer::load(vb.pp(&format!("{model_type}.transformer")), config),
) {
(embeddings, encoder)
} else {
return Err(err);
}
} else {
return Err(err);
}
}
};
Ok(Self {
embeddings,
transformer,
device: vb.device().clone(),
span: tracing::span!(tracing::Level::TRACE, "model"),
})
}
pub fn forward(&self, input_ids: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let embedding_output = self.embeddings.forward(input_ids)?;
let sequence_output = self
.transformer
.forward(&embedding_output, attention_mask)?;
Ok(sequence_output)
}
}
| candle/candle-transformers/src/models/distilbert.rs/0 | {
"file_path": "candle/candle-transformers/src/models/distilbert.rs",
"repo_id": "candle",
"token_count": 5381
} | 33 |
// Adapted from:
// https://github.com/ChaoningZhang/MobileSAM/blob/master/mobile_sam/modeling/tiny_vit_sam.py
use candle::{IndexOp, Result, Tensor, D};
use candle_nn::{Conv2dConfig, Module, VarBuilder};
const MBCONV_EXPAND_RATIO: usize = 4;
const MLP_RATIO: usize = 4;
const LOCAL_CONV_SIZE: usize = 3;
const IMG_SIZE: usize = 1024;
const IN_CHANNELS: usize = 3;
#[derive(Debug)]
struct Conv2dBN {
c: candle_nn::Conv2d,
bn: candle_nn::BatchNorm,
span: tracing::Span,
}
impl Conv2dBN {
fn new(in_: usize, out: usize, ks: usize, cfg: Conv2dConfig, vb: VarBuilder) -> Result<Self> {
let c = candle_nn::conv2d_no_bias(in_, out, ks, cfg, vb.pp("c"))?;
let bn = candle_nn::batch_norm(out, 1e-5, vb.pp("bn"))?;
let span = tracing::span!(tracing::Level::TRACE, "conv2d-bn");
Ok(Self { c, bn, span })
}
}
impl Module for Conv2dBN {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.c)?.apply_t(&self.bn, false)
}
}
#[derive(Debug)]
struct PatchEmbed {
conv1: Conv2dBN,
conv2: Conv2dBN,
span: tracing::Span,
}
impl PatchEmbed {
fn new(in_chans: usize, embed_dim: usize, vb: VarBuilder) -> Result<Self> {
let cfg = candle_nn::Conv2dConfig {
stride: 2,
padding: 1,
..Default::default()
};
let conv1 = Conv2dBN::new(in_chans, embed_dim / 2, 3, cfg, vb.pp("seq.0"))?;
let conv2 = Conv2dBN::new(embed_dim / 2, embed_dim, 3, cfg, vb.pp("seq.2"))?;
let span = tracing::span!(tracing::Level::TRACE, "patch-embed");
Ok(Self { conv1, conv2, span })
}
}
impl Module for PatchEmbed {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.conv1)?.gelu()?.apply(&self.conv2)
}
}
#[derive(Debug)]
struct MBConv {
conv1: Conv2dBN,
conv2: Conv2dBN,
conv3: Conv2dBN,
span: tracing::Span,
}
impl MBConv {
fn new(in_: usize, out: usize, expand_ratio: usize, vb: VarBuilder) -> Result<Self> {
let hidden = in_ * expand_ratio;
let cfg2 = candle_nn::Conv2dConfig {
padding: 1,
groups: hidden,
..Default::default()
};
let conv1 = Conv2dBN::new(in_, hidden, 1, Default::default(), vb.pp("conv1"))?;
let conv2 = Conv2dBN::new(hidden, hidden, 3, cfg2, vb.pp("conv2"))?;
let conv3 = Conv2dBN::new(hidden, out, 1, Default::default(), vb.pp("conv3"))?;
let span = tracing::span!(tracing::Level::TRACE, "mb-conv");
Ok(Self {
conv1,
conv2,
conv3,
span,
})
}
}
impl Module for MBConv {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let shortcut = xs;
let xs = xs
.apply(&self.conv1)?
.gelu()?
.apply(&self.conv2)?
.gelu()?
.apply(&self.conv3)?;
(xs + shortcut)?.gelu()
}
}
#[derive(Debug)]
struct PatchMerging {
conv1: Conv2dBN,
conv2: Conv2dBN,
conv3: Conv2dBN,
input_resolution: (usize, usize),
span: tracing::Span,
}
impl PatchMerging {
fn new(
input_resolution: (usize, usize),
dim: usize,
out: usize,
vb: VarBuilder,
) -> Result<Self> {
let stride = if [320, 448, 576].contains(&out) { 1 } else { 2 };
let cfg2 = candle_nn::Conv2dConfig {
padding: 1,
stride,
groups: out,
..Default::default()
};
let conv1 = Conv2dBN::new(dim, out, 1, Default::default(), vb.pp("conv1"))?;
let conv2 = Conv2dBN::new(out, out, 3, cfg2, vb.pp("conv2"))?;
let conv3 = Conv2dBN::new(out, out, 1, Default::default(), vb.pp("conv3"))?;
let span = tracing::span!(tracing::Level::TRACE, "patch-merging");
Ok(Self {
conv1,
conv2,
conv3,
input_resolution,
span,
})
}
}
impl Module for PatchMerging {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = if xs.rank() == 3 {
let (h, w) = self.input_resolution;
let b = xs.dim(0)?;
xs.reshape((b, h, w, ()))?.permute((0, 3, 1, 2))?
} else {
xs.clone()
};
xs.apply(&self.conv1)?
.gelu()?
.apply(&self.conv2)?
.gelu()?
.apply(&self.conv3)?
.flatten_from(2)?
.transpose(1, 2)
}
}
#[derive(Debug)]
struct ConvLayer {
blocks: Vec<MBConv>,
downsample: Option<PatchMerging>,
span: tracing::Span,
}
impl ConvLayer {
fn new(
dim: usize,
out: usize,
input_resolution: (usize, usize),
depth: usize,
downsample: bool,
conv_expand_ratio: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb_b = vb.pp("blocks");
let mut blocks = Vec::with_capacity(depth);
for index in 0..depth {
let block = MBConv::new(dim, dim, conv_expand_ratio, vb_b.pp(index))?;
blocks.push(block)
}
let downsample = if downsample {
let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?;
Some(downsample)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "conv-layer");
Ok(Self {
blocks,
downsample,
span,
})
}
}
impl Module for ConvLayer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for block in self.blocks.iter() {
xs = block.forward(&xs)?
}
match &self.downsample {
None => Ok(xs),
Some(downsample) => downsample.forward(&xs),
}
}
}
#[derive(Debug)]
struct Mlp {
norm: candle_nn::LayerNorm,
fc1: super::Linear,
fc2: super::Linear,
span: tracing::Span,
}
impl Mlp {
fn new(in_: usize, hidden: usize, vb: VarBuilder) -> Result<Self> {
let norm = candle_nn::layer_norm(in_, 1e-5, vb.pp("norm"))?;
let fc1 = super::linear(vb.pp("fc1"), in_, hidden, true)?;
let fc2 = super::linear(vb.pp("fc2"), hidden, in_, true)?;
let span = tracing::span!(tracing::Level::TRACE, "mlp");
Ok(Self {
norm,
fc1,
fc2,
span,
})
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
xs.apply(&self.norm)?
.apply(&self.fc1)?
.gelu()?
.apply(&self.fc2)
}
}
#[derive(Debug)]
struct Attention {
norm: candle_nn::LayerNorm,
qkv: super::Linear,
proj: super::Linear,
ab: Tensor,
key_dim: usize,
num_heads: usize,
d: usize,
dh: usize,
scale: f64,
span: tracing::Span,
span_matmul: tracing::Span,
span_softmax: tracing::Span,
}
impl Attention {
fn new(
dim: usize,
key_dim: usize,
num_heads: usize,
attn_ratio: usize,
resolution: (usize, usize),
vb: VarBuilder,
) -> Result<Self> {
let d = attn_ratio * key_dim;
let dh = d * num_heads;
let nh_kd = key_dim * num_heads;
let h = dh + nh_kd * 2;
let norm = candle_nn::layer_norm(dim, 1e-5, vb.pp("norm"))?;
let qkv = super::linear(vb.pp("qkv"), dim, h, true)?;
let proj = super::linear(vb.pp("proj"), dh, dim, true)?;
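        // Precompute the relative-position bias: every pair of positions in the
        // `resolution` grid is bucketed by its absolute (dx, dy) offset, and the
        // per-head learned biases are gathered into a (num_heads, N, N) tensor,
        // N = resolution.0 * resolution.1, which is added to the attention scores.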
let points = (0..resolution.0)
.flat_map(|x| (0..resolution.1).map(move |y| (x as i64, y as i64)))
.collect::<Vec<_>>();
let mut idxs = Vec::with_capacity(points.len() * points.len());
let mut attention_offsets = std::collections::HashMap::new();
for &(x1, y1) in points.iter() {
for &(x2, y2) in points.iter() {
let offset = ((x2 - x1).abs(), (y2 - y1).abs());
let l = attention_offsets.len();
let idx = attention_offsets.entry(offset).or_insert(l);
idxs.push(*idx as u32)
}
}
let attention_biases = vb.get((num_heads, attention_offsets.len()), "attention_biases")?;
let idxs = Tensor::new(idxs, attention_biases.device())?;
let ab =
attention_biases
.index_select(&idxs, 1)?
.reshape(((), points.len(), points.len()))?;
let span = tracing::span!(tracing::Level::TRACE, "attention");
let span_matmul = tracing::span!(tracing::Level::TRACE, "attn-matmul");
let span_softmax = tracing::span!(tracing::Level::TRACE, "attn-sm");
Ok(Self {
norm,
qkv,
proj,
ab,
key_dim,
num_heads,
d,
dh,
scale: 1f64 / (key_dim as f64).sqrt(),
span,
span_matmul,
span_softmax,
})
}
}
impl Module for Attention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b, n, _) = xs.dims3()?;
let xs = xs.apply(&self.norm)?;
let qkv = xs.apply(&self.qkv)?.reshape((b, n, self.num_heads, ()))?;
let q = qkv
.narrow(D::Minus1, 0, self.key_dim)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let k = qkv
.narrow(D::Minus1, self.key_dim, self.key_dim)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let v = qkv
.narrow(D::Minus1, 2 * self.key_dim, self.d)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let attn = {
let _enter = self.span_matmul.enter();
(q.matmul(&k.t()?)? * self.scale)?
};
let attn = attn.broadcast_add(&self.ab)?;
let attn = {
let _enter = self.span_softmax.enter();
candle_nn::ops::softmax_last_dim(&attn)?
};
let attn = {
let _enter = self.span_matmul.enter();
attn.matmul(&v)?
};
attn.transpose(1, 2)?
.reshape((b, n, self.dh))?
.apply(&self.proj)
}
}
#[derive(Debug)]
struct TinyViTBlock {
attn: Attention,
local_conv: Conv2dBN,
mlp: Mlp,
window_size: usize,
input_resolution: (usize, usize),
span: tracing::Span,
}
impl TinyViTBlock {
fn new(
dim: usize,
input_resolution: (usize, usize),
num_heads: usize,
window_size: usize,
vb: VarBuilder,
) -> Result<Self> {
let head_dim = dim / num_heads;
let attn = Attention::new(
dim,
head_dim,
num_heads,
1,
(window_size, window_size),
vb.pp("attn"),
)?;
let mlp = Mlp::new(dim, dim * MLP_RATIO, vb.pp("mlp"))?;
let cfg = candle_nn::Conv2dConfig {
padding: LOCAL_CONV_SIZE / 2,
groups: dim,
..Default::default()
};
let local_conv = Conv2dBN::new(dim, dim, LOCAL_CONV_SIZE, cfg, vb.pp("local_conv"))?;
let span = tracing::span!(tracing::Level::TRACE, "attention");
Ok(Self {
attn,
local_conv,
mlp,
window_size,
input_resolution,
span,
})
}
}
impl Module for TinyViTBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (h, w) = self.input_resolution;
let (b, l, c) = xs.dims3()?;
let res_x = xs;
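        // When the feature map is larger than the attention window: pad H and W up
        // to multiples of `window_size`, regroup the padded map into b * n_h * n_w
        // independent windows of window_size * window_size tokens, run attention on
        // each window, then undo the partitioning and crop the padding away.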
let xs = if h == self.window_size && w == self.window_size {
self.attn.forward(xs)?
} else {
let xs = xs.reshape((b, h, w, c))?;
let pad_b = (self.window_size - h % self.window_size) % self.window_size;
let pad_r = (self.window_size - w % self.window_size) % self.window_size;
let xs = if pad_b > 0 {
xs.pad_with_zeros(1, 0, pad_b)?
} else {
xs
};
let xs = if pad_r > 0 {
xs.pad_with_zeros(2, 0, pad_r)?
} else {
xs
};
let (p_h, p_w) = (h + pad_b, w + pad_r);
let n_h = p_h / self.window_size;
let n_w = p_w / self.window_size;
let xs = xs
.reshape((b, n_h, self.window_size, n_w, self.window_size, c))?
.transpose(2, 3)?
.reshape((b * n_h * n_w, self.window_size * self.window_size, c))?;
let xs = self.attn.forward(&xs)?;
let xs = xs
.reshape((b, n_h, n_w, self.window_size, self.window_size, c))?
.transpose(2, 3)?
.reshape((b, p_h, p_w, c))?;
let xs = if pad_r > 0 {
xs.i((.., .., ..w))?.contiguous()?
} else {
xs
};
let xs = if pad_b > 0 {
xs.i((.., ..h, ..))?.contiguous()?
} else {
xs
};
xs.reshape((b, l, c))?
};
let xs = (xs + res_x)?;
let xs = xs
.transpose(1, 2)?
.reshape((b, c, h, w))?
.apply(&self.local_conv)?
.reshape((b, c, l))?
.transpose(1, 2)?;
&xs + self.mlp.forward(&xs)?
}
}
#[derive(Debug)]
struct BasicLayer {
blocks: Vec<TinyViTBlock>,
downsample: Option<PatchMerging>,
span: tracing::Span,
}
impl BasicLayer {
#[allow(clippy::too_many_arguments)]
fn new(
dim: usize,
input_resolution: (usize, usize),
depth: usize,
num_heads: usize,
window_size: usize,
downsample: bool,
out: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb_b = vb.pp("blocks");
let mut blocks = Vec::with_capacity(depth);
for index in 0..depth {
let block = TinyViTBlock::new(
dim,
input_resolution,
num_heads,
window_size,
vb_b.pp(index),
)?;
blocks.push(block)
}
let downsample = if downsample {
let downsample = PatchMerging::new(input_resolution, dim, out, vb.pp("downsample"))?;
Some(downsample)
} else {
None
};
let span = tracing::span!(tracing::Level::TRACE, "basic-layer");
Ok(Self {
blocks,
downsample,
span,
})
}
}
impl Module for BasicLayer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for block in self.blocks.iter() {
xs = block.forward(&xs)?
}
match &self.downsample {
None => Ok(xs),
Some(downsample) => downsample.forward(&xs),
}
}
}
#[derive(Debug)]
pub struct TinyViT {
patch_embed: PatchEmbed,
layer0: ConvLayer,
layers: Vec<BasicLayer>,
// norm_head: candle_nn::LayerNorm,
// head: candle_nn::Linear,
neck_conv1: candle_nn::Conv2d,
neck_ln1: super::LayerNorm2d,
neck_conv2: candle_nn::Conv2d,
neck_ln2: super::LayerNorm2d,
span: tracing::Span,
span_neck: tracing::Span,
}
impl TinyViT {
pub fn new(
embed_dims: &[usize],
depths: &[usize],
num_heads: &[usize],
window_sizes: &[usize],
_num_classes: usize,
vb: VarBuilder,
) -> Result<Self> {
let patch_embed = PatchEmbed::new(IN_CHANNELS, embed_dims[0], vb.pp("patch_embed"))?;
let patches_resolution = IMG_SIZE / 4;
let vb_l = vb.pp("layers");
let layer0 = ConvLayer::new(
/* dim */ embed_dims[0],
/* out */ embed_dims[1],
/* input_resolution */ (patches_resolution, patches_resolution),
/* depth */ depths[0],
/* downsample */ true,
/* conv_expand_ratio */ MBCONV_EXPAND_RATIO,
vb_l.pp(0),
)?;
let num_layers = embed_dims.len();
let mut layers = Vec::with_capacity(num_layers - 1);
for i_layer in 1..num_layers {
let patches_resolution = patches_resolution / (1 << usize::min(i_layer, 2));
let layer = BasicLayer::new(
/* dim */ embed_dims[i_layer],
/* input_resolution */ (patches_resolution, patches_resolution),
/* depth */ depths[i_layer],
/* num_heads */ num_heads[i_layer],
/* window_size */ window_sizes[i_layer],
/* downsample */ i_layer < num_layers - 1,
/* out */ embed_dims[usize::min(i_layer + 1, num_layers - 1)],
vb_l.pp(i_layer),
)?;
layers.push(layer)
}
let last_embed_dim = embed_dims[embed_dims.len() - 1];
// let norm_head = candle_nn::layer_norm(last_embed_dim, 1e-5, vb.pp("norm_head"))?;
// let head = candle_nn::linear(last_embed_dim, num_classes, vb.pp("head"))?;
let neck_conv1 =
candle_nn::conv2d_no_bias(last_embed_dim, 256, 1, Default::default(), vb.pp("neck.0"))?;
let neck_ln1 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.1"))?;
let cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let neck_conv2 = candle_nn::conv2d_no_bias(256, 256, 3, cfg, vb.pp("neck.2"))?;
let neck_ln2 = super::LayerNorm2d::new(256, 1e-6, vb.pp("neck.3"))?;
let span = tracing::span!(tracing::Level::TRACE, "tiny-vit");
let span_neck = tracing::span!(tracing::Level::TRACE, "neck");
Ok(Self {
patch_embed,
layer0,
layers,
neck_conv1,
neck_ln1,
neck_conv2,
neck_ln2,
span,
span_neck,
})
}
}
impl Module for TinyViT {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = self.patch_embed.forward(xs)?;
let mut xs = self.layer0.forward(&xs)?;
for layer in self.layers.iter() {
xs = layer.forward(&xs)?
}
let (b, _, c) = xs.dims3()?;
let _enter = self.span_neck.enter();
xs.reshape((b, 64, 64, c))?
.permute((0, 3, 1, 2))?
.apply(&self.neck_conv1)?
.apply(&self.neck_ln1)?
.apply(&self.neck_conv2)?
.apply(&self.neck_ln2)
}
}
pub fn tiny_vit_5m(vb: VarBuilder) -> Result<TinyViT> {
TinyViT::new(
/* embed_dims */ &[64, 128, 160, 320],
/* depths */ &[2, 2, 6, 2],
/* num_heads */ &[2, 4, 5, 10],
/* window_sizes */ &[7, 7, 14, 7],
/* num_classes */ 1000,
vb,
)
}
| candle/candle-transformers/src/models/segment_anything/tiny_vit.rs/0 | {
"file_path": "candle/candle-transformers/src/models/segment_anything/tiny_vit.rs",
"repo_id": "candle",
"token_count": 10372
} | 34 |
// T5 Text Model
// https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py
use crate::models::with_tracing::{linear_no_bias, Embedding, Linear};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use serde::Deserialize;
use std::sync::Arc;
fn default_relative_attention_max_distance() -> usize {
128
}
fn default_is_decoder() -> bool {
false
}
fn default_use_cache() -> bool {
true
}
fn default_tie_word_embeddings() -> bool {
true
}
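// Upper-triangular causal mask: entry (i, j) is 1 when j > i (a future position)
// and 0 otherwise, e.g. get_mask(3) yields [[0, 1, 1], [0, 0, 1], [0, 0, 0]].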
fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
let mask: Vec<_> = (0..size)
.flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
.collect();
Tensor::from_slice(&mask, (size, size), device)
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[derive(Debug, Deserialize, Default, Clone, PartialEq)]
pub struct ActivationWithOptionalGating {
pub gated: bool,
pub activation: candle_nn::Activation,
}
pub fn deserialize_feed_forward_proj_activation<'de, D>(
deserializer: D,
) -> std::result::Result<ActivationWithOptionalGating, D::Error>
where
D: serde::de::Deserializer<'de>,
{
match String::deserialize(deserializer)?.as_str() {
"gated-gelu" => Ok(ActivationWithOptionalGating {
gated: true,
activation: candle_nn::Activation::NewGelu,
}),
"gated-silu" => Ok(ActivationWithOptionalGating {
gated: true,
activation: candle_nn::Activation::Silu,
}),
buf => {
let activation = serde_plain::from_str(buf).map_err(serde::de::Error::custom)?;
Ok(ActivationWithOptionalGating {
gated: false,
activation,
})
}
}
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
vocab_size: usize,
d_model: usize,
d_kv: usize,
d_ff: usize,
num_layers: usize,
num_decoder_layers: Option<usize>,
num_heads: usize,
relative_attention_num_buckets: usize,
#[serde(default = "default_relative_attention_max_distance")]
relative_attention_max_distance: usize,
dropout_rate: f64,
layer_norm_epsilon: f64,
initializer_factor: f64,
#[serde(default, deserialize_with = "deserialize_feed_forward_proj_activation")]
feed_forward_proj: ActivationWithOptionalGating,
#[serde(default = "default_tie_word_embeddings")]
tie_word_embeddings: bool,
#[serde(default = "default_is_decoder")]
is_decoder: bool,
is_encoder_decoder: bool,
#[serde(default = "default_use_cache")]
pub use_cache: bool,
pub pad_token_id: usize,
pub eos_token_id: usize,
pub decoder_start_token_id: Option<usize>,
}
impl Default for Config {
fn default() -> Self {
Self {
vocab_size: 32128,
d_model: 512,
d_kv: 64,
d_ff: 2048,
num_layers: 6,
num_decoder_layers: None,
num_heads: 8,
relative_attention_num_buckets: 32,
relative_attention_max_distance: 128,
dropout_rate: 0.1,
layer_norm_epsilon: 1e-6,
initializer_factor: 1.0,
feed_forward_proj: ActivationWithOptionalGating {
gated: false,
activation: Activation::Relu,
},
tie_word_embeddings: true,
is_decoder: false,
is_encoder_decoder: true,
use_cache: true,
pad_token_id: 0,
eos_token_id: 1,
decoder_start_token_id: Some(0),
}
}
}
impl Config {
// https://huggingface.co/facebook/musicgen-small/blob/495da4ad086b3416a27c6187f9239f9fd96f3962/config.json#L184
pub fn musicgen_small() -> Self {
Self {
d_ff: 3072,
d_kv: 64,
d_model: 768,
dropout_rate: 0.1,
eos_token_id: 1,
feed_forward_proj: ActivationWithOptionalGating {
gated: false,
activation: Activation::Relu,
},
tie_word_embeddings: true,
initializer_factor: 1.0,
is_decoder: false,
is_encoder_decoder: true,
layer_norm_epsilon: 1e-6,
num_decoder_layers: Some(12),
num_heads: 12,
num_layers: 12,
pad_token_id: 0,
decoder_start_token_id: Some(0),
relative_attention_max_distance: 128,
relative_attention_num_buckets: 32,
use_cache: true,
vocab_size: 32128,
}
}
}
#[derive(Debug, Clone)]
struct T5LayerNorm {
weight: Tensor,
variance_epsilon: f64,
span: tracing::Span,
}
impl T5LayerNorm {
fn load(h: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let weight = vb.get(h, "weight")?;
Ok(Self {
weight,
variance_epsilon: eps,
span: tracing::span!(tracing::Level::TRACE, "layer-norm"),
})
}
}
impl Module for T5LayerNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let dtype = xs.dtype();
let xs_f32 = xs.to_dtype(DType::F32)?;
// variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
let variance = xs_f32.sqr()?.mean_keepdim(D::Minus1)?;
let xs = xs.broadcast_div(&(variance + self.variance_epsilon)?.sqrt()?)?;
let xs = xs.to_dtype(dtype)?;
let xs = xs.broadcast_mul(&self.weight)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct T5DenseActDense {
wi: Linear,
wo: Linear,
act: Activation,
span: tracing::Span,
}
impl T5DenseActDense {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let wi = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi"))?;
let wo = linear_no_bias(cfg.d_ff, cfg.d_model, vb.pp("wo"))?;
Ok(Self {
wi,
wo,
act: Activation::Relu,
span: tracing::span!(tracing::Level::TRACE, "dense-act-dense"),
})
}
}
impl Module for T5DenseActDense {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = self.wi.forward(xs)?;
let xs = self.act.forward(&xs)?;
let xs = self.wo.forward(&xs)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct T5DenseGatedActDense {
wi_0: Linear,
wi_1: Linear,
wo: Linear,
act: Activation,
span: tracing::Span,
}
impl T5DenseGatedActDense {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let wi_0 = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi_0"))?;
let wi_1 = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi_1"))?;
let wo = linear_no_bias(cfg.d_ff, cfg.d_model, vb.pp("wo"))?;
Ok(Self {
wi_0,
wi_1,
wo,
act: cfg.feed_forward_proj.activation,
span: tracing::span!(tracing::Level::TRACE, "dense-gated-act-dense"),
})
}
}
impl Module for T5DenseGatedActDense {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hidden_gelu = self.act.forward(&self.wi_0.forward(xs)?)?;
let hidden_linear = self.wi_1.forward(xs)?;
let xs = hidden_gelu.broadcast_mul(&hidden_linear)?;
let xs = self.wo.forward(&xs)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct T5LayerFF {
dense_act: Option<T5DenseActDense>,
gated_dense_act: Option<T5DenseGatedActDense>,
layer_norm: T5LayerNorm,
span: tracing::Span,
}
impl T5LayerFF {
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let layer_norm =
T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
let (dense_act, gated_dense_act) = if cfg.feed_forward_proj.gated {
(
None,
Some(T5DenseGatedActDense::load(vb.pp("DenseReluDense"), cfg)?),
)
} else {
(
Some(T5DenseActDense::load(vb.pp("DenseReluDense"), cfg)?),
None,
)
};
Ok(Self {
dense_act,
gated_dense_act,
layer_norm,
span: tracing::span!(tracing::Level::TRACE, "layer-ff"),
})
}
}
impl Module for T5LayerFF {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let ys = self.layer_norm.forward(xs)?;
let ys = match &self.dense_act {
Some(dense_act) => dense_act.forward(&ys)?,
None => self.gated_dense_act.as_ref().unwrap().forward(&ys)?,
};
let xs = (xs + ys)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct T5Attention {
q: Linear,
k: Linear,
v: Linear,
o: Linear,
n_heads: usize,
d_kv: usize,
relative_attention_bias: Option<Embedding>,
relative_attention_num_buckets: usize,
relative_attention_max_distance: usize,
inner_dim: usize,
use_cache: bool,
kv_cache: Option<(Tensor, Tensor)>,
span: tracing::Span,
span_cache: tracing::Span,
span_mm: tracing::Span,
span_sm: tracing::Span,
}
impl T5Attention {
fn load(
has_relative_attention_bias: bool,
decoder: bool,
vb: VarBuilder,
cfg: &Config,
) -> Result<Self> {
let inner_dim = cfg.num_heads * cfg.d_kv;
let q = linear_no_bias(cfg.d_model, inner_dim, vb.pp("q"))?;
let k = linear_no_bias(cfg.d_model, inner_dim, vb.pp("k"))?;
let v = linear_no_bias(cfg.d_model, inner_dim, vb.pp("v"))?;
let o = linear_no_bias(inner_dim, cfg.d_model, vb.pp("o"))?;
let relative_attention_bias = if has_relative_attention_bias {
let emb = Embedding::new(
cfg.relative_attention_num_buckets,
cfg.num_heads,
vb.pp("relative_attention_bias"),
)?;
Some(emb)
} else {
None
};
Ok(Self {
q,
k,
v,
o,
n_heads: cfg.num_heads,
d_kv: cfg.d_kv,
relative_attention_bias,
relative_attention_num_buckets: cfg.relative_attention_num_buckets,
relative_attention_max_distance: cfg.relative_attention_max_distance,
inner_dim,
use_cache: cfg.use_cache && decoder,
kv_cache: None,
span: tracing::span!(tracing::Level::TRACE, "attention"),
span_cache: tracing::span!(tracing::Level::TRACE, "attention-cache"),
span_mm: tracing::span!(tracing::Level::TRACE, "attention-mm"),
span_sm: tracing::span!(tracing::Level::TRACE, "attention-sm"),
})
}
fn forward(
&mut self,
xs: &Tensor,
position_bias: Option<&Tensor>,
key_value_states: Option<&Tensor>,
mask: Option<&Tensor>,
) -> Result<(Tensor, Option<Tensor>)> {
// Performs Self-attention (if key_value_states is None) or attention
// over source sentence (provided by key_value_states).
let _enter = self.span.enter();
let kv_input = match key_value_states {
None => xs,
Some(key_value_states) => key_value_states,
};
let (b_sz, q_len) = (xs.dim(0)?, xs.dim(1)?);
let kv_len = kv_input.dim(1)?;
let q = self.q.forward(xs)?;
let k = self.k.forward(kv_input)?;
let v = self.v.forward(kv_input)?;
let q = q
.reshape((b_sz, q_len, self.n_heads, self.d_kv))?
.transpose(1, 2)?
.contiguous()?;
let mut k = k
.reshape((b_sz, kv_len, self.n_heads, self.d_kv))?
.transpose(1, 2)?;
let mut v = v
.reshape((b_sz, kv_len, self.n_heads, self.d_kv))?
.transpose(1, 2)?;
if self.use_cache && key_value_states.is_none() {
let _enter = self.span_cache.enter();
if let Some((kv_cache_k, kv_cache_v)) = &self.kv_cache {
k = Tensor::cat(&[kv_cache_k, &k], 2)?;
v = Tensor::cat(&[kv_cache_v, &v], 2)?;
};
self.kv_cache = Some((k.clone(), v.clone()));
};
let k = k.contiguous()?;
let v = v.contiguous()?;
// TODO: Use flash_attn.
let scores = {
let _enter = self.span_mm.enter();
q.matmul(&k.t()?)?
};
let scores = match mask {
None => scores,
Some(mask) => masked_fill(
&scores,
&mask
.unsqueeze(0)?
.unsqueeze(0)?
.repeat((b_sz, self.n_heads))?,
f32::NEG_INFINITY,
)?,
};
let (scores, position_bias) = match position_bias {
Some(position_bias) => (
scores.broadcast_add(position_bias)?,
Some(position_bias.clone()),
),
None => match &self.relative_attention_bias {
None => (scores, None),
Some(relative_attention_bias) => {
// This only handles the bidirectional case.
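                    // Buckets are split in two halves: one for j > i (keys after the
                    // query, offset by `num_buckets`) and one for j <= i. Within each
                    // half, offsets smaller than `max_exact` get their own bucket and
                    // larger offsets are mapped logarithmically, capped by
                    // `relative_attention_max_distance`.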
let kv_len = k.dim(2)?;
let (q_start, q_end) = match self.use_cache {
true => ((kv_len - q_len) as u32, kv_len as u32),
false => (0_u32, kv_len as u32),
};
let num_buckets = self.relative_attention_num_buckets as u32 / 2;
let max_exact = num_buckets / 2;
let relative_position = (q_start..q_end)
.map(|i| {
(0..kv_len as u32)
.map(|j| {
if i < j {
if j - i < max_exact {
j - i + num_buckets
} else {
let b = f32::log(
(j - i) as f32 / max_exact as f32,
self.relative_attention_max_distance as f32
/ max_exact as f32,
) * (num_buckets - max_exact) as f32;
u32::min(
max_exact + num_buckets + b as u32,
self.relative_attention_num_buckets as u32 - 1,
)
}
} else if i - j < max_exact {
i - j
} else {
let b = f32::log(
(i - j) as f32 / max_exact as f32,
self.relative_attention_max_distance as f32
/ max_exact as f32,
) * (num_buckets - max_exact) as f32;
u32::min(max_exact + b as u32, num_buckets - 1)
}
})
.collect::<Vec<u32>>()
})
.collect::<Vec<Vec<_>>>();
let relative_buckets = Tensor::new(relative_position, q.device())?;
let position_bias = relative_attention_bias
.forward(&relative_buckets)?
.permute((2, 0, 1))?
.unsqueeze(0)?;
(scores.broadcast_add(&position_bias)?, Some(position_bias))
// TODO: position_bias_masked?
}
},
};
let attn_weights = {
let _enter = self.span_sm.enter();
candle_nn::ops::softmax_last_dim(&scores)?
};
let attn_output = attn_weights.matmul(&v)?;
let attn_output = attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.inner_dim))?;
let attn_output = self.o.forward(&attn_output)?;
Ok((attn_output, position_bias))
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct T5LayerSelfAttention {
self_attention: T5Attention,
layer_norm: T5LayerNorm,
span: tracing::Span,
}
impl T5LayerSelfAttention {
fn load(h: bool, d: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> {
let self_attention = T5Attention::load(h, d, vb.pp("SelfAttention"), cfg)?;
let layer_norm =
T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
Ok(Self {
self_attention,
layer_norm,
span: tracing::span!(tracing::Level::TRACE, "self-attn"),
})
}
fn forward(
&mut self,
xs: &Tensor,
position_bias: Option<&Tensor>,
mask: Option<&Tensor>,
) -> Result<(Tensor, Option<Tensor>)> {
let _enter = self.span.enter();
let normed_xs = self.layer_norm.forward(xs)?;
let (ys, position_bias) =
self.self_attention
.forward(&normed_xs, position_bias, None, mask)?;
let ys = (xs + ys)?;
Ok((ys, position_bias))
}
fn clear_kv_cache(&mut self) {
self.self_attention.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
struct T5LayerCrossAttention {
cross_attention: T5Attention,
layer_norm: T5LayerNorm,
span: tracing::Span,
}
impl T5LayerCrossAttention {
fn load(decoder: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> {
let cross_attention = T5Attention::load(false, decoder, vb.pp("EncDecAttention"), cfg)?;
let layer_norm =
T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
Ok(Self {
cross_attention,
layer_norm,
span: tracing::span!(tracing::Level::TRACE, "cross-attn"),
})
}
fn forward(
&mut self,
hidden_states: &Tensor,
position_bias: Option<&Tensor>,
key_value_states: &Tensor,
) -> Result<(Tensor, Option<Tensor>)> {
let _enter = self.span.enter();
let normed_hidden_states = self.layer_norm.forward(hidden_states)?;
let (ys, position_bias) = self.cross_attention.forward(
&normed_hidden_states,
position_bias,
Some(key_value_states),
None,
)?;
let ys = (hidden_states + ys)?;
Ok((ys, position_bias))
}
fn clear_kv_cache(&mut self) {
self.cross_attention.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
struct T5Block {
self_attn: T5LayerSelfAttention,
cross_attn: Option<T5LayerCrossAttention>,
ff: T5LayerFF,
span: tracing::Span,
}
impl T5Block {
fn load(
has_relative_attention_bias: bool,
decoder: bool,
vb: VarBuilder,
cfg: &Config,
) -> Result<Self> {
let vb = vb.pp("layer");
let self_attn =
T5LayerSelfAttention::load(has_relative_attention_bias, decoder, vb.pp("0"), cfg)?;
let cross_attn = if cfg.is_decoder {
Some(T5LayerCrossAttention::load(decoder, vb.pp("1"), cfg)?)
} else {
None
};
let ff_i = if cross_attn.is_some() { 2 } else { 1 };
let ff = T5LayerFF::load(vb.pp(&ff_i.to_string()), cfg)?;
Ok(Self {
self_attn,
cross_attn,
ff,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(
&mut self,
xs: &Tensor,
position_bias: Option<&Tensor>,
encoder_hidden_states: Option<&Tensor>,
) -> Result<(Tensor, Option<Tensor>)> {
let _enter = self.span.enter();
// TODO: Cache masks
let mask = match self.cross_attn.is_some() {
true => {
let mask_len = xs.dim(1)?;
// If the input seq length is 1, no need for a mask, this is also helpful to avoid shape
// issues when using the KV cache in the decoder.
if mask_len <= 1 {
None
} else {
Some(get_mask(mask_len, xs.device())?)
}
}
false => None,
};
let (mut xs, position_bias) = self.self_attn.forward(xs, position_bias, mask.as_ref())?;
// TODO: clamp for f16?
if let Some(cross_attn) = &mut self.cross_attn {
(xs, _) = cross_attn.forward(&xs, None, encoder_hidden_states.unwrap())?;
// TODO: clamp for f16?
}
let xs = self.ff.forward(&xs)?;
// TODO: clamp for f16?
Ok((xs, position_bias))
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache();
self.cross_attn.iter_mut().for_each(|c| c.clear_kv_cache());
}
}
#[derive(Debug, Clone)]
struct T5Stack {
block: Vec<T5Block>,
shared: Arc<Embedding>,
final_layer_norm: T5LayerNorm,
span: tracing::Span,
}
impl T5Stack {
fn load(decoder: bool, vb: VarBuilder, shared: &Arc<Embedding>, cfg: &Config) -> Result<Self> {
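        // Only the first block carries the relative attention bias; the position
        // bias it produces is threaded through to the remaining blocks in `forward`.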
let block = (0..cfg.num_layers)
.map(|i| T5Block::load(i == 0, decoder, vb.pp(&format!("block.{i}")), cfg))
.collect::<Result<Vec<_>>>()?;
let final_layer_norm = T5LayerNorm::load(
cfg.d_model,
cfg.layer_norm_epsilon,
vb.pp("final_layer_norm"),
)?;
Ok(Self {
block,
shared: shared.clone(),
final_layer_norm,
span: tracing::span!(tracing::Level::TRACE, "stack"),
})
}
fn forward(
&mut self,
input_ids: &Tensor,
encoder_hidden_states: Option<&Tensor>,
) -> Result<Tensor> {
let _enter = self.span.enter();
let input_embeds = self.shared.as_ref().forward(input_ids)?;
let mut hidden_states = input_embeds;
let mut position_bias = None;
for block in self.block.iter_mut() {
(hidden_states, position_bias) = block.forward(
&hidden_states,
position_bias.as_ref(),
encoder_hidden_states,
)?
}
self.final_layer_norm.forward(&hidden_states)
}
fn clear_kv_cache(&mut self) {
self.block.iter_mut().for_each(|b| b.clear_kv_cache())
}
}
#[derive(Debug, Clone)]
pub struct T5EncoderModel {
encoder: T5Stack,
device: Device,
span: tracing::Span,
}
impl T5EncoderModel {
pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let shared_vb = if vb.contains_tensor("shared.weight") {
vb.pp("shared")
} else {
vb.pp("decoder").pp("embed_tokens")
};
let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?;
let shared = Arc::new(shared);
let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, cfg)?;
Ok(Self {
encoder,
device: vb.device().clone(),
span: tracing::span!(tracing::Level::TRACE, "encoder"),
})
}
pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.encoder.forward(input_ids, None)
}
pub fn device(&self) -> &Device {
&self.device
}
pub fn clear_kv_cache(&mut self) {
self.encoder.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct T5ForConditionalGeneration {
encoder: T5Stack,
decoder: T5Stack,
d_model: usize,
tie_word_embeddings: bool,
lm_head: Option<Linear>,
shared: Arc<Embedding>,
device: Device,
span_decode: tracing::Span,
span_decode_head: tracing::Span,
}
impl T5ForConditionalGeneration {
pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
assert!(cfg.is_encoder_decoder);
let d_model = cfg.d_model;
let shared_vb = if vb.contains_tensor("shared.weight") {
vb.pp("shared")
} else {
vb.pp("decoder").pp("embed_tokens")
};
let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?;
let shared = Arc::new(shared);
let mut encoder_cfg = cfg.clone();
encoder_cfg.is_decoder = false;
encoder_cfg.use_cache = false;
encoder_cfg.is_encoder_decoder = false;
let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, &encoder_cfg)?;
let mut decoder_cfg = cfg.clone();
decoder_cfg.is_decoder = true;
decoder_cfg.is_encoder_decoder = false;
decoder_cfg.num_layers = cfg.num_decoder_layers.unwrap_or(cfg.num_layers);
let decoder = T5Stack::load(true, vb.pp("decoder"), &shared, &decoder_cfg)?;
let tie_word_embeddings = cfg.tie_word_embeddings;
let lm_head = if tie_word_embeddings {
None
} else {
Some(linear_no_bias(
cfg.d_model,
cfg.vocab_size,
vb.pp("lm_head"),
)?)
};
Ok(Self {
encoder,
decoder,
d_model,
tie_word_embeddings,
lm_head,
shared,
device: vb.device().clone(),
span_decode: tracing::span!(tracing::Level::TRACE, "decode"),
span_decode_head: tracing::span!(tracing::Level::TRACE, "decode-head"),
})
}
pub fn encode(&mut self, input_ids: &Tensor) -> Result<Tensor> {
self.encoder.forward(input_ids, None)
}
pub fn decode(
&mut self,
decoder_input_ids: &Tensor,
encoder_output: &Tensor,
) -> Result<Tensor> {
let _enter = self.span_decode.enter();
let decoder_output = self
.decoder
.forward(decoder_input_ids, Some(encoder_output))?;
let scaling_factor = if self.tie_word_embeddings {
// Rescale output before projecting on vocab
// See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
(self.d_model as f64).sqrt()
} else {
1.0
};
let sequence_output = ((decoder_output
.narrow(1, decoder_output.dim(1)? - 1, 1)?
.squeeze(1)?)
* scaling_factor)?;
let output = {
let _enter = self.span_decode_head.enter();
match self.lm_head {
None => sequence_output.matmul(&self.shared.embeddings().t()?)?,
Some(ref lm_head) => lm_head.forward(&sequence_output)?,
}
};
Ok(output)
}
pub fn forward(&mut self, input_ids: &Tensor, decoder_input_ids: &Tensor) -> Result<Tensor> {
let encoder_output = self.encode(input_ids)?;
self.decode(decoder_input_ids, &encoder_output)
}
pub fn device(&self) -> &Device {
&self.device
}
pub fn clear_kv_cache(&mut self) {
self.encoder.clear_kv_cache();
self.decoder.clear_kv_cache();
}
}
| candle/candle-transformers/src/models/t5.rs/0 | {
"file_path": "candle/candle-transformers/src/models/t5.rs",
"repo_id": "candle",
"token_count": 15016
} | 35 |
/// https://huggingface.co/01-ai/Yi-6B/blob/main/modeling_yi.py
use crate::models::with_tracing::{linear_no_bias, Linear};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
pub(crate) vocab_size: usize,
pub(crate) hidden_size: usize,
pub(crate) intermediate_size: usize,
pub(crate) num_hidden_layers: usize,
pub(crate) num_attention_heads: usize,
pub(crate) num_key_value_heads: usize,
pub(crate) hidden_act: Activation,
pub(crate) max_position_embeddings: usize,
pub(crate) rms_norm_eps: f64,
pub(crate) rope_theta: f64,
}
impl Config {
pub fn config_6b() -> Self {
Self {
vocab_size: 64000,
hidden_size: 4096,
intermediate_size: 11008,
num_hidden_layers: 32,
num_attention_heads: 32,
num_key_value_heads: 4,
hidden_act: Activation::Silu,
max_position_embeddings: 4096,
rms_norm_eps: 1e-5,
rope_theta: 5_000_000.,
}
}
pub fn config_34b() -> Self {
Self {
vocab_size: 64000,
hidden_size: 7168,
intermediate_size: 20480,
num_hidden_layers: 60,
num_attention_heads: 56,
num_key_value_heads: 8,
hidden_act: Activation::Silu,
max_position_embeddings: 4096,
rms_norm_eps: 1e-5,
rope_theta: 5_000_000.,
}
}
}
#[derive(Debug, Clone)]
struct RmsNorm {
inner: candle_nn::RmsNorm,
span: tracing::Span,
}
impl RmsNorm {
fn new(size: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "rms-norm");
let inner = candle_nn::rms_norm(size, eps, vb)?;
Ok(Self { inner, span })
}
}
impl Module for RmsNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(x)
}
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
fn rotate_half(xs: &Tensor) -> Result<Tensor> {
let last_dim = xs.dim(D::Minus1)?;
let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?;
let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?;
Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1)
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
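        // Note: the inverse frequencies below use a fixed base of 10_000;
        // `cfg.rope_theta` is not applied in this implementation.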
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
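        // Gated feed-forward: the activation of the gate projection is multiplied
        // element-wise by the up projection, then projected back down.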
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
kv_cache: None,
})
}
fn repeat_kv(&self, xs: Tensor) -> Result<Tensor> {
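        // Grouped-query attention: duplicate each key/value head `n_rep` times so
        // the KV tensors match the number of query heads.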
let n_rep = self.num_kv_groups;
if n_rep == 1 {
Ok(xs)
} else {
let (b_sz, num_kv_heads, seq_len, head_dim) = xs.dims4()?;
xs.unsqueeze(2)?
.expand((b_sz, num_kv_heads, n_rep, seq_len, head_dim))?
.reshape((b_sz, num_kv_heads * n_rep, seq_len, head_dim))
}
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = self.repeat_kv(key_states)?;
let value_states = self.repeat_kv(value_states)?;
let attn_output = {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
ln1: RmsNorm,
ln2: RmsNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let ln1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let ln2 = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
ln1,
ln2,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.ln1.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.ln2)?.apply(&self.mlp)?;
residual + xs
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
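        // Causal mask: each position may only attend to itself and earlier tokens;
        // the cached prefix of length `seqlen_offset` is always visible.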
// Sliding window mask?
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
}
| candle/candle-transformers/src/models/yi.rs/0 | {
"file_path": "candle/candle-transformers/src/models/yi.rs",
"repo_id": "candle",
"token_count": 6687
} | 36 |
use candle::{Device, Tensor};
use candle_transformers::generation::LogitsProcessor;
use candle_wasm_example_llama2::worker::{Model as M, ModelData};
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
pub struct Model {
inner: M,
logits_processor: LogitsProcessor,
tokens: Vec<u32>,
repeat_penalty: f32,
}
impl Model {
fn process(&mut self, tokens: &[u32]) -> candle::Result<String> {
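        // When enabled, the repeat penalty only looks at the last `REPEAT_LAST_N`
        // generated tokens.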
const REPEAT_LAST_N: usize = 64;
let dev = Device::Cpu;
let input = Tensor::new(tokens, &dev)?.unsqueeze(0)?;
let logits = self.inner.llama.forward(&input, tokens.len())?;
let logits = logits.squeeze(0)?;
let logits = if self.repeat_penalty == 1. || tokens.is_empty() {
logits
} else {
let start_at = self.tokens.len().saturating_sub(REPEAT_LAST_N);
candle_transformers::utils::apply_repeat_penalty(
&logits,
self.repeat_penalty,
&self.tokens[start_at..],
)?
};
let next_token = self.logits_processor.sample(&logits)?;
self.tokens.push(next_token);
let text = match self.inner.tokenizer.id_to_token(next_token) {
Some(text) => text.replace('▁', " ").replace("<0x0A>", "\n"),
None => "".to_string(),
};
Ok(text)
}
}
#[wasm_bindgen]
impl Model {
#[wasm_bindgen(constructor)]
pub fn new(weights: Vec<u8>, tokenizer: Vec<u8>) -> Result<Model, JsError> {
let model = M::load(ModelData {
tokenizer,
model: weights,
});
let logits_processor = LogitsProcessor::new(299792458, None, None);
match model {
Ok(inner) => Ok(Self {
inner,
logits_processor,
tokens: vec![],
repeat_penalty: 1.,
}),
Err(e) => Err(JsError::new(&e.to_string())),
}
}
#[wasm_bindgen]
pub fn get_seq_len(&mut self) -> usize {
self.inner.config.seq_len
}
#[wasm_bindgen]
pub fn init_with_prompt(
&mut self,
prompt: String,
temp: f64,
top_p: f64,
repeat_penalty: f32,
seed: u64,
) -> Result<String, JsError> {
// First reset the cache.
{
let mut cache = self.inner.cache.kvs.lock().unwrap();
for elem in cache.iter_mut() {
*elem = None
}
}
let temp = if temp <= 0. { None } else { Some(temp) };
let top_p = if top_p <= 0. || top_p >= 1. {
None
} else {
Some(top_p)
};
self.logits_processor = LogitsProcessor::new(seed, temp, top_p);
self.repeat_penalty = repeat_penalty;
self.tokens.clear();
let tokens = self
.inner
.tokenizer
.encode(prompt, true)
.map_err(|m| JsError::new(&m.to_string()))?
.get_ids()
.to_vec();
let text = self
.process(&tokens)
.map_err(|m| JsError::new(&m.to_string()))?;
Ok(text)
}
#[wasm_bindgen]
pub fn next_token(&mut self) -> Result<String, JsError> {
let last_token = *self.tokens.last().unwrap();
let text = self
.process(&[last_token])
.map_err(|m| JsError::new(&m.to_string()))?;
Ok(text)
}
}
fn main() {}
| candle/candle-wasm-examples/llama2-c/src/bin/m.rs/0 | {
"file_path": "candle/candle-wasm-examples/llama2-c/src/bin/m.rs",
"repo_id": "candle",
"token_count": 1807
} | 37 |
//load the candle SAM Model wasm module
import init, { Model } from "./build/m.js";
async function fetchArrayBuffer(url, cacheModel = true) {
if (!cacheModel)
return new Uint8Array(await (await fetch(url)).arrayBuffer());
const cacheName = "sam-candle-cache";
const cache = await caches.open(cacheName);
const cachedResponse = await cache.match(url);
if (cachedResponse) {
const data = await cachedResponse.arrayBuffer();
return new Uint8Array(data);
}
const res = await fetch(url, { cache: "force-cache" });
cache.put(url, res.clone());
return new Uint8Array(await res.arrayBuffer());
}
class SAMModel {
static instance = {};
// keep current image embeddings state
static imageArrayHash = {};
// Add a new property to hold the current modelID
static currentModelID = null;
static async getInstance(modelURL, modelID) {
if (!this.instance[modelID]) {
await init();
self.postMessage({
status: "loading",
message: `Loading Model ${modelID}`,
});
const weightsArrayU8 = await fetchArrayBuffer(modelURL);
this.instance[modelID] = new Model(
weightsArrayU8,
/tiny|mobile/.test(modelID)
);
} else {
self.postMessage({ status: "loading", message: "Model Already Loaded" });
}
// Set the current modelID to the modelID that was passed in
this.currentModelID = modelID;
return this.instance[modelID];
}
  // Image embeddings are keyed by the current modelID, so no modelID parameter is needed here.
static setImageEmbeddings(imageArrayU8) {
// check if image embeddings are already set for this image and model
const imageArrayHash = this.getSimpleHash(imageArrayU8);
if (
this.imageArrayHash[this.currentModelID] === imageArrayHash &&
this.instance[this.currentModelID]
) {
self.postMessage({
status: "embedding",
message: "Embeddings Already Set",
});
return;
}
this.imageArrayHash[this.currentModelID] = imageArrayHash;
this.instance[this.currentModelID].set_image_embeddings(imageArrayU8);
self.postMessage({ status: "embedding", message: "Embeddings Set" });
}
static getSimpleHash(imageArrayU8) {
// get simple hash of imageArrayU8
let imageArrayHash = 0;
for (let i = 0; i < imageArrayU8.length; i += 100) {
imageArrayHash ^= imageArrayU8[i];
}
return imageArrayHash.toString(16);
}
}
async function createImageCanvas(
{ mask_shape, mask_data }, // mask
{ original_width, original_height, width, height } // original image
) {
const [_, __, shape_width, shape_height] = mask_shape;
const maskCanvas = new OffscreenCanvas(shape_width, shape_height); // canvas for mask
const maskCtx = maskCanvas.getContext("2d");
const canvas = new OffscreenCanvas(original_width, original_height); // canvas for creating mask with original image size
const ctx = canvas.getContext("2d");
const imageData = maskCtx.createImageData(
maskCanvas.width,
maskCanvas.height
);
const data = imageData.data;
for (let p = 0; p < data.length; p += 4) {
data[p] = 0;
data[p + 1] = 0;
data[p + 2] = 0;
data[p + 3] = mask_data[p / 4] * 255;
}
maskCtx.putImageData(imageData, 0, 0);
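  // sx/sy crop the mask to the original image's aspect ratio before it is
  // scaled up to the full image size.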
let sx, sy;
if (original_height < original_width) {
sy = original_height / original_width;
sx = 1;
} else {
sy = 1;
sx = original_width / original_height;
}
ctx.drawImage(
maskCanvas,
0,
0,
maskCanvas.width * sx,
maskCanvas.height * sy,
0,
0,
original_width,
original_height
);
const blob = await canvas.convertToBlob();
return URL.createObjectURL(blob);
}
self.addEventListener("message", async (event) => {
const { modelURL, modelID, imageURL, points } = event.data;
try {
self.postMessage({ status: "loading", message: "Starting SAM" });
const sam = await SAMModel.getInstance(modelURL, modelID);
self.postMessage({ status: "loading", message: "Loading Image" });
const imageArrayU8 = await fetchArrayBuffer(imageURL, false);
self.postMessage({ status: "embedding", message: "Creating Embeddings" });
SAMModel.setImageEmbeddings(imageArrayU8);
if (!points) {
// no points only do the embeddings
self.postMessage({
status: "complete-embedding",
message: "Embeddings Complete",
});
return;
}
self.postMessage({ status: "segmenting", message: "Segmenting" });
const { mask, image } = sam.mask_for_point({ points });
const maskDataURL = await createImageCanvas(mask, image);
// Send the segment back to the main thread as JSON
self.postMessage({
status: "complete",
message: "Segmentation Complete",
output: { maskURL: maskDataURL },
});
} catch (e) {
self.postMessage({ error: e });
}
});
| candle/candle-wasm-examples/segment-anything/samWorker.js/0 | {
"file_path": "candle/candle-wasm-examples/segment-anything/samWorker.js",
"repo_id": "candle",
"token_count": 1747
} | 38 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Welcome to Candle!</title>
<link data-trunk rel="copy-file" href="mel_filters.safetensors" />
<!-- samples -->
<link data-trunk rel="copy-dir" href="audios" />
<!-- tiny.en -->
<link data-trunk rel="copy-dir" href="whisper-tiny.en" />
<!-- tiny -->
<link data-trunk rel="copy-dir" href="whisper-tiny" />
<!-- quantized -->
<link data-trunk rel="copy-dir" href="quantized" />
<link
data-trunk
rel="rust"
href="Cargo.toml"
data-bin="app"
data-type="main" />
<link
data-trunk
rel="rust"
href="Cargo.toml"
data-bin="worker"
data-type="worker" />
<link
rel="stylesheet"
href="https://fonts.googleapis.com/css?family=Roboto:300,300italic,700,700italic" />
<link
rel="stylesheet"
href="https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.css" />
<link
rel="stylesheet"
href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.4.1/milligram.css" />
</head>
<body></body>
</html>
| candle/candle-wasm-examples/whisper/index.html/0 | {
"file_path": "candle/candle-wasm-examples/whisper/index.html",
"repo_id": "candle",
"token_count": 523
} | 39 |
<html>
<head>
<meta content="text/html;charset=utf-8" http-equiv="Content-Type" />
<title>Candle YOLOv8 Rust/WASM</title>
</head>
<body></body>
</html>
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<style>
@import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap");
html,
body {
font-family: "Source Sans 3", sans-serif;
}
code,
output,
select,
pre {
font-family: "Source Code Pro", monospace;
}
</style>
<script src="https://cdn.tailwindcss.com"></script>
<script
src="https://cdn.jsdelivr.net/gh/huggingface/hub-js-utils/share-canvas.js"
type="module"
></script>
<script type="module">
      const MODEL_BASEURL =
        "https://huggingface.co/lmz/candle-yolo-v8/resolve/main/";
const MODELS = {
yolov8n: {
model_size: "n",
url: "yolov8n.safetensors",
},
yolov8s: {
model_size: "s",
url: "yolov8s.safetensors",
},
yolov8m: {
model_size: "m",
url: "yolov8m.safetensors",
},
yolov8l: {
model_size: "l",
url: "yolov8l.safetensors",
},
yolov8x: {
model_size: "x",
url: "yolov8x.safetensors",
},
yolov8n_pose: {
model_size: "n",
url: "yolov8n-pose.safetensors",
},
yolov8s_pose: {
model_size: "s",
url: "yolov8s-pose.safetensors",
},
yolov8m_pose: {
model_size: "m",
url: "yolov8m-pose.safetensors",
},
yolov8l_pose: {
model_size: "l",
url: "yolov8l-pose.safetensors",
},
yolov8x_pose: {
model_size: "x",
url: "yolov8x-pose.safetensors",
},
};
const COCO_PERSON_SKELETON = [
[4, 0], // head
[3, 0],
[16, 14], // left lower leg
[14, 12], // left upper leg
[6, 12], // left torso
[6, 5], // top torso
[6, 8], // upper arm
[8, 10], // lower arm
[1, 2], // head
[1, 3], // right head
[2, 4], // left head
[3, 5], // right neck
[4, 6], // left neck
[5, 7], // right upper arm
[7, 9], // right lower arm
[5, 11], // right torso
[11, 12], // bottom torso
[11, 13], // right upper leg
[13, 15], // right lower leg
];
// init web worker
const yoloWorker = new Worker("./yoloWorker.js", { type: "module" });
let hasImage = false;
//add event listener to image examples
document.querySelector("#image-select").addEventListener("click", (e) => {
const target = e.target;
if (target.nodeName === "IMG") {
const href = target.src;
drawImageCanvas(href);
}
});
//add event listener to file input
document.querySelector("#file-upload").addEventListener("change", (e) => {
const target = e.target;
if (target.files.length > 0) {
const href = URL.createObjectURL(target.files[0]);
drawImageCanvas(href);
}
});
// add event listener to drop-area
const dropArea = document.querySelector("#drop-area");
dropArea.addEventListener("dragenter", (e) => {
e.preventDefault();
dropArea.classList.add("border-blue-700");
});
dropArea.addEventListener("dragleave", (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
});
dropArea.addEventListener("dragover", (e) => {
e.preventDefault();
});
dropArea.addEventListener("drop", (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
const url = e.dataTransfer.getData("text/uri-list");
const files = e.dataTransfer.files;
if (files.length > 0) {
const href = URL.createObjectURL(files[0]);
drawImageCanvas(href);
} else if (url) {
drawImageCanvas(url);
}
});
document.querySelector("#clear-btn").addEventListener("click", () => {
drawImageCanvas();
});
function drawImageCanvas(imgURL) {
const canvas = document.querySelector("#canvas");
const canvasResult = document.querySelector("#canvas-result");
canvasResult
.getContext("2d")
.clearRect(0, 0, canvas.width, canvas.height);
const ctx = canvas.getContext("2d");
ctx.clearRect(0, 0, canvas.width, canvas.height);
document.querySelector("#share-btn").classList.add("invisible");
document.querySelector("#clear-btn").classList.add("invisible");
document.querySelector("#detect").disabled = true;
hasImage = false;
canvas.parentElement.style.height = "auto";
if (imgURL && imgURL !== "") {
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
canvas.width = img.width;
canvas.height = img.height;
ctx.drawImage(img, 0, 0);
canvas.parentElement.style.height = canvas.offsetHeight + "px";
hasImage = true;
document.querySelector("#detect").disabled = false;
document.querySelector("#clear-btn").classList.remove("invisible");
};
img.src = imgURL;
}
}
async function classifyImage(
imageURL, // URL of image to classify
modelID, // ID of model to use
modelURL, // URL to model file
modelSize, // size of model
confidence, // confidence threshold
iou_threshold, // IoU threshold
updateStatus // function receives status updates
) {
return new Promise((resolve, reject) => {
yoloWorker.postMessage({
imageURL,
modelID,
modelURL,
modelSize,
confidence,
iou_threshold,
});
function handleMessage(event) {
console.log("message", event.data);
if ("status" in event.data) {
updateStatus(event.data.status);
}
if ("error" in event.data) {
yoloWorker.removeEventListener("message", handleMessage);
reject(new Error(event.data.error));
}
if (event.data.status === "complete") {
yoloWorker.removeEventListener("message", handleMessage);
resolve(event.data);
}
}
yoloWorker.addEventListener("message", handleMessage);
});
}
// add event listener to detect button
document.querySelector("#detect").addEventListener("click", async () => {
if (!hasImage) {
return;
}
const modelID = document.querySelector("#model").value;
const modelURL = MODEL_BASEURL + MODELS[modelID].url;
const modelSize = MODELS[modelID].model_size;
const confidence = parseFloat(
document.querySelector("#confidence").value
);
const iou_threshold = parseFloat(
document.querySelector("#iou_threshold").value
);
const canvasInput = document.querySelector("#canvas");
const canvas = document.querySelector("#canvas-result");
canvas.width = canvasInput.width;
canvas.height = canvasInput.height;
const scale = canvas.width / canvas.offsetWidth;
const ctx = canvas.getContext("2d");
ctx.drawImage(canvasInput, 0, 0);
const imageURL = canvas.toDataURL();
      const results = await classifyImage(
imageURL,
modelID,
modelURL,
modelSize,
confidence,
iou_threshold,
updateStatus
);
const { output } = results;
ctx.lineWidth = 1 + 2 * scale;
ctx.strokeStyle = "#3c8566";
ctx.fillStyle = "#0dff9a";
const fontSize = 14 * scale;
ctx.font = `${fontSize}px sans-serif`;
for (const detection of output) {
// check keypoint for pose model data
let xmin, xmax, ymin, ymax, label, confidence, keypoints;
if ("keypoints" in detection) {
xmin = detection.xmin;
xmax = detection.xmax;
ymin = detection.ymin;
ymax = detection.ymax;
confidence = detection.confidence;
keypoints = detection.keypoints;
} else {
const [_label, bbox] = detection;
label = _label;
xmin = bbox.xmin;
xmax = bbox.xmax;
ymin = bbox.ymin;
ymax = bbox.ymax;
confidence = bbox.confidence;
}
const [x, y, w, h] = [xmin, ymin, xmax - xmin, ymax - ymin];
const text = `${label ? label + " " : ""}${confidence.toFixed(2)}`;
const width = ctx.measureText(text).width;
ctx.fillStyle = "#3c8566";
ctx.fillRect(x - 2, y - fontSize, width + 4, fontSize);
ctx.fillStyle = "#e3fff3";
ctx.strokeRect(x, y, w, h);
ctx.fillText(text, x, y - 2);
if (keypoints) {
ctx.save();
ctx.fillStyle = "magenta";
ctx.strokeStyle = "yellow";
for (const keypoint of keypoints) {
const { x, y } = keypoint;
ctx.beginPath();
ctx.arc(x, y, 3, 0, 2 * Math.PI);
ctx.fill();
}
ctx.beginPath();
for (const [xid, yid] of COCO_PERSON_SKELETON) {
            // draw lines between skeleton keypoints
if (keypoints[xid] && keypoints[yid]) {
ctx.moveTo(keypoints[xid].x, keypoints[xid].y);
ctx.lineTo(keypoints[yid].x, keypoints[yid].y);
}
}
ctx.stroke();
ctx.restore();
}
}
});
function updateStatus(statusMessage) {
const button = document.querySelector("#detect");
if (statusMessage === "detecting") {
button.disabled = true;
button.classList.add("bg-blue-700");
button.classList.remove("bg-blue-950");
button.textContent = "Predicting...";
} else if (statusMessage === "complete") {
button.disabled = false;
button.classList.add("bg-blue-950");
button.classList.remove("bg-blue-700");
button.textContent = "Predict";
document.querySelector("#share-btn").classList.remove("invisible");
}
}
document.querySelector("#share-btn").addEventListener("click", () => {
shareToCommunity(
"lmz/candle-yolo",
"Candle + YOLOv8",
"YOLOv8 with [Candle](https://github.com/huggingface/candle)",
"canvas-result",
"share-btn"
);
});
</script>
</head>
<body class="container max-w-4xl mx-auto p-4">
<main class="grid grid-cols-1 gap-8 relative">
<span class="absolute text-5xl -ml-[1em]"> 🕯️ </span>
<div>
<h1 class="text-5xl font-bold">Candle YOLOv8</h1>
<h2 class="text-2xl font-bold">Rust/WASM Demo</h2>
<p class="max-w-lg">
This demo showcases object detection and pose estimation models in
your browser using Rust/WASM. It utilizes
<a
            href="https://huggingface.co/lmz/candle-yolo-v8"
target="_blank"
class="underline hover:text-blue-500 hover:no-underline"
>
            YOLOv8 models in the safetensors format
</a>
and a WASM runtime built with
<a
href="https://github.com/huggingface/candle/"
target="_blank"
class="underline hover:text-blue-500 hover:no-underline"
>Candle </a
>.
</p>
<p>
            To run pose estimation, select a YOLOv8 pose model from the dropdown.
</p>
</div>
<div>
<label for="model" class="font-medium">Models Options: </label>
<select
id="model"
class="border-2 border-gray-500 rounded-md font-light"
>
<option value="yolov8n" selected>yolov8n (6.37 MB)</option>
<option value="yolov8s">yolov8s (22.4 MB)</option>
<option value="yolov8m">yolov8m (51.9 MB)</option>
<option value="yolov8l">yolov8l (87.5 MB)</option>
<option value="yolov8x">yolov8x (137 MB)</option>
<!-- Pose models -->
<option value="yolov8n_pose">yolov8n_pose (6.65 MB)</option>
<option value="yolov8s_pose">yolov8s_pose (23.3 MB)</option>
<option value="yolov8m_pose">yolov8m_pose (53 MB)</option>
<option value="yolov8l_pose">yolov8l_pose (89.1 MB)</option>
<option value="yolov8x_pose">yolov8x_pose (139 MB)</option>
</select>
</div>
<div>
<button
id="detect"
disabled
class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 px-4 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"
>
Predict
</button>
</div>
<!-- drag and drop area -->
<div class="relative max-w-lg">
<div class="py-1">
<button
id="clear-btn"
class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center ml-auto invisible"
>
<svg
class=""
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 13 12"
height="1em"
>
<path
d="M1.6.7 12 11.1M12 .7 1.6 11.1"
stroke="#2E3036"
stroke-width="2"
/>
</svg>
Clear image
</button>
</div>
<div
id="drop-area"
class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative aspect-video w-full overflow-hidden"
>
<div
class="flex flex-col items-center justify-center space-y-1 text-center"
>
<svg
width="25"
height="25"
viewBox="0 0 25 25"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z"
fill="#000"
/>
</svg>
<div class="flex text-sm text-gray-600">
<label
for="file-upload"
class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700"
>
<span>Drag and drop your image here</span>
<span class="block text-xs">or</span>
<span class="block text-xs">Click to upload</span>
</label>
</div>
<input
id="file-upload"
name="file-upload"
type="file"
class="sr-only"
/>
</div>
<canvas
id="canvas"
class="absolute pointer-events-none w-full"
></canvas>
<canvas
id="canvas-result"
class="absolute pointer-events-none w-full"
></canvas>
</div>
<div class="text-right py-2">
<button
id="share-btn"
class="bg-white rounded-md hover:outline outline-orange-200 disabled:opacity-50 invisible"
>
<img
            src="https://huggingface.co/datasets/huggingface/badges/raw/main/share-to-community-sm.svg"
/>
</button>
</div>
</div>
<div>
<div
class="flex gap-3 items-center overflow-x-scroll"
id="image-select"
>
<h3 class="font-medium">Examples:</h3>
<img
          src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg"
class="cursor-pointer w-24 h-24 object-cover"
/>
<img
          src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg"
class="cursor-pointer w-24 h-24 object-cover"
/>
<img
          src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg"
class="cursor-pointer w-24 h-24 object-cover"
/>
</div>
</div>
<div>
<div class="grid grid-cols-3 max-w-md items-center gap-3">
<label class="text-sm font-medium" for="confidence"
>Confidence Threshold</label
>
<input
type="range"
id="confidence"
name="confidence"
min="0"
max="1"
step="0.01"
value="0.25"
oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)"
/>
<output
class="text-xs font-light px-1 py-1 border border-gray-700 rounded-md w-min"
>0.25</output
>
<label class="text-sm font-medium" for="iou_threshold"
>IoU Threshold</label
>
<input
type="range"
id="iou_threshold"
name="iou_threshold"
min="0"
max="1"
step="0.01"
value="0.45"
oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)"
/>
<output
class="font-extralight text-xs px-1 py-1 border border-gray-700 rounded-md w-min"
>0.45</output
>
</div>
</div>
</main>
</body>
</html>
| candle/candle-wasm-examples/yolo/lib-example.html/0 | {
"file_path": "candle/candle-wasm-examples/yolo/lib-example.html",
"repo_id": "candle",
"token_count": 9649
} | 40 |
Dockerfile
.vscode/
.idea
.gitignore
LICENSE
README.md
node_modules/
.svelte-kit/
.env*
!.env
!.env.local | chat-ui/.dockerignore/0 | {
"file_path": "chat-ui/.dockerignore",
"repo_id": "chat-ui",
"token_count": 51
} | 41 |
{
"name": "chat-ui",
"version": "0.7.0",
"private": true,
"packageManager": "[email protected]",
"scripts": {
"dev": "vite dev",
"build": "vite build",
"preview": "vite preview",
"check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
"check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
"lint": "prettier --plugin-search-dir . --check . && eslint .",
"format": "prettier --plugin-search-dir . --write .",
"test": "MONGODB_URL=mongodb://127.0.0.1:27017/ vitest",
"updateLocalEnv": "node --loader ts-node/esm scripts/updateLocalEnv.ts",
"updateProdEnv": "node --loader ts-node/esm scripts/updateProdEnv.ts"
},
"devDependencies": {
"@iconify-json/carbon": "^1.1.16",
"@iconify-json/eos-icons": "^1.1.6",
"@sveltejs/adapter-node": "^1.3.1",
"@sveltejs/kit": "^1.27.6",
"@tailwindcss/typography": "^0.5.9",
"@types/jsdom": "^21.1.1",
"@types/marked": "^4.0.8",
"@types/parquetjs": "^0.10.3",
"@typescript-eslint/eslint-plugin": "^6.x",
"@typescript-eslint/parser": "^6.x",
"eslint": "^8.28.0",
"eslint-config-prettier": "^8.5.0",
"eslint-plugin-svelte": "^2.30.0",
"marked-katex-extension": "^3.0.6",
"prettier": "^2.8.0",
"prettier-plugin-svelte": "^2.10.1",
"prettier-plugin-tailwindcss": "^0.2.7",
"svelte": "^4.2.8",
"svelte-check": "^3.6.2",
"ts-node": "^10.9.1",
"tslib": "^2.4.1",
"typescript": "^5.0.0",
"unplugin-icons": "^0.16.1",
"vite": "^4.5.2",
"vitest": "^0.31.0"
},
"type": "module",
"dependencies": {
"@huggingface/hub": "^0.5.1",
"@huggingface/inference": "^2.6.3",
"@iconify-json/bi": "^1.1.21",
"@resvg/resvg-js": "^2.6.0",
"@xenova/transformers": "^2.6.0",
"autoprefixer": "^10.4.14",
"browser-image-resizer": "^2.4.1",
"date-fns": "^2.29.3",
"dotenv": "^16.0.3",
"handlebars": "^4.7.8",
"highlight.js": "^11.7.0",
"image-size": "^1.0.2",
"jsdom": "^22.0.0",
"json5": "^2.2.3",
"marked": "^4.3.0",
"mongodb": "^5.8.0",
"nanoid": "^4.0.2",
"openid-client": "^5.4.2",
"parquetjs": "^0.11.2",
"postcss": "^8.4.31",
"saslprep": "^1.0.3",
"satori": "^0.10.11",
"satori-html": "^0.3.2",
"serpapi": "^1.1.1",
"sharp": "^0.33.2",
"tailwind-scrollbar": "^3.0.0",
"tailwindcss": "^3.4.0",
"zod": "^3.22.3"
},
"optionalDependencies": {
"aws4fetch": "^1.0.17",
"openai": "^4.14.2"
}
}
| chat-ui/package.json/0 | {
"file_path": "chat-ui/package.json",
"repo_id": "chat-ui",
"token_count": 1310
} | 42 |
<script lang="ts">
import { base } from "$app/paths";
import { page } from "$app/stores";
import { PUBLIC_APP_DESCRIPTION, PUBLIC_APP_NAME } from "$env/static/public";
import LogoHuggingFaceBorderless from "$lib/components/icons/LogoHuggingFaceBorderless.svelte";
import Modal from "$lib/components/Modal.svelte";
import { useSettingsStore } from "$lib/stores/settings";
import { cookiesAreEnabled } from "$lib/utils/cookiesAreEnabled";
import Logo from "./icons/Logo.svelte";
const settings = useSettingsStore();
</script>
<Modal>
<div
class="flex w-full flex-col items-center gap-6 bg-gradient-to-b from-primary-500/40 via-primary-500/10 to-primary-500/0 px-5 pb-8 pt-9 text-center sm:px-6"
>
<h2 class="flex items-center text-2xl font-semibold text-gray-800">
<Logo classNames="mr-1" />
{PUBLIC_APP_NAME}
</h2>
<p class="text-lg font-semibold leading-snug text-gray-800" style="text-wrap: balance;">
{PUBLIC_APP_DESCRIPTION}
</p>
<p class="text-sm text-gray-500">
Disclaimer: AI is an area of active research with known problems such as biased generation and
misinformation. Do not use this application for high-stakes decisions or advice.
</p>
<div class="flex w-full flex-col items-center gap-2">
{#if $page.data.guestMode || !$page.data.loginEnabled}
<button
class="w-full justify-center rounded-full border-2 border-gray-300 bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900"
class:bg-white={$page.data.loginEnabled}
class:text-gray-800={$page.data.loginEnabled}
class:hover:bg-slate-100={$page.data.loginEnabled}
on:click|preventDefault|stopPropagation={() => {
if (!cookiesAreEnabled()) {
window.open(window.location.href, "_blank");
}
$settings.ethicsModalAccepted = true;
}}
>
{#if $page.data.loginEnabled}
Try as guest
{:else}
Start chatting
{/if}
</button>
{/if}
{#if $page.data.loginEnabled}
<form action="{base}/login" target="_parent" method="POST" class="w-full">
<button
type="submit"
class="flex w-full items-center justify-center whitespace-nowrap rounded-full border-2 border-black bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900"
>
Sign in
{#if PUBLIC_APP_NAME === "HuggingChat"}
with <LogoHuggingFaceBorderless classNames="text-xl mr-1 ml-1.5 flex-none" /> Hugging Face
{/if}
</button>
</form>
{/if}
</div>
</div>
</Modal>
| chat-ui/src/lib/components/DisclaimerModal.svelte/0 | {
"file_path": "chat-ui/src/lib/components/DisclaimerModal.svelte",
"repo_id": "chat-ui",
"token_count": 1045
} | 43 |
<script lang="ts">
import CarbonUpload from "~icons/carbon/upload";
export let classNames = "";
export let files: File[];
let filelist: FileList;
$: if (filelist) {
files = Array.from(filelist);
}
</script>
<button
class="btn relative h-8 rounded-lg border bg-white px-3 py-1 text-sm text-gray-500 shadow-sm transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:text-gray-300 dark:hover:bg-gray-600 {classNames}"
>
<input
bind:files={filelist}
class="absolute w-full cursor-pointer opacity-0"
type="file"
accept="image/*"
/>
<CarbonUpload class="mr-2 text-xs " /> Upload image
</button>
| chat-ui/src/lib/components/UploadBtn.svelte/0 | {
"file_path": "chat-ui/src/lib/components/UploadBtn.svelte",
"repo_id": "chat-ui",
"token_count": 233
} | 44 |
import { HF_ACCESS_TOKEN, HF_TOKEN } from "$env/static/private";
import { buildPrompt } from "$lib/buildPrompt";
import { textGenerationStream } from "@huggingface/inference";
import type { Endpoint } from "../endpoints";
import { z } from "zod";
export const endpointTgiParametersSchema = z.object({
weight: z.number().int().positive().default(1),
model: z.any(),
type: z.literal("tgi"),
url: z.string().url(),
accessToken: z.string().default(HF_TOKEN ?? HF_ACCESS_TOKEN),
authorization: z.string().optional(),
});
export function endpointTgi(input: z.input<typeof endpointTgiParametersSchema>): Endpoint {
const { url, accessToken, model, authorization } = endpointTgiParametersSchema.parse(input);
return async ({ conversation, continue: messageContinue }) => {
let prompt = await buildPrompt({
messages: conversation.messages,
webSearch: conversation.messages[conversation.messages.length - 1].webSearch,
preprompt: conversation.preprompt,
model,
id: conversation._id,
});
if (messageContinue) {
// start with the full prompt, and for each stop token, try to remove it from the end of the prompt
prompt = model.parameters.stop.reduce((acc: string, curr: string) => {
if (acc.endsWith(curr)) {
return acc.slice(0, acc.length - curr.length);
}
return acc;
}, prompt.trimEnd());
}
return textGenerationStream(
{
parameters: { ...model.parameters, return_full_text: false },
model: url,
inputs: prompt,
accessToken,
},
{
use_cache: false,
fetch: async (endpointUrl, info) => {
if (info && authorization && !accessToken) {
// Set authorization header if it is defined and HF_TOKEN is empty
info.headers = {
...info.headers,
Authorization: authorization,
};
}
return fetch(endpointUrl, info);
},
}
);
};
}
export default endpointTgi;
| chat-ui/src/lib/server/endpoints/tgi/endpointTgi.ts/0 | {
"file_path": "chat-ui/src/lib/server/endpoints/tgi/endpointTgi.ts",
"repo_id": "chat-ui",
"token_count": 696
} | 45 |
import { browser } from "$app/environment";
import { invalidate } from "$app/navigation";
import { base } from "$app/paths";
import { UrlDependency } from "$lib/types/UrlDependency";
import type { ObjectId } from "mongodb";
import { getContext, setContext } from "svelte";
import { type Writable, writable, get } from "svelte/store";
type SettingsStore = {
shareConversationsWithModelAuthors: boolean;
hideEmojiOnSidebar: boolean;
ethicsModalAccepted: boolean;
ethicsModalAcceptedAt: Date | null;
activeModel: string;
customPrompts: Record<string, string>;
recentlySaved: boolean;
assistants: Array<ObjectId | string>;
};
export function useSettingsStore() {
return getContext<Writable<SettingsStore>>("settings");
}
export function createSettingsStore(initialValue: Omit<SettingsStore, "recentlySaved">) {
const baseStore = writable({ ...initialValue, recentlySaved: false });
let timeoutId: NodeJS.Timeout;
async function setSettings(settings: Partial<SettingsStore>) {
baseStore.update((s) => ({
...s,
...settings,
}));
clearTimeout(timeoutId);
if (browser) {
timeoutId = setTimeout(async () => {
await fetch(`${base}/settings`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
...get(baseStore),
...settings,
}),
});
invalidate(UrlDependency.ConversationList);
			// set recentlySaved to true for 3s
baseStore.update((s) => ({
...s,
recentlySaved: true,
}));
setTimeout(() => {
baseStore.update((s) => ({
...s,
recentlySaved: false,
}));
}, 3000);
invalidate(UrlDependency.ConversationList);
}, 300);
// debounce server calls by 300ms
}
}
const newStore = {
subscribe: baseStore.subscribe,
set: setSettings,
update: (fn: (s: SettingsStore) => SettingsStore) => {
setSettings(fn(get(baseStore)));
},
} satisfies Writable<SettingsStore>;
setContext("settings", newStore);
return newStore;
}
| chat-ui/src/lib/stores/settings.ts/0 | {
"file_path": "chat-ui/src/lib/stores/settings.ts",
"repo_id": "chat-ui",
"token_count": 760
} | 46 |
import type { Message } from "./Message";
export type LegacyParamatersTemplateInput = {
preprompt?: string;
userMessageToken: string;
userMessageEndToken: string;
assistantMessageToken: string;
assistantMessageEndToken: string;
};
export type ChatTemplateInput = {
messages: Pick<Message, "from" | "content">[];
preprompt?: string;
};
| chat-ui/src/lib/types/Template.ts/0 | {
"file_path": "chat-ui/src/lib/types/Template.ts",
"repo_id": "chat-ui",
"token_count": 105
} | 47 |
type UUID = ReturnType<typeof crypto.randomUUID>;
export function randomUUID(): UUID {
// Only on old safari / ios
if (!("randomUUID" in crypto)) {
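		// Fallback: fill an RFC 4122 v4 template with crypto-random nibbles while
		// keeping the version and variant bits intact.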
return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) =>
(
Number(c) ^
(crypto.getRandomValues(new Uint8Array(1))[0] & (15 >> (Number(c) / 4)))
).toString(16)
) as UUID;
}
return crypto.randomUUID();
}
| chat-ui/src/lib/utils/randomUuid.ts/0 | {
"file_path": "chat-ui/src/lib/utils/randomUuid.ts",
"repo_id": "chat-ui",
"token_count": 166
} | 48 |
import { base } from "$app/paths";
import { collections } from "$lib/server/database.js";
import { redirect } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
export const load = async ({ params }) => {
try {
const assistant = await collections.assistants.findOne({
_id: new ObjectId(params.assistantId),
});
if (!assistant) {
throw redirect(302, `${base}`);
}
return { assistant: JSON.parse(JSON.stringify(assistant)) };
} catch {
throw redirect(302, `${base}`);
}
};
| chat-ui/src/routes/assistant/[assistantId]/+page.server.ts/0 | {
"file_path": "chat-ui/src/routes/assistant/[assistantId]/+page.server.ts",
"repo_id": "chat-ui",
"token_count": 178
} | 49 |
import { redirect } from "@sveltejs/kit";
import { getOIDCAuthorizationUrl } from "$lib/server/auth";
import { base } from "$app/paths";
export const actions = {
async default({ url, locals, request }) {
// TODO: Handle errors if provider is not responding
const referer = request.headers.get("referer");
const authorizationUrl = await getOIDCAuthorizationUrl(
{ redirectURI: `${(referer ? new URL(referer) : url).origin}${base}/login/callback` },
{ sessionId: locals.sessionId }
);
throw redirect(303, authorizationUrl);
},
};
| chat-ui/src/routes/login/+page.server.ts/0 | {
"file_path": "chat-ui/src/routes/login/+page.server.ts",
"repo_id": "chat-ui",
"token_count": 178
} | 50 |
import { collections } from "$lib/server/database";
import { error, type RequestHandler } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
export const GET: RequestHandler = async ({ params }) => {
const assistant = await collections.assistants.findOne({
_id: new ObjectId(params.assistantId),
});
if (!assistant) {
throw error(404, "No assistant found");
}
if (!assistant.avatar) {
throw error(404, "No avatar found");
}
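	// Avatars are stored in GridFS under the assistant id; stream the file into a buffer.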
const fileId = collections.bucket.find({ filename: assistant._id.toString() });
const content = await fileId.next().then(async (file) => {
if (!file?._id) {
throw error(404, "Avatar not found");
}
const fileStream = collections.bucket.openDownloadStream(file?._id);
const fileBuffer = await new Promise<Buffer>((resolve, reject) => {
const chunks: Uint8Array[] = [];
fileStream.on("data", (chunk) => chunks.push(chunk));
fileStream.on("error", reject);
fileStream.on("end", () => resolve(Buffer.concat(chunks)));
});
return fileBuffer;
});
return new Response(content, {
headers: {
"Content-Type": "image/jpeg",
},
});
};
| chat-ui/src/routes/settings/assistants/[assistantId]/avatar.jpg/+server.ts/0 | {
"file_path": "chat-ui/src/routes/settings/assistants/[assistantId]/avatar.jpg/+server.ts",
"repo_id": "chat-ui",
"token_count": 385
} | 51 |
# This first_section was backported from nginx
loading_datasets: loading
share_dataset: share
quicktour: quickstart
dataset_streaming: stream
torch_tensorflow: use_dataset
splits: loading#slice-splits
processing: process
faiss_and_ea: faiss_es
features: about_dataset_features
using_metrics: how_to_metrics
exploring: access
package_reference/logging_methods: package_reference/utilities
# end of first_section
| datasets/docs/source/_redirects.yml/0 | {
"file_path": "datasets/docs/source/_redirects.yml",
"repo_id": "datasets",
"token_count": 134
} | 52 |
# Create a dataset card
Each dataset should have a dataset card to promote responsible usage and inform users of any potential biases within the dataset.
This idea was inspired by the Model Cards proposed by [Mitchell, 2018](https://arxiv.org/abs/1810.03993).
Dataset cards help users understand a dataset's contents, the context for using the dataset, how it was created, and any other considerations a user should be aware of.
Creating a dataset card is easy and can be done in just a few steps:
1. Go to your dataset repository on the [Hub](https://hf.co/new-dataset) and click on **Create Dataset Card** to create a new `README.md` file in your repository.
2. Use the **Metadata UI** to select the tags that describe your dataset. You can add a license, language, pretty_name, the task_categories, size_categories, and any other tags that you think are relevant. These tags help users discover and find your dataset on the Hub.
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/datasets-metadata-ui.png"/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/datasets-metadata-ui-dark.png"/>
</div>
<Tip>
For a complete, but not required, set of tag options you can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1). This'll have a few more tag options like `multilinguality` and `language_creators` which are useful but not absolutely necessary.
</Tip>
3. Click on the **Import dataset card template** link to automatically create a template with all the relevant fields to complete. Fill out the template sections to the best of your ability. Take a look at the [Dataset Card Creation Guide](https://github.com/huggingface/datasets/blob/main/templates/README_guide.md) for more detailed information about what to include in each section of the card. For fields you are unable to complete, you can write **[More Information Needed]**.
4. Once you're done, commit the changes to the `README.md` file and you'll see the completed dataset card on your repository.
YAML also allows you to customize the way your dataset is loaded by [defining splits and/or configurations](./repository_structure#define-your-splits-and-subsets-in-yaml) without the need to write any code.
Feel free to take a look at the [SNLI](https://huggingface.co/datasets/snli), [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail), and [Allociné](https://huggingface.co/datasets/allocine) dataset cards as examples to help you get started.
| datasets/docs/source/dataset_card.mdx/0 | {
"file_path": "datasets/docs/source/dataset_card.mdx",
"repo_id": "datasets",
"token_count": 757
} | 53 |
# Load
Your data can be stored in various places: on your local machine's disk, in a GitHub repository, or in in-memory data structures like Python dictionaries and Pandas DataFrames. Wherever a dataset is stored, 🤗 Datasets can help you load it.
This guide will show you how to load a dataset from:
- The Hub without a dataset loading script
- Local loading script
- Local files
- In-memory data
- Offline
- A specific slice of a split
For more details specific to loading other dataset modalities, take a look at the <a class="underline decoration-pink-400 decoration-2 font-semibold" href="./audio_load">load audio dataset guide</a>, the <a class="underline decoration-yellow-400 decoration-2 font-semibold" href="./image_load">load image dataset guide</a>, or the <a class="underline decoration-green-400 decoration-2 font-semibold" href="./nlp_load">load text dataset guide</a>.
<a id='load-from-the-hub'></a>
## Hugging Face Hub
Datasets are loaded from a dataset loading script that downloads and generates the dataset. However, you can also load a dataset from any dataset repository on the Hub without a loading script! Begin by [creating a dataset repository](share#create-the-repository) and upload your data files. Now you can use the [`load_dataset`] function to load the dataset.
For example, try loading the files from this [demo repository](https://huggingface.co/datasets/lhoestq/demo1) by providing the repository namespace and dataset name. This dataset repository contains CSV files, and the code below loads the dataset from the CSV files:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("lhoestq/demo1")
```
Some datasets may have more than one version based on Git tags, branches, or commits. Use the `revision` parameter to specify the dataset version you want to load:
```py
>>> dataset = load_dataset(
... "lhoestq/custom_squad",
... revision="main" # tag name, or branch name, or commit hash
... )
```
<Tip>
Refer to the [Upload a dataset to the Hub](./upload_dataset) tutorial for more details on how to create a dataset repository on the Hub, and how to upload your data files.
</Tip>
A dataset without a loading script by default loads all the data into the `train` split. Use the `data_files` parameter to map data files to splits like `train`, `validation` and `test`:
```py
>>> data_files = {"train": "train.csv", "test": "test.csv"}
>>> dataset = load_dataset("namespace/your_dataset_name", data_files=data_files)
```
<Tip warning={true}>
If you don't specify which data files to use, [`load_dataset`] will return all the data files. This can take a long time if you load a large dataset like C4, which is approximately 13TB of data.
</Tip>
You can also load a specific subset of the files with the `data_files` or `data_dir` parameter. These parameters can accept a relative path which resolves to the base path corresponding to where the dataset is loaded from.
```py
>>> from datasets import load_dataset
# load files that match the glob pattern
>>> c4_subset = load_dataset("allenai/c4", data_files="en/c4-train.0000*-of-01024.json.gz")
# load dataset from the en directory on the Hub
>>> c4_subset = load_dataset("allenai/c4", data_dir="en")
```
The `split` parameter can also map a data file to a specific split:
```py
>>> data_files = {"validation": "en/c4-validation.*.json.gz"}
>>> c4_validation = load_dataset("allenai/c4", data_files=data_files, split="validation")
```
## Local loading script
You may have a 🤗 Datasets loading script locally on your computer. In this case, load the dataset by passing one of the following paths to [`load_dataset`]:
- The local path to the loading script file.
- The local path to the directory containing the loading script file (only if the script file has the same name as the directory).
Pass `trust_remote_code=True` to allow 🤗 Datasets to execute the loading script:
```py
>>> dataset = load_dataset("path/to/local/loading_script/loading_script.py", split="train", trust_remote_code=True)
>>> dataset = load_dataset("path/to/local/loading_script", split="train", trust_remote_code=True) # equivalent because the file has the same name as the directory
```
### Edit loading script
You can also edit a loading script from the Hub to add your own modifications. Download the dataset repository locally so any data files referenced by a relative path in the loading script can be loaded:
```bash
git clone https://huggingface.co/datasets/eli5
```
Make your edits to the loading script and then load it by passing its local path to [`~datasets.load_dataset`]:
```py
>>> from datasets import load_dataset
>>> eli5 = load_dataset("path/to/local/eli5")
```
## Local and remote files
Datasets can be loaded from local files stored on your computer and from remote files. The datasets are most likely stored as a `csv`, `json`, `txt` or `parquet` file. The [`load_dataset`] function can load each of these file types.
### CSV
🤗 Datasets can read a dataset made up of one or several CSV files (in this case, pass your CSV files as a list):
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("csv", data_files="my_file.csv")
```
<Tip>
For more details, check out the [how to load tabular datasets from CSV files](tabular_load#csv-files) guide.
</Tip>
### JSON
JSON files are loaded directly with [`load_dataset`] as shown below:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("json", data_files="my_file.json")
```
JSON files have diverse formats, but we think the most efficient format is to have multiple JSON objects; each line represents an individual row of data. For example:
```json
{"a": 1, "b": 2.0, "c": "foo", "d": false}
{"a": 4, "b": -5.5, "c": null, "d": true}
```
Another JSON format you may encounter is a nested field, in which case you'll need to specify the `field` argument as shown in the following:
```py
{"version": "0.1.0",
"data": [{"a": 1, "b": 2.0, "c": "foo", "d": false},
{"a": 4, "b": -5.5, "c": null, "d": true}]
}
>>> from datasets import load_dataset
>>> dataset = load_dataset("json", data_files="my_file.json", field="data")
```
To load remote JSON files via HTTP, pass the URLs instead:
```py
>>> base_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
>>> dataset = load_dataset("json", data_files={"train": base_url + "train-v1.1.json", "validation": base_url + "dev-v1.1.json"}, field="data")
```
While these are the most common JSON formats, you'll see other datasets that are formatted differently. 🤗 Datasets recognizes these other formats and will fall back to the Python JSON loading methods to handle them.
### Parquet
Parquet files are stored in a columnar format, unlike row-based files like a CSV. Large datasets may be stored in a Parquet file because it is more efficient and faster at returning your query.
To load a Parquet file:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("parquet", data_files={'train': 'train.parquet', 'test': 'test.parquet'})
```
To load remote Parquet files via HTTP, pass the URLs instead:
```py
>>> base_url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wikipedia/20200501.en/1.0.0/"
>>> data_files = {"train": base_url + "wikipedia-train.parquet"}
>>> wiki = load_dataset("parquet", data_files=data_files, split="train")
```
### Arrow
Arrow files are stored in an in-memory columnar format, unlike row-based formats like CSV and compressed formats like Parquet.
To load an Arrow file:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("arrow", data_files={'train': 'train.arrow', 'test': 'test.arrow'})
```
To load remote Arrow files via HTTP, pass the URLs instead:
```py
>>> base_url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wikipedia/20200501.en/1.0.0/"
>>> data_files = {"train": base_url + "wikipedia-train.arrow"}
>>> wiki = load_dataset("arrow", data_files=data_files, split="train")
```
Arrow is the file format used by 🤗 Datasets under the hood, therefore you can load a local Arrow file using [`Dataset.from_file`] directly:
```py
>>> from datasets import Dataset
>>> dataset = Dataset.from_file("data.arrow")
```
Unlike [`load_dataset`], [`Dataset.from_file`] memory maps the Arrow file without preparing the dataset in the cache, saving you disk space.
The cache directory to store intermediate processing results will be the Arrow file directory in that case.
For now only the Arrow streaming format is supported. The Arrow IPC file format (also known as Feather V2) is not supported.
### SQL
Read database contents with [`~datasets.Dataset.from_sql`] by specifying the URI to connect to your database. You can read both table names and queries:
```py
>>> from datasets import Dataset
# load entire table
>>> dataset = Dataset.from_sql("data_table_name", con="sqlite:///sqlite_file.db")
# load from query
>>> dataset = Dataset.from_sql("SELECT text FROM table WHERE length(text) > 100 LIMIT 10", con="sqlite:///sqlite_file.db")
```
<Tip>
For more details, check out the [how to load tabular datasets from SQL databases](tabular_load#databases) guide.
</Tip>
### WebDataset
The [WebDataset](https://github.com/webdataset/webdataset) format is based on TAR archives and is suitable for big image datasets.
Because of their size, WebDatasets are generally loaded in streaming mode (using `streaming=True`).
You can load a WebDataset like this:
```python
>>> from datasets import load_dataset
>>>
>>> path = "path/to/train/*.tar"
>>> dataset = load_dataset("webdataset", data_files={"train": path}, split="train", streaming=True)
```
To load remote WebDatasets via HTTP, pass the URLs instead:
```python
>>> from datasets import load_dataset
>>>
>>> base_url = "https://huggingface.co/datasets/lhoestq/small-publaynet-wds/resolve/main/publaynet-train-{i:06d}.tar"
>>> urls = [base_url.format(i=i) for i in range(4)]
>>> dataset = load_dataset("webdataset", data_files={"train": urls}, split="train", streaming=True)
```
## Multiprocessing
When a dataset is made of several files (that we call "shards"), it is possible to significantly speed up the dataset downloading and preparation step.
You can choose how many processes you'd like to use to prepare a dataset in parallel using `num_proc`.
In this case, each process is given a subset of shards to prepare:
```python
from datasets import load_dataset
imagenet = load_dataset("imagenet-1k", num_proc=8)
ml_librispeech_spanish = load_dataset("facebook/multilingual_librispeech", "spanish", num_proc=8)
```
## In-memory data
🤗 Datasets will also allow you to create a [`Dataset`] directly from in-memory data structures like Python dictionaries and Pandas DataFrames.
### Python dictionary
Load Python dictionaries with [`~Dataset.from_dict`]:
```py
>>> from datasets import Dataset
>>> my_dict = {"a": [1, 2, 3]}
>>> dataset = Dataset.from_dict(my_dict)
```
### Python list of dictionaries
Load a list of Python dictionaries with [`~Dataset.from_list`]:
```py
>>> from datasets import Dataset
>>> my_list = [{"a": 1}, {"a": 2}, {"a": 3}]
>>> dataset = Dataset.from_list(my_list)
```
### Python generator
Create a dataset from a Python generator with [`~Dataset.from_generator`]:
```py
>>> from datasets import Dataset
>>> def my_gen():
... for i in range(1, 4):
... yield {"a": i}
...
>>> dataset = Dataset.from_generator(my_gen)
```
This approach supports loading data larger than available memory.
You can also define a sharded dataset by passing lists to `gen_kwargs`:
```py
>>> from datasets import IterableDataset
>>> def gen(shards):
... for shard in shards:
... with open(shard) as f:
... for line in f:
... yield {"line": line}
...
>>> shards = [f"data{i}.txt" for i in range(32)]
>>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards})
>>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer
>>> from torch.utils.data import DataLoader
>>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards
```
### Pandas DataFrame
Load Pandas DataFrames with [`~Dataset.from_pandas`]:
```py
>>> from datasets import Dataset
>>> import pandas as pd
>>> df = pd.DataFrame({"a": [1, 2, 3]})
>>> dataset = Dataset.from_pandas(df)
```
<Tip>
For more details, check out the [how to load tabular datasets from Pandas DataFrames](tabular_load#pandas-dataframes) guide.
</Tip>
## Offline
Even if you don't have an internet connection, it is still possible to load a dataset. As long as you've downloaded a dataset from the Hub repository before, it should be cached. This means you can reload the dataset from the cache and use it offline.
If you know you won't have internet access, you can run 🤗 Datasets in full offline mode. This saves time because instead of waiting for the Dataset builder download to time out, 🤗 Datasets will look directly in the cache. Set the environment variable `HF_DATASETS_OFFLINE` to `1` to enable full offline mode.
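For instance, here is a minimal sketch of enabling offline mode from Python; the variable can just as well be exported in your shell before launching the process, and the dataset name below is only an illustration of a dataset you have already cached:
```py
>>> import os
>>> os.environ["HF_DATASETS_OFFLINE"] = "1"  # set before importing datasets, since the value is read at import time
>>> from datasets import load_dataset
>>> dataset = load_dataset("squad", split="train")  # served from the local cache, no call to the Hub
```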
## Slice splits
You can also choose only to load specific slices of a split. There are two options for slicing a split: using strings or the [`ReadInstruction`] API. Strings are more compact and readable for simple cases, while [`ReadInstruction`] is easier to use with variable slicing parameters.
Concatenate a `train` and `test` split by:
```py
>>> train_test_ds = datasets.load_dataset("bookcorpus", split="train+test")
# Equivalently, with the ReadInstruction API:
>>> ri = datasets.ReadInstruction("train") + datasets.ReadInstruction("test")
>>> train_test_ds = datasets.load_dataset("bookcorpus", split=ri)
```
Select specific rows of the `train` split:
```py
>>> train_10_20_ds = datasets.load_dataset("bookcorpus", split="train[10:20]")
# Equivalently, with the ReadInstruction API:
>>> train_10_20_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train", from_=10, to=20, unit="abs"))
```
Or select a percentage of a split with:
```py
>>> train_10pct_ds = datasets.load_dataset("bookcorpus", split="train[:10%]")
# Equivalently, with the ReadInstruction API:
>>> train_10_20_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train", to=10, unit="%"))
```
Select a combination of percentages from each split:
```py
>>> train_10_80pct_ds = datasets.load_dataset("bookcorpus", split="train[:10%]+train[-80%:]")
# Equivalently, with the ReadInstruction API:
>>> ri = (datasets.ReadInstruction("train", to=10, unit="%") + datasets.ReadInstruction("train", from_=-80, unit="%"))
>>> train_10_80pct_ds = datasets.load_dataset("bookcorpus", split=ri)
```
Finally, you can even create cross-validated splits. The example below creates 10-fold cross-validated splits. Each validation dataset is a 10% chunk, and the training dataset makes up the remaining complementary 90% chunk:
```py
>>> val_ds = datasets.load_dataset("bookcorpus", split=[f"train[{k}%:{k+10}%]" for k in range(0, 100, 10)])
>>> train_ds = datasets.load_dataset("bookcorpus", split=[f"train[:{k}%]+train[{k+10}%:]" for k in range(0, 100, 10)])
# Equivalently, with the ReadInstruction API:
>>> val_ds = datasets.load_dataset("bookcorpus", split=[datasets.ReadInstruction("train", from_=k, to=k+10, unit="%") for k in range(0, 100, 10)])
>>> train_ds = datasets.load_dataset("bookcorpus", split=[(datasets.ReadInstruction("train", to=k, unit="%") + datasets.ReadInstruction("train", from_=k+10, unit="%")) for k in range(0, 100, 10)])
```
### Percent slicing and rounding
The default behavior is to round the boundaries to the nearest integer for datasets where the requested slice boundaries do not divide evenly by 100. As shown below, some slices may contain more examples than others. For instance, if the following train split includes 999 records, then:
```py
# 19 records, from 500 (included) to 519 (excluded).
>>> train_50_52_ds = datasets.load_dataset("bookcorpus", split="train[50%:52%]")
# 20 records, from 519 (included) to 539 (excluded).
>>> train_52_54_ds = datasets.load_dataset("bookcorpus", split="train[52%:54%]")
```
If you want equal sized splits, use `pct1_dropremainder` rounding instead. This treats the specified percentage boundaries as multiples of 1%.
```py
# 18 records, from 450 (included) to 468 (excluded).
>>> train_50_52pct1_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train", from_=50, to=52, unit="%", rounding="pct1_dropremainder"))
# 18 records, from 468 (included) to 486 (excluded).
>>> train_52_54pct1_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train",from_=52, to=54, unit="%", rounding="pct1_dropremainder"))
# Or equivalently:
>>> train_50_52pct1_ds = datasets.load_dataset("bookcorpus", split="train[50%:52%](pct1_dropremainder)")
>>> train_52_54pct1_ds = datasets.load_dataset("bookcorpus", split="train[52%:54%](pct1_dropremainder)")
```
<Tip warning={true}>
`pct1_dropremainder` rounding may truncate the last examples in a dataset if the number of examples in your dataset doesn't divide evenly by 100.
</Tip>
<a id='troubleshoot'></a>
## Troubleshooting
Sometimes, you may get unexpected results when you load a dataset. Two of the most common issues you may encounter are manually downloading a dataset and specifying features of a dataset.
### Manual download
Certain datasets require you to manually download the dataset files due to licensing incompatibility or if the files are hidden behind a login page. This causes [`load_dataset`] to throw an `AssertionError`. But 🤗 Datasets provides detailed instructions for downloading the missing files. After you've downloaded the files, use the `data_dir` argument to specify the path to the files you just downloaded.
For example, if you try to download a configuration from the [MATINF](https://huggingface.co/datasets/matinf) dataset:
```py
>>> dataset = load_dataset("matinf", "summarization")
Downloading and preparing dataset matinf/summarization (download: Unknown size, generated: 246.89 MiB, post-processed: Unknown size, total: 246.89 MiB) to /root/.cache/huggingface/datasets/matinf/summarization/1.0.0/82eee5e71c3ceaf20d909bca36ff237452b4e4ab195d3be7ee1c78b53e6f540e...
AssertionError: The dataset matinf with config summarization requires manual data.
Please follow the manual download instructions: To use MATINF you have to download it manually. Please fill this google form (https://forms.gle/nkH4LVE4iNQeDzsc9). You will receive a download link and a password once you complete the form. Please extract all files in one folder and load the dataset with: *datasets.load_dataset('matinf', data_dir='path/to/folder/folder_name')*.
Manual data can be loaded with `datasets.load_dataset(matinf, data_dir='<path/to/manual/data>')
```
If you've already downloaded a dataset from the *Hub with a loading script* to your computer, then you need to pass an absolute path to the `data_dir` or `data_files` parameter to load that dataset. Otherwise, if you pass a relative path, [`load_dataset`] will load the directory from the repository on the Hub instead of the local directory.
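For example, assuming a hypothetical `username/dataset_with_script` repository that you have already downloaded to `/data/local_copy`, the difference looks like this:
```py
>>> from datasets import load_dataset
# relative path: resolved against the dataset repository on the Hub
>>> dataset = load_dataset("username/dataset_with_script", data_dir="data")
# absolute path: resolved against your local machine
>>> dataset = load_dataset("username/dataset_with_script", data_dir="/data/local_copy/data")
```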
### Specify features
When you create a dataset from local files, the [`Features`] are automatically inferred by [Apache Arrow](https://arrow.apache.org/docs/). However, the dataset's features may not always align with your expectations, or you may want to define the features yourself. The following example shows how you can add custom labels with the [`ClassLabel`] feature.
Start by defining your own labels with the [`Features`] class:
```py
>>> class_names = ["sadness", "joy", "love", "anger", "fear", "surprise"]
>>> emotion_features = Features({'text': Value('string'), 'label': ClassLabel(names=class_names)})
```
Next, specify the `features` parameter in [`load_dataset`] with the features you just created:
```py
>>> dataset = load_dataset('csv', data_files=file_dict, delimiter=';', column_names=['text', 'label'], features=emotion_features)
```
Now when you look at your dataset features, you can see it uses the custom labels you defined:
```py
>>> dataset['train'].features
{'text': Value(dtype='string', id=None),
'label': ClassLabel(num_classes=6, names=['sadness', 'joy', 'love', 'anger', 'fear', 'surprise'], names_file=None, id=None)}
```
## Metrics
<Tip warning={true}>
Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
</Tip>
When the metric you want to use is not supported by 🤗 Datasets, you can write and use your own metric script. Load your metric by providing the path to your local metric loading script:
```py
>>> from datasets import load_metric
>>> metric = load_metric('PATH/TO/MY/METRIC/SCRIPT')
>>> # Example of typical usage
>>> for batch in dataset:
... inputs, references = batch
... predictions = model(inputs)
... metric.add_batch(predictions=predictions, references=references)
>>> score = metric.compute()
```
<Tip>
See the [Metrics](./how_to_metrics#custom-metric-loading-script) guide for more details on how to write your own metric loading script.
</Tip>
### Load configurations
It is possible for a metric to have different configurations. The configurations are stored in the `config_name` parameter in [`MetricInfo`] attribute. When you load a metric, provide the configuration name as shown in the following:
```py
>>> from datasets import load_metric
>>> metric = load_metric('bleurt', name='bleurt-base-128')
>>> metric = load_metric('bleurt', name='bleurt-base-512')
```
### Distributed setup
When working in a distributed or parallel processing environment, loading and computing a metric can be tricky because these processes are executed in parallel on separate subsets of the data. 🤗 Datasets supports distributed usage with a few additional arguments when you load a metric.
For example, imagine you are training and evaluating on eight parallel processes. Here's how you would load a metric in this distributed setting:
1. Define the total number of processes with the `num_process` argument.
2. Set the process `rank` as an integer between zero and `num_process - 1`.
3. Load your metric with [`load_metric`] with these arguments:
```py
>>> from datasets import load_metric
>>> metric = load_metric('glue', 'mrpc', num_process=num_process, process_id=rank)
```
<Tip>
Once you've loaded a metric for distributed usage, you can compute the metric as usual. Behind the scenes, [`Metric.compute`] gathers all the predictions and references from the nodes, and computes the final metric.
</Tip>
In some instances, you may be simultaneously running multiple independent distributed evaluations on the same server and files. To avoid any conflicts, it is important to provide an `experiment_id` to distinguish the separate evaluations:
```py
>>> from datasets import load_metric
>>> metric = load_metric('glue', 'mrpc', num_process=num_process, process_id=process_id, experiment_id="My_experiment_10")
```
| datasets/docs/source/loading.mdx/0 | {
"file_path": "datasets/docs/source/loading.mdx",
"repo_id": "datasets",
"token_count": 7158
} | 54 |
# Stream
Dataset streaming lets you work with a dataset without downloading it.
The data is streamed as you iterate over the dataset.
This is especially helpful when:
- You don't want to wait for an extremely large dataset to download.
- The dataset size exceeds the amount of available disk space on your computer.
- You want to quickly explore just a few samples of a dataset.
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/streaming.gif"/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/streaming-dark.gif"/>
</div>
For example, the English split of the [oscar-corpus/OSCAR-2201](https://huggingface.co/datasets/oscar-corpus/OSCAR-2201) dataset is 1.2 terabytes, but you can use it instantly with streaming. Stream a dataset by setting `streaming=True` in [`load_dataset`] as shown below:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('oscar-corpus/OSCAR-2201', 'en', split='train', streaming=True)
>>> print(next(iter(dataset)))
{'id': 0, 'text': 'Founded in 2015, Golden Bees is a leading programmatic recruitment platform dedicated to employers, HR agencies and job boards. The company has developed unique HR-custom technologies and predictive algorithms to identify and attract the best candidates for a job opportunity.', ...
```
Dataset streaming also lets you work with a dataset made of local files without doing any conversion.
In this case, the data is streamed from the local files as you iterate over the dataset.
This is especially helpful when:
- You don't want to wait for an extremely large local dataset to be converted to Arrow.
- The converted files size would exceed the amount of available disk space on your computer.
- You want to quickly explore just a few samples of a dataset.
For example, you can stream a local dataset of hundreds of compressed JSONL files like [oscar-corpus/OSCAR-2201](https://huggingface.co/datasets/oscar-corpus/OSCAR-2201) to use it instantly:
```py
>>> from datasets import load_dataset
>>> data_files = {'train': 'path/to/OSCAR-2201/compressed/en_meta/*.jsonl.gz'}
>>> dataset = load_dataset('json', data_files=data_files, split='train', streaming=True)
>>> print(next(iter(dataset)))
{'id': 0, 'text': 'Founded in 2015, Golden Bees is a leading programmatic recruitment platform dedicated to employers, HR agencies and job boards. The company has developed unique HR-custom technologies and predictive algorithms to identify and attract the best candidates for a job opportunity.', ...
```
Loading a dataset in streaming mode creates a new dataset type instance (instead of the classic [`Dataset`] object), known as an [`IterableDataset`].
This special type of dataset has its own set of processing methods shown below.
<Tip>
An [`IterableDataset`] is useful for iterative jobs like training a model.
You shouldn't use an [`IterableDataset`] for jobs that require random access to examples, because accessing an arbitrary example means iterating over the dataset with a for loop. Getting the last example in an iterable dataset would require you to iterate over all the previous examples.
You can find more details in the [Dataset vs. IterableDataset guide](./about_mapstyle_vs_iterable).
</Tip>
## Convert from a Dataset
If you have an existing [`Dataset`] object, you can convert it to an [`IterableDataset`] with the [`~Dataset.to_iterable_dataset`] function. This is actually faster than setting the `streaming=True` argument in [`load_dataset`] because the data is streamed from local files.
```py
>>> from datasets import load_dataset
# faster 🐇
>>> dataset = load_dataset("food101")
>>> iterable_dataset = dataset.to_iterable_dataset()
# slower 🐢
>>> iterable_dataset = load_dataset("food101", streaming=True)
```
The [`~Dataset.to_iterable_dataset`] function supports sharding when the [`IterableDataset`] is instantiated. This is useful when working with big datasets, and you'd like to shuffle the dataset or to enable fast parallel loading with a PyTorch DataLoader.
```py
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset("food101")
>>> iterable_dataset = dataset.to_iterable_dataset(num_shards=64) # shard the dataset
>>> iterable_dataset = iterable_dataset.shuffle(buffer_size=10_000) # shuffles the shards order and use a shuffle buffer when you start iterating
>>> dataloader = torch.utils.data.DataLoader(iterable_dataset, num_workers=4) # assigns 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating
```
## Shuffle
Like a regular [`Dataset`] object, you can also shuffle a [`IterableDataset`] with [`IterableDataset.shuffle`].
The `buffer_size` argument controls the size of the buffer to randomly sample examples from. Let's say your dataset has one million examples, and you set the `buffer_size` to ten thousand. [`IterableDataset.shuffle`] will randomly select examples from the first ten thousand examples in the buffer. Selected examples in the buffer are replaced with new examples. By default, the buffer size is 1,000.
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
>>> shuffled_dataset = dataset.shuffle(seed=42, buffer_size=10_000)
```
<Tip>
[`IterableDataset.shuffle`] will also shuffle the order of the shards if the dataset is sharded into multiple files.
</Tip>
## Reshuffle
Sometimes you may want to reshuffle the dataset after each epoch. This will require you to set a different seed for each epoch. Use [`IterableDataset.set_epoch`] in between epochs to tell the dataset what epoch you're on.
Your seed effectively becomes: `initial seed + current epoch`.
```py
>>> for epoch in range(epochs):
... shuffled_dataset.set_epoch(epoch)
... for example in shuffled_dataset:
... ...
```
## Split dataset
You can split your dataset one of two ways:
- [`IterableDataset.take`] returns the first `n` examples in a dataset:
```py
>>> dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
>>> dataset_head = dataset.take(2)
>>> list(dataset_head)
[{'id': 0, 'text': 'Mtendere Village was...'}, {'id': 1, 'text': 'Lily James cannot fight the music...'}]
```
- [`IterableDataset.skip`] omits the first `n` examples in a dataset and returns the remaining examples:
```py
>>> train_dataset = shuffled_dataset.skip(1000)
```
<Tip warning={true}>
`take` and `skip` prevent future calls to `shuffle` because they lock in the order of the shards. You should `shuffle` your dataset before splitting it.
</Tip>
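For example, a minimal sketch of the recommended order, shuffling before splitting, looks like this:
```py
>>> dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
>>> shuffled_dataset = dataset.shuffle(seed=42, buffer_size=10_000)  # shuffle first, while the shard order is still free
>>> head = shuffled_dataset.take(1000)  # first 1,000 shuffled examples
>>> rest = shuffled_dataset.skip(1000)  # everything after the first 1,000
```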
<a id='interleave_datasets'></a>
## Interleave
[`interleave_datasets`] can combine an [`IterableDataset`] with other datasets. The combined dataset returns alternating examples from each of the original datasets.
```py
>>> from datasets import interleave_datasets
>>> en_dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True, trust_remote_code=True)
>>> fr_dataset = load_dataset('oscar', "unshuffled_deduplicated_fr", split='train', streaming=True, trust_remote_code=True)
>>> multilingual_dataset = interleave_datasets([en_dataset, fr_dataset])
>>> list(multilingual_dataset.take(2))
[{'text': 'Mtendere Village was inspired by the vision...'}, {'text': "Média de débat d'idées, de culture et de littérature..."}]
```
Define sampling probabilities from each of the original datasets for more control over how each of them are sampled and combined. Set the `probabilities` argument with your desired sampling probabilities:
```py
>>> multilingual_dataset_with_oversampling = interleave_datasets([en_dataset, fr_dataset], probabilities=[0.8, 0.2], seed=42)
>>> list(multilingual_dataset_with_oversampling.take(2))
[{'text': 'Mtendere Village was inspired by the vision...'}, {'text': 'Lily James cannot fight the music...'}]
```
Around 80% of the final dataset is made of the `en_dataset`, and 20% of the `fr_dataset`.
You can also specify the `stopping_strategy`. The default strategy, `first_exhausted`, is a subsampling strategy, i.e. the dataset construction is stopped as soon as one of the datasets runs out of samples.
You can specify `stopping_strategy="all_exhausted"` to execute an oversampling strategy. In this case, the dataset construction is stopped as soon as every sample in every dataset has been added at least once. In practice, it means that if a dataset is exhausted, it will return to the beginning of this dataset until the stop criterion has been reached.
Note that if no sampling probabilities are specified, the new dataset will have `max_length_datasets * nb_dataset` samples.
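For example, reusing the `en_dataset` and `fr_dataset` from the snippet above, a sketch of the oversampling strategy could look like this:
```py
>>> from datasets import interleave_datasets
>>> multilingual_dataset = interleave_datasets(
...     [en_dataset, fr_dataset],
...     probabilities=[0.8, 0.2],
...     seed=42,
...     stopping_strategy="all_exhausted",  # keep sampling until every example of every dataset has been seen at least once
... )
```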
## Rename, remove, and cast
The following methods allow you to modify the columns of a dataset. These methods are useful for renaming or removing columns and changing columns to a new set of features.
### Rename
Use [`IterableDataset.rename_column`] when you need to rename a column in your dataset. Features associated with the original column are actually moved under the new column name, instead of just replacing the original column in-place.
Provide [`IterableDataset.rename_column`] with the name of the original column, and the new column name:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('mc4', 'en', streaming=True, split='train', trust_remote_code=True)
>>> dataset = dataset.rename_column("text", "content")
```
### Remove
When you need to remove one or more columns, give [`IterableDataset.remove_columns`] the name of the column to remove. Remove more than one column by providing a list of column names:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('mc4', 'en', streaming=True, split='train', trust_remote_code=True)
>>> dataset = dataset.remove_columns('timestamp')
```
### Cast
[`IterableDataset.cast`] changes the feature type of one or more columns. This method takes your new `Features` as its argument. The following sample code shows how to change the feature types of `ClassLabel` and `Value`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('glue', 'mrpc', split='train', streaming=True)
>>> dataset.features
{'sentence1': Value(dtype='string', id=None),
'sentence2': Value(dtype='string', id=None),
'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None),
'idx': Value(dtype='int32', id=None)}
>>> from datasets import ClassLabel, Value
>>> new_features = dataset.features.copy()
>>> new_features["label"] = ClassLabel(names=['negative', 'positive'])
>>> new_features["idx"] = Value('int64')
>>> dataset = dataset.cast(new_features)
>>> dataset.features
{'sentence1': Value(dtype='string', id=None),
'sentence2': Value(dtype='string', id=None),
'label': ClassLabel(num_classes=2, names=['negative', 'positive'], names_file=None, id=None),
'idx': Value(dtype='int64', id=None)}
```
<Tip>
Casting only works if the original feature type and new feature type are compatible. For example, you can cast a column with the feature type `Value('int32')` to `Value('bool')` if the original column only contains ones and zeros.
</Tip>
Use [`IterableDataset.cast_column`] to change the feature type of just one column. Pass the column name and its new feature type as arguments:
```py
>>> dataset.features
{'audio': Audio(sampling_rate=44100, mono=True, id=None)}
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
>>> dataset.features
{'audio': Audio(sampling_rate=16000, mono=True, id=None)}
```
## Map
Similar to the [`Dataset.map`] function for a regular [`Dataset`], 🤗 Datasets features [`IterableDataset.map`] for processing an [`IterableDataset`].
[`IterableDataset.map`] applies processing on-the-fly when examples are streamed.
It allows you to apply a processing function to each example in a dataset, independently or in batches. This function can even create new rows and columns.
The following example demonstrates how to apply a processing function to an [`IterableDataset`]. The function needs to accept and output a `dict`:
```py
>>> def add_prefix(example):
... example['text'] = 'My text: ' + example['text']
... return example
```
Next, apply this function to the dataset with [`IterableDataset.map`]:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('oscar', 'unshuffled_deduplicated_en', streaming=True, split='train', trust_remote_code=True)
>>> updated_dataset = dataset.map(add_prefix)
>>> list(updated_dataset.take(3))
[{'id': 0, 'text': 'My text: Mtendere Village was inspired by...'},
{'id': 1, 'text': 'My text: Lily James cannot fight the music...'},
{'id': 2, 'text': 'My text: "I\'d love to help kickstart...'}]
```
Let's take a look at another example, except this time, you will remove a column with [`IterableDataset.map`]. When you remove a column, it is only removed after the example has been provided to the mapped function. This allows the mapped function to use the content of the columns before they are removed.
Specify the column to remove with the `remove_columns` argument in [`IterableDataset.map`]:
```py
>>> updated_dataset = dataset.map(add_prefix, remove_columns=["id"])
>>> list(updated_dataset.take(3))
[{'text': 'My text: Mtendere Village was inspired by...'},
{'text': 'My text: Lily James cannot fight the music...'},
{'text': 'My text: "I\'d love to help kickstart...'}]
```
### Batch processing
[`IterableDataset.map`] also supports working with batches of examples. Operate on batches by setting `batched=True`. The default batch size is 1000, but you can adjust it with the `batch_size` argument. This opens the door to many interesting applications such as tokenization, splitting long sentences into shorter chunks, and data augmentation.
#### Tokenization
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> dataset = load_dataset("mc4", "en", streaming=True, split="train", trust_remote_code=True)
>>> tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
>>> def encode(examples):
... return tokenizer(examples['text'], truncation=True, padding='max_length')
>>> dataset = dataset.map(encode, batched=True, remove_columns=["text", "timestamp", "url"])
>>> next(iter(dataset))
{'input_ids': [101, 8466, 1018, 1010, 4029, 2475, 2062, 18558, 3100, 2061, ...,1106, 3739, 102],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ..., 1, 1]}
```
<Tip>
See other examples of batch processing in the [batched map processing](./process#batch-processing) documentation. They work the same for iterable datasets.
</Tip>
### Filter
You can filter rows in the dataset based on a predicate function using [`IterableDataset.filter`]. It returns rows that match a specified condition:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('oscar', 'unshuffled_deduplicated_en', streaming=True, split='train', trust_remote_code=True)
>>> start_with_ar = dataset.filter(lambda example: example['text'].startswith('Ar'))
>>> next(iter(start_with_ar))
{'id': 4, 'text': 'Are you looking for Number the Stars (Essential Modern Classics)?...'}
```
[`IterableDataset.filter`] can also filter by indices if you set `with_indices=True`:
```py
>>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True)
>>> list(even_dataset.take(3))
[{'id': 0, 'text': 'Mtendere Village was inspired by the vision of Chief Napoleon Dzombe, ...'},
{'id': 2, 'text': '"I\'d love to help kickstart continued development! And 0 EUR/month...'},
{'id': 4, 'text': 'Are you looking for Number the Stars (Essential Modern Classics)? Normally, ...'}]
```
## Stream in a training loop
[`IterableDataset`] can be integrated into a training loop. First, shuffle the dataset:
<frameworkcontent>
<pt>
```py
>>> seed, buffer_size = 42, 10_000
>>> dataset = dataset.shuffle(seed, buffer_size=buffer_size)
```
Lastly, create a simple training loop and start training:
```py
>>> import torch
>>> from torch.utils.data import DataLoader
>>> from transformers import AutoModelForMaskedLM, DataCollatorForLanguageModeling
>>> from tqdm import tqdm
>>> dataset = dataset.with_format("torch")
>>> dataloader = DataLoader(dataset, collate_fn=DataCollatorForLanguageModeling(tokenizer))
>>> device = 'cuda' if torch.cuda.is_available() else 'cpu'
>>> model = AutoModelForMaskedLM.from_pretrained("distilbert-base-uncased")
>>> model.train().to(device)
>>> optimizer = torch.optim.AdamW(params=model.parameters(), lr=1e-5)
>>> for epoch in range(3):
... dataset.set_epoch(epoch)
... for i, batch in enumerate(tqdm(dataloader, total=5)):
... if i == 5:
... break
... batch = {k: v.to(device) for k, v in batch.items()}
... outputs = model(**batch)
... loss = outputs[0]
... loss.backward()
... optimizer.step()
... optimizer.zero_grad()
... if i % 10 == 0:
... print(f"loss: {loss}")
```
</pt>
</frameworkcontent>
<!-- TODO: Write the TF content! -->
| datasets/docs/source/stream.mdx/0 | {
"file_path": "datasets/docs/source/stream.mdx",
"repo_id": "datasets",
"token_count": 5324
} | 55 |
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BLEU metric. """
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
"""
_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
),
}
),
codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
],
)
def _compute(self, predictions, references, max_order=4, smooth=False):
score = compute_bleu(
reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
)
(bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| datasets/metrics/bleu/bleu.py/0 | {
"file_path": "datasets/metrics/bleu/bleu.py",
"repo_id": "datasets",
"token_count": 2140
} | 56 |
# Metric Card for CUAD
## Metric description
This metric wraps the official scoring script for version 1 of the [Contract Understanding Atticus Dataset (CUAD)](https://huggingface.co/datasets/cuad), which is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
The CUAD metric computes several scores: [Exact Match](https://huggingface.co/metrics/exact_match), [F1 score](https://huggingface.co/metrics/f1), Area Under the Precision-Recall Curve, [Precision](https://huggingface.co/metrics/precision) at 80% [recall](https://huggingface.co/metrics/recall) and Precision at 90% recall.
## How to use
The CUAD metric takes two inputs :
`predictions`, a list of question-answer dictionaries with the following key-values:
- `id`: the id of the question-answer pair as given in the references.
- `prediction_text`: a list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction.
`references`: a list of question-answer dictionaries with the following key-values:
- `id`: the id of the question-answer pair (the same as above).
- `answers`: a dictionary *in the CUAD dataset format* with the following keys:
- `text`: a list of possible texts for the answer, as a list of strings.
- `answer_start`: a list of start positions for the answer, as a list of ints.
Note that `answer_start` values are not taken into account to compute the metric.
```python
from datasets import load_metric
cuad_metric = load_metric("cuad")
predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
results = cuad_metric.compute(predictions=predictions, references=references)
```
## Output values
The output of the CUAD metric consists of a dictionary that contains one or several of the following metrics:
`exact_match`: The normalized answers that exactly match the reference answer, with a range between 0.0 and 1.0 (see [exact match](https://huggingface.co/metrics/exact_match) for more information).
`f1`: The harmonic mean of the precision and recall (see [F1 score](https://huggingface.co/metrics/f1) for more information). Its range is between 0.0 and 1.0 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 1.0, which means perfect precision and recall.
`aupr`: The Area Under the Precision-Recall curve, with a range between 0.0 and 1.0, with a higher value representing both high recall and high precision, and a low value representing low values for both. See the [Wikipedia article](https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve) for more information.
`prec_at_80_recall`: The fraction of true examples among the predicted examples at a recall rate of 80%. Its range is between 0.0 and 1.0. For more information, see [precision](https://huggingface.co/metrics/precision) and [recall](https://huggingface.co/metrics/recall).
`prec_at_90_recall`: The fraction of true examples among the predicted examples at a recall rate of 90%. Its range is between 0.0 and 1.0.
### Values from popular papers
The [original CUAD paper](https://arxiv.org/pdf/2103.06268.pdf) reports that a [DeBERTa model](https://huggingface.co/microsoft/deberta-base) attains
an AUPR of 47.8%, a Precision at 80% Recall of 44.0%, and a Precision at 90% Recall of 17.8% (they do not report F1 or Exact Match separately).
For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/cuad).
## Examples
Maximal values :
```python
from datasets import load_metric
cuad_metric = load_metric("cuad")
predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
results = cuad_metric.compute(predictions=predictions, references=references)
print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
```
Minimal values:
```python
from datasets import load_metric
cuad_metric = load_metric("cuad")
predictions = [{'prediction_text': ['The Company appoints the Distributor as an exclusive distributor of Products in the Market, subject to the terms and conditions of this Agreement.'], 'id': 'LIMEENERGYCO_09_09_1999-EX-10-DISTRIBUTOR AGREEMENT__Exclusivity_0'}]
references = [{'answers': {'answer_start': [143], 'text': 'The seller'}, 'id': 'LIMEENERGYCO_09_09_1999-EX-10-DISTRIBUTOR AGREEMENT__Exclusivity_0'}]
results = cuad_metric.compute(predictions=predictions, references=references)
print(results)
{'exact_match': 0.0, 'f1': 0.0, 'aupr': 0.0, 'prec_at_80_recall': 0, 'prec_at_90_recall': 0}
```
Partial match:
```python
from datasets import load_metric
cuad_metric = load_metric("cuad")
predictions = [{'prediction_text': ['The Company appoints the Distributor as an exclusive distributor of Products in the Market, subject to the terms and conditions of this Agreement.', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
results = cuad_metric.compute(predictions=predictions, references=references)
print(results)
{'exact_match': 100.0, 'f1': 50.0, 'aupr': 0.0, 'prec_at_80_recall': 0, 'prec_at_90_recall': 0}
```
## Limitations and bias
This metric works only with datasets that have the same format as the [CUAD dataset](https://huggingface.co/datasets/cuad). The limitations and biases of this dataset are not discussed, but it could exhibit annotation bias given the homogeneity of annotators for this dataset.
In terms of the metric itself, the accuracy of AUPR has been debated: its estimates are quite noisy, and reducing the Precision-Recall curve to a single number ignores the tradeoffs between the different plotted performance points rather than reflecting the performance of an individual system. Reporting the original F1 and exact match scores is therefore useful to ensure a more complete representation of system performance.
## Citation
```bibtex
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
```
## Further References
- [CUAD dataset homepage](https://www.atticusprojectai.org/cuad-v1-performance-announcements)
| datasets/metrics/cuad/README.md/0 | {
"file_path": "datasets/metrics/cuad/README.md",
"repo_id": "datasets",
"token_count": 2380
} | 57 |
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAE - Mean Absolute Error Metric"""
from sklearn.metrics import mean_absolute_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Absolute Error (MAE) is the mean of the absolute differences between the predicted and actual values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
Returns:
mae : mean absolute error.
If multioutput is "raw_values", then mean absolute error is returned for each output separately. If multioutput is "uniform_average" or an ndarray of weights, then the weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples:
>>> mae_metric = datasets.load_metric("mae")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mae_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mae': 0.5}
If you're using multi-dimensional lists, then set the config as follows:
>>> mae_metric = datasets.load_metric("mae", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mae_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mae': 0.75}
>>> results = mae_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results)
{'mae': array([0.5, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mae(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(self._get_feature_types()),
reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html"
],
)
def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float")),
"references": datasets.Sequence(datasets.Value("float")),
}
else:
return {
"predictions": datasets.Value("float"),
"references": datasets.Value("float"),
}
def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average"):
mae_score = mean_absolute_error(references, predictions, sample_weight=sample_weight, multioutput=multioutput)
return {"mae": mae_score}
| datasets/metrics/mae/mae.py/0 | {
"file_path": "datasets/metrics/mae/mae.py",
"repo_id": "datasets",
"token_count": 1662
} | 58 |
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perplexity Metric."""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"input_texts": datasets.Value("string"),
}
),
reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
)
def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
device = "cuda"
else:
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForCausalLM.from_pretrained(model_id)
model = model.to(device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
# check that the model already has at least one special token defined
assert (
len(existing_special_tokens) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
max_tokenized_len = model.config.max_length - 1
else:
max_tokenized_len = model.config.max_length
encodings = tokenizer(
input_texts,
add_special_tokens=False,
padding=True,
truncation=True,
max_length=max_tokenized_len,
return_tensors="pt",
return_attention_mask=True,
).to(device)
encoded_texts = encodings["input_ids"]
attn_masks = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1), 2)
), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
ppls = []
loss_fct = CrossEntropyLoss(reduction="none")
for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
end_index = min(start_index + batch_size, len(encoded_texts))
encoded_batch = encoded_texts[start_index:end_index]
attn_mask = attn_masks[start_index:end_index]
if add_start_token:
bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
attn_mask = torch.cat(
[torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
)
labels = encoded_batch
with torch.no_grad():
out_logits = model(encoded_batch, attention_mask=attn_mask).logits
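# align positions so that the logits at position t are scored against the token at position t+1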
shift_logits = out_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
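# average the per-token negative log-likelihood over non-padded positions, then exponentiate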
perplexity_batch = torch.exp(
(loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
/ shift_attention_mask_batch.sum(1)
)
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| datasets/metrics/perplexity/perplexity.py/0 | {
"file_path": "datasets/metrics/perplexity/perplexity.py",
"repo_id": "datasets",
"token_count": 3550
} | 59 |
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spearman correlation coefficient metric."""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("float"),
"references": datasets.Value("float"),
}
),
reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
)
def _compute(self, predictions, references, return_pvalue=False):
results = spearmanr(references, predictions)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| datasets/metrics/spearmanr/spearmanr.py/0 | {
"file_path": "datasets/metrics/spearmanr/spearmanr.py",
"repo_id": "datasets",
"token_count": 1942
} | 60 |
# Metric Card for XNLI
## Metric description
The XNLI metric allows you to evaluate a model's score on the [XNLI dataset](https://huggingface.co/datasets/xnli), which is a subset of a few thousand examples from the [MNLI dataset](https://huggingface.co/datasets/glue/viewer/mnli) that have been translated into 14 different languages, some of which are relatively low-resource, such as Swahili and Urdu.
As with MNLI, the task is to predict textual entailment (does sentence A imply, contradict, or neither imply nor contradict sentence B); it is framed as a classification task (given two sentences, predict one of three labels).
## How to use
The XNLI metric is computed based on the `predictions` (a list of predicted labels) and the `references` (a list of ground truth labels).
```python
from datasets import load_metric
xnli_metric = load_metric("xnli")
predictions = [0, 1]
references = [0, 1]
results = xnli_metric.compute(predictions=predictions, references=references)
```
## Output values
The output of the XNLI metric is simply the `accuracy`, i.e. the proportion of correct predictions among the total number of cases processed, with a range between 0 and 1 (see [accuracy](https://huggingface.co/metrics/accuracy) for more information).
### Values from popular papers
The [original XNLI paper](https://arxiv.org/pdf/1809.05053.pdf) reported accuracies ranging from 59.3 (for `ur`) to 73.7 (for `en`) for the BiLSTM-max model.
For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/xnli).
## Examples
Maximal values:
```python
>>> from datasets import load_metric
>>> xnli_metric = load_metric("xnli")
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
```
Minimal values:
```python
>>> from datasets import load_metric
>>> xnli_metric = load_metric("xnli")
>>> predictions = [1, 0]
>>> references = [0, 1]
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 0.0}
```
Partial match:
```python
>>> from datasets import load_metric
>>> xnli_metric = load_metric("xnli")
>>> predictions = [1, 0, 1]
>>> references = [1, 0, 0]
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 0.6666666666666666}
```
## Limitations and bias
While accuracy alone does give a certain indication of performance, it can be supplemented by error analysis and a better understanding of the model's mistakes on each of the categories represented in the dataset, especially if they are unbalanced.
While the XNLI dataset is multilingual and represents a diversity of languages, in reality, cross-lingual sentence understanding goes beyond translation, given that there are many cultural differences that have an impact on human sentiment annotations. Since the XNLI dataset was obtained by translation based on English sentences, it does not capture these cultural differences.
## Citation
```bibtex
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
```
## Further References
- [XNLI Dataset GitHub](https://github.com/facebookresearch/XNLI)
- [HuggingFace Tasks -- Text Classification](https://huggingface.co/tasks/text-classification)
| datasets/metrics/xnli/README.md/0 | {
"file_path": "datasets/metrics/xnli/README.md",
"repo_id": "datasets",
"token_count": 1226
} | 61 |
#!/usr/bin/env python
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
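"""Convert a flat list like ["--key1", "value1", "--key2", "value2"] into {"key1": "value1", "key2": "value2"}, stripping leading dashes from keys."""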
return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
parser = ArgumentParser(
"HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
)
commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(commands_parser)
EnvironmentCommand.register_subcommand(commands_parser)
TestCommand.register_subcommand(commands_parser)
RunBeamCommand.register_subcommand(commands_parser)
DummyDataCommand.register_subcommand(commands_parser)
# Parse args
args, unknown_args = parser.parse_known_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
kwargs = parse_unknown_args(unknown_args)
# Run
service = args.func(args, **kwargs)
service.run()
if __name__ == "__main__":
main()
| datasets/src/datasets/commands/datasets_cli.py/0 | {
"file_path": "datasets/src/datasets/commands/datasets_cli.py",
"repo_id": "datasets",
"token_count": 473
} | 62 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.download_config import DownloadConfig
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
"""Audio [`Feature`] to extract audio data from an audio file.
Input: The Audio feature accepts as input:
- A `str`: Absolute path to the audio file (i.e. random access is allowed).
- A `dict` with the keys:
- `path`: String with relative path of the audio file to the archive file.
- `bytes`: Bytes content of the audio file.
This is useful for archived files with sequential access.
- A `dict` with the keys:
- `path`: String with relative path of the audio file to the archive file.
- `array`: Array containing the audio sample
- `sampling_rate`: Integer corresponding to the sampling rate of the audio sample.
This is useful for archived files with sequential access.
Args:
sampling_rate (`int`, *optional*):
Target sampling rate. If `None`, the native sampling rate is used.
mono (`bool`, defaults to `True`):
Whether to convert the audio signal to mono by averaging samples across
channels.
decode (`bool`, defaults to `True`):
Whether to decode the audio data. If `False`,
returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`.
Example:
```py
>>> from datasets import load_dataset, Audio
>>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
>>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
>>> ds[0]["audio"]
{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 16000}
```
"""
sampling_rate: Optional[int] = None
mono: bool = True
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Audio", init=False, repr=False)
def __call__(self):
return self.pa_type
def encode_example(self, value: Union[str, bytes, dict]) -> dict:
"""Encode example into a format for Arrow.
Args:
value (`str` or `dict`):
Data passed as input to Audio feature.
Returns:
`dict`
"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
if isinstance(value, str):
return {"bytes": None, "path": value}
elif isinstance(value, bytes):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
buffer = BytesIO()
sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm"):
# "PCM" only has raw audio bytes
if value.get("sampling_rate") is None:
# Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
if value.get("bytes"):
# If the PCM bytes are already provided, use them directly instead of reading the file again
bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
else:
bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
buffer = BytesIO(bytes())
sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
)
def decode_example(
self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
) -> dict:
"""Decode example audio file into audio data.
Args:
value (`dict`):
A dictionary with keys:
- `path`: String with relative audio file path.
- `bytes`: Bytes of the audio file.
token_per_repo_id (`dict`, *optional*):
To access and decode
audio files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`)
Returns:
`dict`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
if file is None:
token_per_repo_id = token_per_repo_id or {}
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id[repo_id]
except (ValueError, KeyError):
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
array, sampling_rate = sf.read(f)
else:
array, sampling_rate = sf.read(file)
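# soundfile returns data with shape (frames, channels); transpose so librosa gets (channels, frames)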
array = array.T
if self.mono:
array = librosa.to_mono(array)
if self.sampling_rate and self.sampling_rate != sampling_rate:
array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature.")
return {
"bytes": Value("binary"),
"path": Value("string"),
}
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
"""Cast an Arrow array to the Audio arrow storage type.
The Arrow types that can be converted to the Audio pyarrow storage type are:
- `pa.string()` - it must contain the "path" data
- `pa.binary()` - it must contain the audio bytes
- `pa.struct({"bytes": pa.binary()})`
- `pa.struct({"path": pa.string()})`
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
Args:
storage (`Union[pa.StringArray, pa.StructArray]`):
PyArrow array to cast.
Returns:
`pa.StructArray`: Array in the Audio arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
return array_cast(storage, self.pa_type)
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
"""Embed audio files into the Arrow array.
Args:
storage (`pa.StructArray`):
PyArrow array to embed.
Returns:
`pa.StructArray`: Array in the Audio arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
@no_op_if_value_is_null
def path_to_bytes(path):
with xopen(path, "rb") as f:
bytes_ = f.read()
return bytes_
bytes_array = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],
type=pa.binary(),
)
path_array = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
type=pa.string(),
)
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
return array_cast(storage, self.pa_type)
| datasets/src/datasets/features/audio.py/0 | {
"file_path": "datasets/src/datasets/features/audio.py",
"repo_id": "datasets",
"token_count": 5335
} | 63 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Optional[datasets.Features] = None
class Arrow(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ArrowConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
try:
for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
| datasets/src/datasets/packaged_modules/arrow/arrow.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/arrow/arrow.py",
"repo_id": "datasets",
"token_count": 1473
} | 64 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
"""BuilderConfig for Pandas."""
features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = PandasConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for i, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f))
yield i, self._cast_table(pa_table)
| datasets/src/datasets/packaged_modules/pandas/pandas.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/pandas/pandas.py",
"repo_id": "datasets",
"token_count": 941
} | 65 |
import importlib
import inspect
from functools import wraps
from typing import TYPE_CHECKING, Optional
from .download.download_config import DownloadConfig
from .download.streaming_download_manager import (
xbasename,
xdirname,
xet_parse,
xexists,
xgetsize,
xglob,
xgzip_open,
xisdir,
xisfile,
xjoin,
xlistdir,
xnumpy_load,
xopen,
xpandas_read_csv,
xpandas_read_excel,
xPath,
xpyarrow_parquet_read_table,
xrelpath,
xsio_loadmat,
xsplit,
xsplitext,
xwalk,
xxml_dom_minidom_parse,
)
from .utils.logging import get_logger
from .utils.patching import patch_submodule
from .utils.py_utils import get_imports
logger = get_logger(__name__)
if TYPE_CHECKING:
from .builder import DatasetBuilder
def extend_module_for_streaming(module_path, download_config: Optional[DownloadConfig] = None):
"""Extend the module to support streaming.
We patch some functions in the module to use `fsspec` to support data streaming:
- We use `fsspec.open` to open and read remote files. We patch the module function:
- `open`
- We use the "::" hop separator to join paths and navigate remote compressed/archive files. We patch the module
functions:
- `os.path.join`
- `pathlib.Path.joinpath` and `pathlib.Path.__truediv__` (called when using the "/" operator)
The patched functions are replaced with custom functions defined to work with the
:class:`~download.streaming_download_manager.StreamingDownloadManager`.
Args:
module_path: Path to the module to be extended.
download_config: mainly uses `use_auth_token` or `storage_options` to support different platforms and auth types.
"""
module = importlib.import_module(module_path)
# TODO(QL): always update the module to add subsequent new authentication without removing old ones
if hasattr(module, "_patched_for_streaming") and module._patched_for_streaming:
if isinstance(module._patched_for_streaming, DownloadConfig):
module._patched_for_streaming.token = download_config.token
module._patched_for_streaming.storage_options = download_config.storage_options
return
def wrap_auth(function):
@wraps(function)
def wrapper(*args, **kwargs):
return function(*args, download_config=download_config, **kwargs)
wrapper._decorator_name_ = "wrap_auth"
return wrapper
# open files in a streaming fashion
patch_submodule(module, "open", wrap_auth(xopen)).start()
patch_submodule(module, "os.listdir", wrap_auth(xlistdir)).start()
patch_submodule(module, "os.walk", wrap_auth(xwalk)).start()
patch_submodule(module, "glob.glob", wrap_auth(xglob)).start()
# allow to navigate in remote zip files
patch_submodule(module, "os.path.join", xjoin).start()
patch_submodule(module, "os.path.dirname", xdirname).start()
patch_submodule(module, "os.path.basename", xbasename).start()
patch_submodule(module, "os.path.relpath", xrelpath).start()
patch_submodule(module, "os.path.split", xsplit).start()
patch_submodule(module, "os.path.splitext", xsplitext).start()
# allow checks on paths
patch_submodule(module, "os.path.exists", wrap_auth(xexists)).start()
patch_submodule(module, "os.path.isdir", wrap_auth(xisdir)).start()
patch_submodule(module, "os.path.isfile", wrap_auth(xisfile)).start()
patch_submodule(module, "os.path.getsize", wrap_auth(xgetsize)).start()
patch_submodule(module, "pathlib.Path", xPath).start()
# file readers
patch_submodule(module, "gzip.open", wrap_auth(xgzip_open)).start()
patch_submodule(module, "numpy.load", wrap_auth(xnumpy_load)).start()
patch_submodule(module, "pandas.read_csv", wrap_auth(xpandas_read_csv), attrs=["__version__"]).start()
patch_submodule(module, "pandas.read_excel", wrap_auth(xpandas_read_excel), attrs=["__version__"]).start()
patch_submodule(module, "scipy.io.loadmat", wrap_auth(xsio_loadmat), attrs=["__version__"]).start()
patch_submodule(module, "xml.etree.ElementTree.parse", wrap_auth(xet_parse)).start()
patch_submodule(module, "xml.dom.minidom.parse", wrap_auth(xxml_dom_minidom_parse)).start()
# pyarrow: do not patch pyarrow attribute in packaged modules
if not module.__name__.startswith("datasets.packaged_modules."):
patch_submodule(module, "pyarrow.parquet.read_table", wrap_auth(xpyarrow_parquet_read_table)).start()
module._patched_for_streaming = download_config
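# Illustrative usage sketch (not part of the original module): patch a dataset script module so that
# its file-handling calls can stream remote data, e.g.:
#
#   from datasets.download.download_config import DownloadConfig
#   extend_module_for_streaming("datasets.packaged_modules.json.json", download_config=DownloadConfig())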
def extend_dataset_builder_for_streaming(builder: "DatasetBuilder"):
"""Extend the dataset builder module and the modules imported by it to support streaming.
Args:
builder (:class:`DatasetBuilder`): Dataset builder instance.
"""
# this extends the open and os.path.join functions for data streaming
download_config = DownloadConfig(storage_options=builder.storage_options, token=builder.token)
extend_module_for_streaming(builder.__module__, download_config=download_config)
# if needed, we also have to extend additional internal imports (like wmt14 -> wmt_utils)
if not builder.__module__.startswith("datasets."): # check that it's not a packaged builder like csv
for imports in get_imports(inspect.getfile(builder.__class__)):
if imports[0] == "internal":
internal_import_name = imports[1]
internal_module_name = ".".join(builder.__module__.split(".")[:-1] + [internal_import_name])
extend_module_for_streaming(internal_module_name, download_config=download_config)
# builders can inherit from other builders that might use streaming functionality
# (for example, ImageFolder and AudioFolder inherit from FolderBuilder which implements examples generation)
# but these parents builders are not patched automatically as they are not instantiated, so we patch them here
from .builder import DatasetBuilder
parent_builder_modules = [
cls.__module__
for cls in type(builder).__mro__[1:] # make sure it's not the same module we've already patched
if issubclass(cls, DatasetBuilder) and cls.__module__ != DatasetBuilder.__module__
] # check it's not a standard builder from datasets.builder
for module in parent_builder_modules:
extend_module_for_streaming(module, download_config=download_config)
| datasets/src/datasets/streaming.py/0 | {
"file_path": "datasets/src/datasets/streaming.py",
"repo_id": "datasets",
"token_count": 2300
} | 66 |
import enum
import inspect
import warnings
from functools import wraps
from typing import Callable, Optional
from .logging import get_logger
_emitted_deprecation_warnings = set()
logger = get_logger(__name__)
def deprecated(help_message: Optional[str] = None):
"""Decorator to mark a class or a function as deprecated.
Args:
help_message (:obj:`str`, optional): An optional message to guide the user on how to
switch to non-deprecated usage of the library.
"""
def decorator(deprecated_class_or_function: Callable):
global _emitted_deprecation_warnings
if inspect.isclass(deprecated_class_or_function):
deprecated_function = deprecated_class_or_function.__init__
name = deprecated_class_or_function.__name__
else:
deprecated_function = deprecated_class_or_function
name = deprecated_function.__name__
# Support deprecating __init__ class method: class name instead
name = name if name != "__init__" else deprecated_function.__qualname__.split(".")[-2]
warning_msg = (
f"{name} is deprecated and will be removed in the next major version of datasets."
+ (f" {help_message}" if help_message else "")
)
@wraps(deprecated_function)
def wrapper(*args, **kwargs):
func_hash = hash(deprecated_function)
if func_hash not in _emitted_deprecation_warnings:
warnings.warn(warning_msg, category=FutureWarning, stacklevel=2)
_emitted_deprecation_warnings.add(func_hash)
return deprecated_function(*args, **kwargs)
wrapper._decorator_name_ = "deprecated"
if inspect.isclass(deprecated_class_or_function):
deprecated_class_or_function.__init__ = wrapper
return deprecated_class_or_function
else:
return wrapper
return decorator
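# Illustrative usage sketch (not part of the original module):
#
#   @deprecated("Use `new_function` instead.")
#   def old_function(x):
#       return x
#
# The first call to `old_function` emits a FutureWarning; later calls stay silent because the
# warning is only emitted once per deprecated function.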
class OnAccess(enum.EnumMeta):
"""
Enum metaclass that calls a user-specified function whenever a member is accessed.
"""
def __getattribute__(cls, name):
obj = super().__getattribute__(name)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj
def __getitem__(cls, name):
member = super().__getitem__(name)
if member._on_access:
member._on_access()
return member
def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj
class DeprecatedEnum(enum.Enum, metaclass=OnAccess):
"""
Enum class that calls `deprecate` method whenever a member is accessed.
"""
def __new__(cls, value):
member = object.__new__(cls)
member._value_ = value
member._on_access = member.deprecate
return member
@property
def help_message(self):
return ""
def deprecate(self):
help_message = f" {self.help_message}" if self.help_message else ""
warnings.warn(
f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets."
+ help_message,
FutureWarning,
stacklevel=3,
)
| datasets/src/datasets/utils/deprecation_utils.py/0 | {
"file_path": "datasets/src/datasets/utils/deprecation_utils.py",
"repo_id": "datasets",
"token_count": 1426
} | 67 |
{
"code": "Programming language (C++, Java, Javascript, Python, etc.)",
"aa": "Afar",
"aaa": "Ghotuo",
"aab": "Alumu-Tesu",
"aac": "Ari",
"aad": "Amal",
"aae": "Arbëreshë Albanian",
"aaf": "Aranadan",
"aag": "Ambrak",
"aah": "Abu' Arapesh",
"aai": "Arifama-Miniafia",
"aak": "Ankave",
"aal": "Afade",
"aan": "Anambé",
"aao": "Algerian Saharan Arabic",
"aap": "Pará Arára",
"aaq": "Eastern Abnaki",
"aas": "Aasáx",
"aat": "Arvanitika Albanian",
"aau": "Abau",
"aav": "Austro-Asiatic languages",
"aaw": "Solong",
"aax": "Mandobo Atas",
"aaz": "Amarasi",
"ab": "Abkhazian",
"aba": "Abé",
"abb": "Bankon",
"abc": "Ambala Ayta",
"abd": "Manide",
"abe": "Western Abnaki",
"abf": "Abai Sungai",
"abg": "Abaga",
"abh": "Tajiki Arabic",
"abi": "Abidji",
"abj": "Aka-Bea",
"abl": "Lampung Nyo",
"abm": "Abanyom",
"abn": "Abua",
"abo": "Abon",
"abp": "Abellen Ayta",
"abq": "Abaza",
"abr": "Abron",
"abs": "Ambonese Malay",
"abt": "Ambulas",
"abu": "Abure",
"abv": "Baharna Arabic",
"abw": "Pal",
"abx": "Inabaknon",
"aby": "Aneme Wake",
"abz": "Abui",
"aca": "Achagua",
"acb": "Áncá",
"acd": "Gikyode",
"ace": "Achinese",
"acf": "Saint Lucian Creole French",
"ach": "Acoli",
"aci": "Aka-Cari",
"ack": "Aka-Kora",
"acl": "Akar-Bale",
"acm": "Mesopotamian Arabic",
"acn": "Achang",
"acp": "Eastern Acipa",
"acq": "Ta'izzi-Adeni Arabic",
"acr": "Achi",
"acs": "Acroá",
"act": "Achterhoeks",
"acu": "Achuar-Shiwiar",
"acv": "Achumawi",
"acw": "Hijazi Arabic",
"acx": "Omani Arabic",
"acy": "Cypriot Arabic",
"acz": "Acheron",
"ada": "Adangme",
"adb": "Atauran",
"add": "Lidzonka; Dzodinka",
"ade": "Adele",
"adf": "Dhofari Arabic",
"adg": "Andegerebinha",
"adh": "Adhola",
"adi": "Adi",
"adj": "Adioukrou",
"adl": "Galo",
"adn": "Adang",
"ado": "Abu",
"adq": "Adangbe",
"adr": "Adonara",
"ads": "Adamorobe Sign Language",
"adt": "Adnyamathanha",
"adu": "Aduge",
"adw": "Amundava",
"adx": "Amdo Tibetan",
"ady": "Adyghe; Adygei",
"adz": "Adzera",
"ae": "Avestan",
"aea": "Areba",
"aeb": "Tunisian Arabic",
"aec": "Saidi Arabic",
"aed": "Argentine Sign Language",
"aee": "Northeast Pashai; Northeast Pashayi",
"aek": "Haeke",
"ael": "Ambele",
"aem": "Arem",
"aen": "Armenian Sign Language",
"aeq": "Aer",
"aer": "Eastern Arrernte",
"aes": "Alsea",
"aeu": "Akeu",
"aew": "Ambakich",
"aey": "Amele",
"aez": "Aeka",
"af": "Afrikaans",
"afa": "Afro-Asiatic languages",
"afb": "Gulf Arabic",
"afd": "Andai",
"afe": "Putukwam",
"afg": "Afghan Sign Language",
"afh": "Afrihili",
"afi": "Akrukay; Chini",
"afk": "Nanubae",
"afn": "Defaka",
"afo": "Eloyi",
"afp": "Tapei",
"afs": "Afro-Seminole Creole",
"aft": "Afitti",
"afu": "Awutu",
"afz": "Obokuitai",
"aga": "Aguano",
"agb": "Legbo",
"agc": "Agatu",
"agd": "Agarabi",
"age": "Angal",
"agf": "Arguni",
"agg": "Angor",
"agh": "Ngelima",
"agi": "Agariya",
"agj": "Argobba",
"agk": "Isarog Agta",
"agl": "Fembe",
"agm": "Angaataha",
"agn": "Agutaynen",
"ago": "Tainae",
"agq": "Aghem",
"agr": "Aguaruna",
"ags": "Esimbi",
"agt": "Central Cagayan Agta",
"agu": "Aguacateco",
"agv": "Remontado Dumagat",
"agw": "Kahua",
"agx": "Aghul",
"agy": "Southern Alta",
"agz": "Mt. Iriga Agta",
"aha": "Ahanta",
"ahb": "Axamb",
"ahg": "Qimant",
"ahh": "Aghu",
"ahi": "Tiagbamrin Aizi",
"ahk": "Akha",
"ahl": "Igo",
"ahm": "Mobumrin Aizi",
"ahn": "Àhàn",
"aho": "Ahom",
"ahp": "Aproumu Aizi",
"ahr": "Ahirani",
"ahs": "Ashe",
"aht": "Ahtena",
"aia": "Arosi",
"aib": "Ainu (China)",
"aic": "Ainbai",
"aid": "Alngith",
"aie": "Amara",
"aif": "Agi",
"aig": "Antigua and Barbuda Creole English",
"aih": "Ai-Cham",
"aii": "Assyrian Neo-Aramaic",
"aij": "Lishanid Noshan",
"aik": "Ake",
"ail": "Aimele",
"aim": "Aimol",
"ain": "Ainu (Japan)",
"aio": "Aiton",
"aip": "Burumakok",
"aiq": "Aimaq",
"air": "Airoran",
"ait": "Arikem",
"aiw": "Aari",
"aix": "Aighon",
"aiy": "Ali",
"aja": "Aja (South Sudan)",
"ajg": "Aja (Benin)",
"aji": "Ajië",
"ajn": "Andajin",
"ajp": "South Levantine Arabic",
"ajs": "Algerian Jewish Sign Language",
"aju": "Judeo-Moroccan Arabic",
"ajw": "Ajawa",
"ajz": "Amri Karbi",
"ak": "Akan",
"akb": "Batak Angkola",
"akc": "Mpur",
"akd": "Ukpet-Ehom",
"ake": "Akawaio",
"akf": "Akpa",
"akg": "Anakalangu",
"akh": "Angal Heneng",
"aki": "Aiome",
"akj": "Aka-Jeru",
"akk": "Akkadian",
"akl": "Aklanon",
"akm": "Aka-Bo",
"ako": "Akurio",
"akp": "Siwu",
"akq": "Ak",
"akr": "Araki",
"aks": "Akaselem",
"akt": "Akolet",
"aku": "Akum",
"akv": "Akhvakh",
"akw": "Akwa",
"akx": "Aka-Kede",
"aky": "Aka-Kol",
"akz": "Alabama",
"ala": "Alago",
"alc": "Qawasqar",
"ald": "Alladian",
"ale": "Aleut",
"alf": "Alege",
"alg": "Algonquian languages",
"alh": "Alawa",
"ali": "Amaimon",
"alj": "Alangan",
"alk": "Alak",
"all": "Allar",
"alm": "Amblong",
"aln": "Gheg Albanian",
"alo": "Larike-Wakasihu",
"alp": "Alune",
"alq": "Algonquin",
"alr": "Alutor",
"als": "Tosk Albanian",
"alt": "Southern Altai",
"alu": "'Are'are",
"alv": "Atlantic-Congo languages",
"alw": "Alaba-K’abeena; Wanbasana",
"alx": "Amol",
"aly": "Alyawarr",
"alz": "Alur",
"am": "Amharic",
"ama": "Amanayé",
"amb": "Ambo",
"amc": "Amahuaca",
"ame": "Yanesha'",
"amf": "Hamer-Banna",
"amg": "Amurdak",
"ami": "Amis",
"amj": "Amdang",
"amk": "Ambai",
"aml": "War-Jaintia",
"amm": "Ama (Papua New Guinea)",
"amn": "Amanab",
"amo": "Amo",
"amp": "Alamblak",
"amq": "Amahai",
"amr": "Amarakaeri",
"ams": "Southern Amami-Oshima",
"amt": "Amto",
"amu": "Guerrero Amuzgo",
"amv": "Ambelau",
"amw": "Western Neo-Aramaic",
"amx": "Anmatyerre",
"amy": "Ami",
"amz": "Atampaya",
"an": "Aragonese",
"ana": "Andaqui",
"anb": "Andoa",
"anc": "Ngas",
"and": "Ansus",
"ane": "Xârâcùù",
"anf": "Animere",
"ang": "Old English (ca. 450-1100)",
"anh": "Nend",
"ani": "Andi",
"anj": "Anor",
"ank": "Goemai",
"anl": "Anu-Hkongso Chin",
"anm": "Anal",
"ann": "Obolo",
"ano": "Andoque",
"anp": "Angika",
"anq": "Jarawa (India)",
"anr": "Andh",
"ans": "Anserma",
"ant": "Antakarinya; Antikarinya",
"anu": "Anuak",
"anv": "Denya",
"anw": "Anaang",
"anx": "Andra-Hus",
"any": "Anyin",
"anz": "Anem",
"aoa": "Angolar",
"aob": "Abom",
"aoc": "Pemon",
"aod": "Andarum",
"aoe": "Angal Enen",
"aof": "Bragat",
"aog": "Angoram",
"aoi": "Anindilyakwa",
"aoj": "Mufian",
"aok": "Arhö",
"aol": "Alor",
"aom": "Ömie",
"aon": "Bumbita Arapesh",
"aor": "Aore",
"aos": "Taikat",
"aot": "Atong (India); A'tong",
"aou": "A'ou",
"aox": "Atorada",
"aoz": "Uab Meto",
"apa": "Apache languages",
"apb": "Sa'a",
"apc": "North Levantine Arabic",
"apd": "Sudanese Arabic",
"ape": "Bukiyip",
"apf": "Pahanan Agta",
"apg": "Ampanang",
"aph": "Athpariya",
"api": "Apiaká",
"apj": "Jicarilla Apache",
"apk": "Kiowa Apache",
"apl": "Lipan Apache",
"apm": "Mescalero-Chiricahua Apache",
"apn": "Apinayé",
"apo": "Ambul",
"app": "Apma",
"apq": "A-Pucikwar",
"apr": "Arop-Lokep",
"aps": "Arop-Sissano",
"apt": "Apatani",
"apu": "Apurinã",
"apv": "Alapmunte",
"apw": "Western Apache",
"apx": "Aputai",
"apy": "Apalaí",
"apz": "Safeyoka",
"aqa": "Alacalufan languages",
"aqc": "Archi",
"aqd": "Ampari Dogon",
"aqg": "Arigidi",
"aqk": "Aninka",
"aql": "Algic languages",
"aqm": "Atohwaim",
"aqn": "Northern Alta",
"aqp": "Atakapa",
"aqr": "Arhâ",
"aqt": "Angaité",
"aqz": "Akuntsu",
"ar": "Arabic",
"arb": "Standard Arabic",
"arc": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)",
"ard": "Arabana",
"are": "Western Arrarnta",
"arh": "Arhuaco",
"ari": "Arikara",
"arj": "Arapaso",
"ark": "Arikapú",
"arl": "Arabela",
"arn": "Mapudungun; Mapuche",
"aro": "Araona",
"arp": "Arapaho",
"arq": "Algerian Arabic",
"arr": "Karo (Brazil)",
"ars": "Najdi Arabic",
"art": "Artificial languages",
"aru": "Aruá (Amazonas State); Arawá",
"arv": "Arbore",
"arw": "Arawak",
"arx": "Aruá (Rodonia State)",
"ary": "Moroccan Arabic",
"arz": "Egyptian Arabic",
"as": "Assamese",
"asa": "Asu (Tanzania)",
"asb": "Assiniboine",
"asc": "Casuarina Coast Asmat",
"ase": "American Sign Language",
"asf": "Auslan; Australian Sign Language",
"asg": "Cishingini",
"ash": "Abishira",
"asi": "Buruwai",
"asj": "Sari",
"ask": "Ashkun",
"asl": "Asilulu",
"asn": "Xingú Asuriní",
"aso": "Dano",
"asp": "Algerian Sign Language",
"asq": "Austrian Sign Language",
"asr": "Asuri",
"ass": "Ipulo",
"ast": "Asturian; Asturleonese; Bable; Leonese",
"asu": "Tocantins Asurini",
"asv": "Asoa",
"asw": "Australian Aborigines Sign Language",
"asx": "Muratayak",
"asy": "Yaosakor Asmat",
"asz": "As",
"ata": "Pele-Ata",
"atb": "Zaiwa",
"atc": "Atsahuaca",
"atd": "Ata Manobo",
"ate": "Atemble",
"atg": "Ivbie North-Okpela-Arhe",
"ath": "Athapascan languages",
"ati": "Attié",
"atj": "Atikamekw",
"atk": "Ati",
"atl": "Mt. Iraya Agta",
"atm": "Ata",
"atn": "Ashtiani",
"ato": "Atong (Cameroon)",
"atp": "Pudtol Atta",
"atq": "Aralle-Tabulahan",
"atr": "Waimiri-Atroari",
"ats": "Gros Ventre",
"att": "Pamplona Atta",
"atu": "Reel",
"atv": "Northern Altai",
"atw": "Atsugewi",
"atx": "Arutani",
"aty": "Aneityum",
"atz": "Arta",
"aua": "Asumboa",
"aub": "Alugu",
"auc": "Waorani",
"aud": "Anuta",
"auf": "Arauan languages",
"aug": "Aguna",
"auh": "Aushi",
"aui": "Anuki",
"auj": "Awjilah",
"auk": "Heyo",
"aul": "Aulua",
"aum": "Asu (Nigeria)",
"aun": "Molmo One",
"auo": "Auyokawa",
"aup": "Makayam",
"auq": "Anus; Korur",
"aur": "Aruek",
"aus": "Australian languages",
"aut": "Austral",
"auu": "Auye",
"auw": "Awyi",
"aux": "Aurá",
"auy": "Awiyaana",
"auz": "Uzbeki Arabic",
"av": "Avaric",
"avb": "Avau",
"avd": "Alviri-Vidari",
"avi": "Avikam",
"avk": "Kotava",
"avl": "Eastern Egyptian Bedawi Arabic",
"avm": "Angkamuthi",
"avn": "Avatime",
"avo": "Agavotaguerra",
"avs": "Aushiri",
"avt": "Au",
"avu": "Avokaya",
"avv": "Avá-Canoeiro",
"awa": "Awadhi",
"awb": "Awa (Papua New Guinea)",
"awc": "Cicipu",
"awd": "Arawakan languages",
"awe": "Awetí",
"awg": "Anguthimri",
"awh": "Awbono",
"awi": "Aekyom",
"awk": "Awabakal",
"awm": "Arawum",
"awn": "Awngi",
"awo": "Awak",
"awr": "Awera",
"aws": "South Awyu",
"awt": "Araweté",
"awu": "Central Awyu",
"awv": "Jair Awyu",
"aww": "Awun",
"awx": "Awara",
"awy": "Edera Awyu",
"axb": "Abipon",
"axe": "Ayerrerenge",
"axg": "Mato Grosso Arára",
"axk": "Yaka (Central African Republic)",
"axl": "Lower Southern Aranda",
"axm": "Middle Armenian",
"axx": "Xârâgurè",
"ay": "Aymara",
"aya": "Awar",
"ayb": "Ayizo Gbe",
"ayc": "Southern Aymara",
"ayd": "Ayabadhu",
"aye": "Ayere",
"ayg": "Ginyanga",
"ayh": "Hadrami Arabic",
"ayi": "Leyigha",
"ayk": "Akuku",
"ayl": "Libyan Arabic",
"ayn": "Sanaani Arabic",
"ayo": "Ayoreo",
"ayp": "North Mesopotamian Arabic",
"ayq": "Ayi (Papua New Guinea)",
"ayr": "Central Aymara",
"ays": "Sorsogon Ayta",
"ayt": "Magbukun Ayta",
"ayu": "Ayu",
"ayz": "Mai Brat",
"az": "Azerbaijani",
"aza": "Azha",
"azb": "South Azerbaijani",
"azc": "Uto-Aztecan languages",
"azd": "Eastern Durango Nahuatl",
"azg": "San Pedro Amuzgos Amuzgo",
"azj": "North Azerbaijani",
"azm": "Ipalapa Amuzgo",
"azn": "Western Durango Nahuatl",
"azo": "Awing",
"azt": "Faire Atta",
"azz": "Highland Puebla Nahuatl",
"ba": "Bashkir",
"baa": "Babatana",
"bab": "Bainouk-Gunyuño",
"bac": "Badui",
"bad": "Banda languages",
"bae": "Baré",
"baf": "Nubaca",
"bag": "Tuki",
"bah": "Bahamas Creole English",
"bai": "Bamileke languages",
"baj": "Barakai",
"bal": "Baluchi",
"ban": "Balinese",
"bao": "Waimaha",
"bap": "Bantawa",
"bar": "Bavarian",
"bas": "Basa (Cameroon)",
"bat": "Baltic languages",
"bau": "Bada (Nigeria)",
"bav": "Vengo",
"baw": "Bambili-Bambui",
"bax": "Bamun",
"bay": "Batuley",
"bba": "Baatonum",
"bbb": "Barai",
"bbc": "Batak Toba",
"bbd": "Bau",
"bbe": "Bangba",
"bbf": "Baibai",
"bbg": "Barama",
"bbh": "Bugan",
"bbi": "Barombi",
"bbj": "Ghomálá'",
"bbk": "Babanki",
"bbl": "Bats",
"bbm": "Babango",
"bbn": "Uneapa",
"bbo": "Northern Bobo Madaré; Konabéré",
"bbp": "West Central Banda",
"bbq": "Bamali",
"bbr": "Girawa",
"bbs": "Bakpinka",
"bbt": "Mburku",
"bbu": "Kulung (Nigeria)",
"bbv": "Karnai",
"bbw": "Baba",
"bbx": "Bubia",
"bby": "Befang",
"bca": "Central Bai",
"bcb": "Bainouk-Samik",
"bcc": "Southern Balochi",
"bcd": "North Babar",
"bce": "Bamenyam",
"bcf": "Bamu",
"bcg": "Baga Pokur",
"bch": "Bariai",
"bci": "Baoulé",
"bcj": "Bardi",
"bck": "Bunuba",
"bcl": "Central Bikol",
"bcm": "Bannoni",
"bcn": "Bali (Nigeria)",
"bco": "Kaluli",
"bcp": "Bali (Democratic Republic of Congo)",
"bcq": "Bench",
"bcr": "Babine",
"bcs": "Kohumono",
"bct": "Bendi",
"bcu": "Awad Bing",
"bcv": "Shoo-Minda-Nye",
"bcw": "Bana",
"bcy": "Bacama",
"bcz": "Bainouk-Gunyaamolo",
"bda": "Bayot",
"bdb": "Basap",
"bdc": "Emberá-Baudó",
"bdd": "Bunama",
"bde": "Bade",
"bdf": "Biage",
"bdg": "Bonggi",
"bdh": "Baka (South Sudan)",
"bdi": "Burun",
"bdj": "Bai (South Sudan); Bai",
"bdk": "Budukh",
"bdl": "Indonesian Bajau",
"bdm": "Buduma",
"bdn": "Baldemu",
"bdo": "Morom",
"bdp": "Bende",
"bdq": "Bahnar",
"bdr": "West Coast Bajau",
"bds": "Burunge",
"bdt": "Bokoto",
"bdu": "Oroko",
"bdv": "Bodo Parja",
"bdw": "Baham",
"bdx": "Budong-Budong",
"bdy": "Bandjalang",
"bdz": "Badeshi",
"be": "Belarusian",
"bea": "Beaver",
"beb": "Bebele",
"bec": "Iceve-Maci",
"bed": "Bedoanas",
"bee": "Byangsi",
"bef": "Benabena",
"beg": "Belait",
"beh": "Biali",
"bei": "Bekati'",
"bej": "Beja; Bedawiyet",
"bek": "Bebeli",
"bem": "Bemba (Zambia)",
"beo": "Beami",
"bep": "Besoa",
"beq": "Beembe",
"ber": "Berber languages",
"bes": "Besme",
"bet": "Guiberoua Béte",
"beu": "Blagar",
"bev": "Daloa Bété",
"bew": "Betawi",
"bex": "Jur Modo",
"bey": "Beli (Papua New Guinea)",
"bez": "Bena (Tanzania)",
"bfa": "Bari",
"bfb": "Pauri Bareli",
"bfc": "Panyi Bai; Northern Bai",
"bfd": "Bafut",
"bfe": "Betaf; Tena",
"bff": "Bofi",
"bfg": "Busang Kayan",
"bfh": "Blafe",
"bfi": "British Sign Language",
"bfj": "Bafanji",
"bfk": "Ban Khor Sign Language",
"bfl": "Banda-Ndélé",
"bfm": "Mmen",
"bfn": "Bunak",
"bfo": "Malba Birifor",
"bfp": "Beba",
"bfq": "Badaga",
"bfr": "Bazigar",
"bfs": "Southern Bai",
"bft": "Balti",
"bfu": "Gahri",
"bfw": "Bondo",
"bfx": "Bantayanon",
"bfy": "Bagheli",
"bfz": "Mahasu Pahari",
"bg": "Bulgarian",
"bga": "Gwamhi-Wuri",
"bgb": "Bobongko",
"bgc": "Haryanvi",
"bgd": "Rathwi Bareli",
"bge": "Bauria",
"bgf": "Bangandu",
"bgg": "Bugun",
"bgi": "Giangan",
"bgj": "Bangolan",
"bgk": "Bit; Buxinhua",
"bgl": "Bo (Laos)",
"bgn": "Western Balochi",
"bgo": "Baga Koga",
"bgp": "Eastern Balochi",
"bgq": "Bagri",
"bgr": "Bawm Chin",
"bgs": "Tagabawa",
"bgt": "Bughotu",
"bgu": "Mbongno",
"bgv": "Warkay-Bipim",
"bgw": "Bhatri",
"bgx": "Balkan Gagauz Turkish",
"bgy": "Benggoi",
"bgz": "Banggai",
"bh": "Bihari languages",
"bha": "Bharia",
"bhb": "Bhili",
"bhc": "Biga",
"bhd": "Bhadrawahi",
"bhe": "Bhaya",
"bhf": "Odiai",
"bhg": "Binandere",
"bhh": "Bukharic",
"bhi": "Bhilali",
"bhj": "Bahing",
"bhl": "Bimin",
"bhm": "Bathari",
"bhn": "Bohtan Neo-Aramaic",
"bho": "Bhojpuri",
"bhp": "Bima",
"bhq": "Tukang Besi South",
"bhr": "Bara Malagasy",
"bhs": "Buwal",
"bht": "Bhattiyali",
"bhu": "Bhunjia",
"bhv": "Bahau",
"bhw": "Biak",
"bhx": "Bhalay",
"bhy": "Bhele",
"bhz": "Bada (Indonesia)",
"bi": "Bislama",
"bia": "Badimaya",
"bib": "Bissa; Bisa",
"bid": "Bidiyo",
"bie": "Bepour",
"bif": "Biafada",
"big": "Biangai",
"bik": "Bikol",
"bil": "Bile",
"bim": "Bimoba",
"bin": "Bini; Edo",
"bio": "Nai",
"bip": "Bila",
"biq": "Bipi",
"bir": "Bisorio",
"bit": "Berinomo",
"biu": "Biete",
"biv": "Southern Birifor",
"biw": "Kol (Cameroon)",
"bix": "Bijori",
"biy": "Birhor",
"biz": "Baloi",
"bja": "Budza",
"bjb": "Banggarla",
"bjc": "Bariji",
"bje": "Biao-Jiao Mien",
"bjf": "Barzani Jewish Neo-Aramaic",
"bjg": "Bidyogo",
"bjh": "Bahinemo",
"bji": "Burji",
"bjj": "Kanauji",
"bjk": "Barok",
"bjl": "Bulu (Papua New Guinea)",
"bjm": "Bajelani",
"bjn": "Banjar",
"bjo": "Mid-Southern Banda",
"bjp": "Fanamaket",
"bjr": "Binumarien",
"bjs": "Bajan",
"bjt": "Balanta-Ganja",
"bju": "Busuu",
"bjv": "Bedjond",
"bjw": "Bakwé",
"bjx": "Banao Itneg",
"bjy": "Bayali",
"bjz": "Baruga",
"bka": "Kyak",
"bkc": "Baka (Cameroon)",
"bkd": "Binukid; Talaandig",
"bkf": "Beeke",
"bkg": "Buraka",
"bkh": "Bakoko",
"bki": "Baki",
"bkj": "Pande",
"bkk": "Brokskat",
"bkl": "Berik",
"bkm": "Kom (Cameroon)",
"bkn": "Bukitan",
"bko": "Kwa'",
"bkp": "Boko (Democratic Republic of Congo)",
"bkq": "Bakairí",
"bkr": "Bakumpai",
"bks": "Northern Sorsoganon",
"bkt": "Boloki",
"bku": "Buhid",
"bkv": "Bekwarra",
"bkw": "Bekwel",
"bkx": "Baikeno",
"bky": "Bokyi",
"bkz": "Bungku",
"bla": "Siksika",
"blb": "Bilua",
"blc": "Bella Coola",
"bld": "Bolango",
"ble": "Balanta-Kentohe",
"blf": "Buol",
"blh": "Kuwaa",
"bli": "Bolia",
"blj": "Bolongan",
"blk": "Pa'o Karen; Pa'O",
"bll": "Biloxi",
"blm": "Beli (South Sudan)",
"bln": "Southern Catanduanes Bikol",
"blo": "Anii",
"blp": "Blablanga",
"blq": "Baluan-Pam",
"blr": "Blang",
"bls": "Balaesang",
"blt": "Tai Dam",
"blv": "Kibala; Bolo",
"blw": "Balangao",
"blx": "Mag-Indi Ayta",
"bly": "Notre",
"blz": "Balantak",
"bm": "Bambara",
"bma": "Lame",
"bmb": "Bembe",
"bmc": "Biem",
"bmd": "Baga Manduri",
"bme": "Limassa",
"bmf": "Bom-Kim",
"bmg": "Bamwe",
"bmh": "Kein",
"bmi": "Bagirmi",
"bmj": "Bote-Majhi",
"bmk": "Ghayavi",
"bml": "Bomboli",
"bmm": "Northern Betsimisaraka Malagasy",
"bmn": "Bina (Papua New Guinea)",
"bmo": "Bambalang",
"bmp": "Bulgebi",
"bmq": "Bomu",
"bmr": "Muinane",
"bms": "Bilma Kanuri",
"bmt": "Biao Mon",
"bmu": "Somba-Siawari",
"bmv": "Bum",
"bmw": "Bomwali",
"bmx": "Baimak",
"bmz": "Baramu",
"bn": "Bengali; Bangla",
"bna": "Bonerate",
"bnb": "Bookan",
"bnc": "Bontok",
"bnd": "Banda (Indonesia)",
"bne": "Bintauna",
"bnf": "Masiwang",
"bng": "Benga",
"bni": "Bangi",
"bnj": "Eastern Tawbuid",
"bnk": "Bierebo",
"bnl": "Boon",
"bnm": "Batanga",
"bnn": "Bunun",
"bno": "Bantoanon",
"bnp": "Bola",
"bnq": "Bantik",
"bnr": "Butmas-Tur",
"bns": "Bundeli",
"bnt": "Bantu languages",
"bnu": "Bentong",
"bnv": "Bonerif; Beneraf; Edwas",
"bnw": "Bisis",
"bnx": "Bangubangu",
"bny": "Bintulu",
"bnz": "Beezen",
"bo": "Tibetan",
"boa": "Bora",
"bob": "Aweer",
"boe": "Mundabli",
"bof": "Bolon",
"bog": "Bamako Sign Language",
"boh": "Boma",
"boi": "Barbareño",
"boj": "Anjam",
"bok": "Bonjo",
"bol": "Bole",
"bom": "Berom",
"bon": "Bine",
"boo": "Tiemacèwè Bozo",
"bop": "Bonkiman",
"boq": "Bogaya",
"bor": "Borôro",
"bot": "Bongo",
"bou": "Bondei",
"bov": "Tuwuli",
"bow": "Rema",
"box": "Buamu",
"boy": "Bodo (Central African Republic)",
"boz": "Tiéyaxo Bozo",
"bpa": "Daakaka",
"bpc": "Mbuk",
"bpd": "Banda-Banda",
"bpe": "Bauni",
"bpg": "Bonggo",
"bph": "Botlikh",
"bpi": "Bagupi",
"bpj": "Binji",
"bpk": "Orowe; 'Ôrôê",
"bpl": "Broome Pearling Lugger Pidgin",
"bpm": "Biyom",
"bpn": "Dzao Min",
"bpo": "Anasi",
"bpp": "Kaure",
"bpq": "Banda Malay",
"bpr": "Koronadal Blaan",
"bps": "Sarangani Blaan",
"bpt": "Barrow Point",
"bpu": "Bongu",
"bpv": "Bian Marind",
"bpw": "Bo (Papua New Guinea)",
"bpx": "Palya Bareli",
"bpy": "Bishnupriya",
"bpz": "Bilba",
"bqa": "Tchumbuli",
"bqb": "Bagusa",
"bqc": "Boko (Benin); Boo",
"bqd": "Bung",
"bqf": "Baga Kaloum",
"bqg": "Bago-Kusuntu",
"bqh": "Baima",
"bqi": "Bakhtiari",
"bqj": "Bandial",
"bqk": "Banda-Mbrès",
"bql": "Bilakura",
"bqm": "Wumboko",
"bqn": "Bulgarian Sign Language",
"bqo": "Balo",
"bqp": "Busa",
"bqq": "Biritai",
"bqr": "Burusu",
"bqs": "Bosngun",
"bqt": "Bamukumbit",
"bqu": "Boguru",
"bqv": "Koro Wachi; Begbere-Ejar",
"bqw": "Buru (Nigeria)",
"bqx": "Baangi",
"bqy": "Bengkala Sign Language",
"bqz": "Bakaka",
"br": "Breton",
"bra": "Braj",
"brb": "Brao; Lave",
"brc": "Berbice Creole Dutch",
"brd": "Baraamu",
"brf": "Bira",
"brg": "Baure",
"brh": "Brahui",
"bri": "Mokpwe",
"brj": "Bieria",
"brk": "Birked",
"brl": "Birwa",
"brm": "Barambu",
"brn": "Boruca",
"bro": "Brokkat",
"brp": "Barapasi",
"brq": "Breri",
"brr": "Birao",
"brs": "Baras",
"brt": "Bitare",
"bru": "Eastern Bru",
"brv": "Western Bru",
"brw": "Bellari",
"brx": "Bodo (India)",
"bry": "Burui",
"brz": "Bilbil",
"bs": "Bosnian",
"bsa": "Abinomn",
"bsb": "Brunei Bisaya",
"bsc": "Bassari; Oniyan",
"bse": "Wushi",
"bsf": "Bauchi",
"bsg": "Bashkardi",
"bsh": "Kati",
"bsi": "Bassossi",
"bsj": "Bangwinji",
"bsk": "Burushaski",
"bsl": "Basa-Gumna",
"bsm": "Busami",
"bsn": "Barasana-Eduria",
"bso": "Buso",
"bsp": "Baga Sitemu",
"bsq": "Bassa",
"bsr": "Bassa-Kontagora",
"bss": "Akoose",
"bst": "Basketo",
"bsu": "Bahonsuai",
"bsv": "Baga Sobané",
"bsw": "Baiso",
"bsx": "Yangkam",
"bsy": "Sabah Bisaya",
"bta": "Bata",
"btc": "Bati (Cameroon)",
"btd": "Batak Dairi",
"bte": "Gamo-Ningi",
"btf": "Birgit",
"btg": "Gagnoa Bété",
"bth": "Biatah Bidayuh",
"bti": "Burate",
"btj": "Bacanese Malay",
"btk": "Batak languages",
"btm": "Batak Mandailing",
"btn": "Ratagnon",
"bto": "Rinconada Bikol",
"btp": "Budibud",
"btq": "Batek",
"btr": "Baetora",
"bts": "Batak Simalungun",
"btt": "Bete-Bendi",
"btu": "Batu",
"btv": "Bateri",
"btw": "Butuanon",
"btx": "Batak Karo",
"bty": "Bobot",
"btz": "Batak Alas-Kluet",
"bua": "Buriat",
"bub": "Bua",
"buc": "Bushi",
"bud": "Ntcham",
"bue": "Beothuk",
"buf": "Bushoong",
"bug": "Buginese",
"buh": "Younuo Bunu",
"bui": "Bongili",
"buj": "Basa-Gurmana",
"buk": "Bugawac",
"bum": "Bulu (Cameroon)",
"bun": "Sherbro",
"buo": "Terei",
"bup": "Busoa",
"buq": "Brem",
"bus": "Bokobaru",
"but": "Bungain",
"buu": "Budu",
"buv": "Bun",
"buw": "Bubi",
"bux": "Boghom",
"buy": "Bullom So",
"buz": "Bukwen",
"bva": "Barein",
"bvb": "Bube",
"bvc": "Baelelea",
"bvd": "Baeggu",
"bve": "Berau Malay",
"bvf": "Boor",
"bvg": "Bonkeng",
"bvh": "Bure",
"bvi": "Belanda Viri",
"bvj": "Baan",
"bvk": "Bukat",
"bvl": "Bolivian Sign Language",
"bvm": "Bamunka",
"bvn": "Buna",
"bvo": "Bolgo",
"bvp": "Bumang",
"bvq": "Birri",
"bvr": "Burarra",
"bvt": "Bati (Indonesia)",
"bvu": "Bukit Malay",
"bvv": "Baniva",
"bvw": "Boga",
"bvx": "Dibole",
"bvy": "Baybayanon",
"bvz": "Bauzi",
"bwa": "Bwatoo",
"bwb": "Namosi-Naitasiri-Serua",
"bwc": "Bwile",
"bwd": "Bwaidoka",
"bwe": "Bwe Karen",
"bwf": "Boselewa",
"bwg": "Barwe",
"bwh": "Bishuo",
"bwi": "Baniwa",
"bwj": "Láá Láá Bwamu",
"bwk": "Bauwaki",
"bwl": "Bwela",
"bwm": "Biwat",
"bwn": "Wunai Bunu",
"bwo": "Boro (Ethiopia); Borna (Ethiopia)",
"bwp": "Mandobo Bawah",
"bwq": "Southern Bobo Madaré",
"bwr": "Bura-Pabir",
"bws": "Bomboma",
"bwt": "Bafaw-Balong",
"bwu": "Buli (Ghana)",
"bww": "Bwa",
"bwx": "Bu-Nao Bunu",
"bwy": "Cwi Bwamu",
"bwz": "Bwisi",
"bxa": "Tairaha",
"bxb": "Belanda Bor",
"bxc": "Molengue",
"bxd": "Pela",
"bxe": "Birale",
"bxf": "Bilur; Minigir",
"bxg": "Bangala",
"bxh": "Buhutu",
"bxi": "Pirlatapa",
"bxj": "Bayungu",
"bxk": "Bukusu; Lubukusu",
"bxl": "Jalkunan",
"bxm": "Mongolia Buriat",
"bxn": "Burduna",
"bxo": "Barikanchi",
"bxp": "Bebil",
"bxq": "Beele",
"bxr": "Russia Buriat",
"bxs": "Busam",
"bxu": "China Buriat",
"bxv": "Berakou",
"bxw": "Bankagooma",
"bxz": "Binahari",
"bya": "Batak",
"byb": "Bikya",
"byc": "Ubaghara",
"byd": "Benyadu'",
"bye": "Pouye",
"byf": "Bete",
"byg": "Baygo",
"byh": "Bhujel",
"byi": "Buyu",
"byj": "Bina (Nigeria)",
"byk": "Biao",
"byl": "Bayono",
"bym": "Bidjara",
"byn": "Bilin; Blin",
"byo": "Biyo",
"byp": "Bumaji",
"byq": "Basay",
"byr": "Baruya; Yipma",
"bys": "Burak",
"byt": "Berti",
"byv": "Medumba",
"byw": "Belhariya",
"byx": "Qaqet",
"byz": "Banaro",
"bza": "Bandi",
"bzb": "Andio",
"bzc": "Southern Betsimisaraka Malagasy",
"bzd": "Bribri",
"bze": "Jenaama Bozo",
"bzf": "Boikin",
"bzg": "Babuza",
"bzh": "Mapos Buang",
"bzi": "Bisu",
"bzj": "Belize Kriol English",
"bzk": "Nicaragua Creole English",
"bzl": "Boano (Sulawesi)",
"bzm": "Bolondo",
"bzn": "Boano (Maluku)",
"bzo": "Bozaba",
"bzp": "Kemberano",
"bzq": "Buli (Indonesia)",
"bzr": "Biri",
"bzs": "Brazilian Sign Language",
"bzt": "Brithenig",
"bzu": "Burmeso",
"bzv": "Naami",
"bzw": "Basa (Nigeria)",
"bzx": "Kɛlɛngaxo Bozo",
"bzy": "Obanliku",
"bzz": "Evant",
"ca": "Catalan; Valencian",
"caa": "Chortí",
"cab": "Garifuna",
"cac": "Chuj",
"cad": "Caddo",
"cae": "Lehar; Laalaa",
"caf": "Southern Carrier",
"cag": "Nivaclé",
"cah": "Cahuarano",
"cai": "Central American Indian languages",
"caj": "Chané",
"cak": "Kaqchikel; Cakchiquel",
"cal": "Carolinian",
"cam": "Cemuhî",
"can": "Chambri",
"cao": "Chácobo",
"cap": "Chipaya",
"caq": "Car Nicobarese",
"car": "Galibi Carib",
"cas": "Tsimané",
"cau": "Caucasian languages",
"cav": "Cavineña",
"caw": "Callawalla",
"cax": "Chiquitano",
"cay": "Cayuga",
"caz": "Canichana",
"cba": "Chibchan languages",
"cbb": "Cabiyarí",
"cbc": "Carapana",
"cbd": "Carijona",
"cbg": "Chimila",
"cbi": "Chachi",
"cbj": "Ede Cabe",
"cbk": "Chavacano",
"cbl": "Bualkhaw Chin",
"cbn": "Nyahkur",
"cbo": "Izora",
"cbq": "Tsucuba; Cuba",
"cbr": "Cashibo-Cacataibo",
"cbs": "Cashinahua",
"cbt": "Chayahuita",
"cbu": "Candoshi-Shapra",
"cbv": "Cacua",
"cbw": "Kinabalian",
"cby": "Carabayo",
"ccc": "Chamicuro",
"ccd": "Cafundo Creole",
"cce": "Chopi",
"ccg": "Samba Daka",
"cch": "Atsam",
"ccj": "Kasanga",
"ccl": "Cutchi-Swahili",
"ccm": "Malaccan Creole Malay",
"ccn": "North Caucasian languages",
"cco": "Comaltepec Chinantec",
"ccp": "Chakma",
"ccr": "Cacaopera",
"ccs": "South Caucasian languages",
"cda": "Choni",
"cdc": "Chadic languages",
"cdd": "Caddoan languages",
"cde": "Chenchu",
"cdf": "Chiru",
"cdh": "Chambeali",
"cdi": "Chodri",
"cdj": "Churahi",
"cdm": "Chepang",
"cdn": "Chaudangsi",
"cdo": "Min Dong Chinese",
"cdr": "Cinda-Regi-Tiyal",
"cds": "Chadian Sign Language",
"cdy": "Chadong",
"cdz": "Koda",
"ce": "Chechen",
"cea": "Lower Chehalis",
"ceb": "Cebuano",
"ceg": "Chamacoco",
"cek": "Eastern Khumi Chin",
"cel": "Celtic languages",
"cen": "Cen",
"cet": "Centúúm",
"cey": "Ekai Chin",
"cfa": "Dijim-Bwilim",
"cfd": "Cara",
"cfg": "Como Karim",
"cfm": "Falam Chin",
"cga": "Changriwa",
"cgc": "Kagayanen",
"cgg": "Chiga",
"cgk": "Chocangacakha",
"ch": "Chamorro",
"chb": "Chibcha",
"chc": "Catawba",
"chd": "Highland Oaxaca Chontal",
"chf": "Tabasco Chontal",
"chg": "Chagatai",
"chh": "Chinook",
"chj": "Ojitlán Chinantec",
"chk": "Chuukese",
"chl": "Cahuilla",
"chm": "Mari (Russia)",
"chn": "Chinook jargon",
"cho": "Choctaw",
"chp": "Chipewyan; Dene Suline",
"chq": "Quiotepec Chinantec",
"chr": "Cherokee",
"cht": "Cholón",
"chw": "Chuwabu",
"chx": "Chantyal",
"chy": "Cheyenne",
"chz": "Ozumacín Chinantec",
"cia": "Cia-Cia",
"cib": "Ci Gbe",
"cic": "Chickasaw",
"cid": "Chimariko",
"cie": "Cineni",
"cih": "Chinali",
"cik": "Chitkuli Kinnauri",
"cim": "Cimbrian",
"cin": "Cinta Larga",
"cip": "Chiapanec",
"cir": "Tiri; Haméa; Méa",
"ciw": "Chippewa",
"ciy": "Chaima",
"cja": "Western Cham",
"cje": "Chru",
"cjh": "Upper Chehalis",
"cji": "Chamalal",
"cjk": "Chokwe",
"cjm": "Eastern Cham",
"cjn": "Chenapian",
"cjo": "Ashéninka Pajonal",
"cjp": "Cabécar",
"cjs": "Shor",
"cjv": "Chuave",
"cjy": "Jinyu Chinese",
"ckb": "Central Kurdish",
"ckh": "Chak",
"ckl": "Cibak",
"ckm": "Chakavian",
"ckn": "Kaang Chin",
"cko": "Anufo",
"ckq": "Kajakse",
"ckr": "Kairak",
"cks": "Tayo",
"ckt": "Chukot",
"cku": "Koasati",
"ckv": "Kavalan",
"ckx": "Caka",
"cky": "Cakfem-Mushere",
"ckz": "Cakchiquel-Quiché Mixed Language",
"cla": "Ron",
"clc": "Chilcotin",
"cld": "Chaldean Neo-Aramaic",
"cle": "Lealao Chinantec",
"clh": "Chilisso",
"cli": "Chakali",
"clj": "Laitu Chin",
"clk": "Idu-Mishmi",
"cll": "Chala",
"clm": "Clallam",
"clo": "Lowland Oaxaca Chontal",
"clt": "Lautu Chin",
"clu": "Caluyanun",
"clw": "Chulym",
"cly": "Eastern Highland Chatino",
"cma": "Maa",
"cmc": "Chamic languages",
"cme": "Cerma",
"cmg": "Classical Mongolian",
"cmi": "Emberá-Chamí",
"cml": "Campalagian",
"cmm": "Michigamea",
"cmn": "Mandarin Chinese",
"cmo": "Central Mnong",
"cmr": "Mro-Khimi Chin",
"cms": "Messapic",
"cmt": "Camtho",
"cna": "Changthang",
"cnb": "Chinbon Chin",
"cnc": "Côông",
"cng": "Northern Qiang",
"cnh": "Hakha Chin; Haka Chin",
"cni": "Asháninka",
"cnk": "Khumi Chin",
"cnl": "Lalana Chinantec",
"cno": "Con",
"cnp": "Northern Ping Chinese; Northern Pinghua",
"cnq": "Chung",
"cnr": "Montenegrin",
"cns": "Central Asmat",
"cnt": "Tepetotutla Chinantec",
"cnu": "Chenoua",
"cnw": "Ngawn Chin",
"cnx": "Middle Cornish",
"co": "Corsican",
"coa": "Cocos Islands Malay",
"cob": "Chicomuceltec",
"coc": "Cocopa",
"cod": "Cocama-Cocamilla",
"coe": "Koreguaje",
"cof": "Colorado",
"cog": "Chong",
"coh": "Chonyi-Dzihana-Kauma; Chichonyi-Chidzihana-Chikauma",
"coj": "Cochimi",
"cok": "Santa Teresa Cora",
"col": "Columbia-Wenatchi",
"com": "Comanche",
"con": "Cofán",
"coo": "Comox",
"cop": "Coptic",
"coq": "Coquille",
"cot": "Caquinte",
"cou": "Wamey",
"cov": "Cao Miao",
"cow": "Cowlitz",
"cox": "Nanti",
"coz": "Chochotec",
"cpa": "Palantla Chinantec",
"cpb": "Ucayali-Yurúa Ashéninka",
"cpc": "Ajyíninka Apurucayali",
"cpe": "English-based creoles and pidgins",
"cpf": "French-based creoles and pidgins",
"cpg": "Cappadocian Greek",
"cpi": "Chinese Pidgin English",
"cpn": "Cherepon",
"cpo": "Kpeego",
"cpp": "Portuguese-based creoles and pidgins",
"cps": "Capiznon",
"cpu": "Pichis Ashéninka",
"cpx": "Pu-Xian Chinese",
"cpy": "South Ucayali Ashéninka",
"cqd": "Chuanqiandian Cluster Miao",
"cr": "Cree",
"cra": "Chara",
"crb": "Island Carib",
"crc": "Lonwolwol",
"crd": "Coeur d'Alene",
"crf": "Caramanta",
"crg": "Michif",
"crh": "Crimean Tatar; Crimean Turkish",
"cri": "Sãotomense",
"crj": "Southern East Cree",
"crk": "Plains Cree",
"crl": "Northern East Cree",
"crm": "Moose Cree",
"crn": "El Nayar Cora",
"cro": "Crow",
"crp": "Creoles and pidgins",
"crq": "Iyo'wujwa Chorote",
"crr": "Carolina Algonquian",
"crs": "Seselwa Creole French",
"crt": "Iyojwa'ja Chorote",
"crv": "Chaura",
"crw": "Chrau",
"crx": "Carrier",
"cry": "Cori",
"crz": "Cruzeño",
"cs": "Czech",
"csa": "Chiltepec Chinantec",
"csb": "Kashubian",
"csc": "Catalan Sign Language; Lengua de señas catalana; Llengua de Signes Catalana",
"csd": "Chiangmai Sign Language",
"cse": "Czech Sign Language",
"csf": "Cuba Sign Language",
"csg": "Chilean Sign Language",
"csh": "Asho Chin",
"csi": "Coast Miwok",
"csj": "Songlai Chin",
"csk": "Jola-Kasa",
"csl": "Chinese Sign Language",
"csm": "Central Sierra Miwok",
"csn": "Colombian Sign Language",
"cso": "Sochiapam Chinantec; Sochiapan Chinantec",
"csp": "Southern Ping Chinese; Southern Pinghua",
"csq": "Croatia Sign Language",
"csr": "Costa Rican Sign Language",
"css": "Southern Ohlone",
"cst": "Northern Ohlone",
"csu": "Central Sudanic languages",
"csv": "Sumtu Chin",
"csw": "Swampy Cree",
"csx": "Cambodian Sign Language",
"csy": "Siyin Chin",
"csz": "Coos",
"cta": "Tataltepec Chatino",
"ctc": "Chetco",
"ctd": "Tedim Chin",
"cte": "Tepinapa Chinantec",
"ctg": "Chittagonian",
"cth": "Thaiphum Chin",
"ctl": "Tlacoatzintepec Chinantec",
"ctm": "Chitimacha",
"ctn": "Chhintange",
"cto": "Emberá-Catío",
"ctp": "Western Highland Chatino",
"cts": "Northern Catanduanes Bikol",
"ctt": "Wayanad Chetti",
"ctu": "Chol",
"cty": "Moundadan Chetty",
"ctz": "Zacatepec Chatino",
"cu": "Church Slavic; Church Slavonic; Old Bulgarian; Old Church Slavonic; Old Slavonic",
"cua": "Cua",
"cub": "Cubeo",
"cuc": "Usila Chinantec",
"cuh": "Chuka; Gichuka",
"cui": "Cuiba",
"cuj": "Mashco Piro",
"cuk": "San Blas Kuna",
"cul": "Culina; Kulina",
"cuo": "Cumanagoto",
"cup": "Cupeño",
"cuq": "Cun",
"cur": "Chhulung",
"cus": "Cushitic languages",
"cut": "Teutila Cuicatec",
"cuu": "Tai Ya",
"cuv": "Cuvok",
"cuw": "Chukwa",
"cux": "Tepeuxila Cuicatec",
"cuy": "Cuitlatec",
"cv": "Chuvash",
"cvg": "Chug",
"cvn": "Valle Nacional Chinantec",
"cwa": "Kabwa",
"cwb": "Maindo",
"cwd": "Woods Cree",
"cwe": "Kwere",
"cwg": "Chewong; Cheq Wong",
"cwt": "Kuwaataay",
"cy": "Welsh",
"cya": "Nopala Chatino",
"cyb": "Cayubaba",
"cyo": "Cuyonon",
"czh": "Huizhou Chinese",
"czk": "Knaanic",
"czn": "Zenzontepec Chatino",
"czo": "Min Zhong Chinese",
"czt": "Zotung Chin",
"da": "Danish",
"daa": "Dangaléat",
"dac": "Dambi",
"dad": "Marik",
"dae": "Duupa",
"dag": "Dagbani",
"dah": "Gwahatike",
"dai": "Day",
"daj": "Dar Fur Daju",
"dak": "Dakota",
"dal": "Dahalo",
"dam": "Damakawa",
"dao": "Daai Chin",
"daq": "Dandami Maria",
"dar": "Dargwa",
"das": "Daho-Doo",
"dau": "Dar Sila Daju",
"dav": "Taita; Dawida",
"daw": "Davawenyo",
"dax": "Dayi",
"day": "Land Dayak languages",
"daz": "Dao",
"dba": "Bangime",
"dbb": "Deno",
"dbd": "Dadiya",
"dbe": "Dabe",
"dbf": "Edopi",
"dbg": "Dogul Dom Dogon",
"dbi": "Doka",
"dbj": "Ida'an",
"dbl": "Dyirbal",
"dbm": "Duguri",
"dbn": "Duriankere",
"dbo": "Dulbu",
"dbp": "Duwai",
"dbq": "Daba",
"dbr": "Dabarre",
"dbt": "Ben Tey Dogon",
"dbu": "Bondum Dom Dogon",
"dbv": "Dungu",
"dbw": "Bankan Tey Dogon",
"dby": "Dibiyaso",
"dcc": "Deccan",
"dcr": "Negerhollands",
"dda": "Dadi Dadi",
"ddd": "Dongotono",
"dde": "Doondo",
"ddg": "Fataluku",
"ddi": "West Goodenough",
"ddj": "Jaru",
"ddn": "Dendi (Benin)",
"ddo": "Dido",
"ddr": "Dhudhuroa",
"dds": "Donno So Dogon",
"ddw": "Dawera-Daweloor",
"de": "German",
"dec": "Dagik",
"ded": "Dedua",
"dee": "Dewoin",
"def": "Dezfuli",
"deg": "Degema",
"deh": "Dehwari",
"dei": "Demisa",
"dek": "Dek",
"del": "Delaware",
"dem": "Dem",
"den": "Slave (Athapascan)",
"dep": "Pidgin Delaware",
"deq": "Dendi (Central African Republic)",
"der": "Deori",
"des": "Desano",
"dev": "Domung",
"dez": "Dengese",
"dga": "Southern Dagaare",
"dgb": "Bunoge Dogon",
"dgc": "Casiguran Dumagat Agta",
"dgd": "Dagaari Dioula",
"dge": "Degenan",
"dgg": "Doga",
"dgh": "Dghwede",
"dgi": "Northern Dagara",
"dgk": "Dagba",
"dgl": "Andaandi; Dongolawi",
"dgn": "Dagoman",
"dgo": "Dogri (individual language)",
"dgr": "Dogrib; Tłı̨chǫ",
"dgs": "Dogoso",
"dgt": "Ndra'ngith",
"dgw": "Daungwurrung",
"dgx": "Doghoro",
"dgz": "Daga",
"dhd": "Dhundari",
"dhg": "Dhangu-Djangu; Dhangu; Djangu",
"dhi": "Dhimal",
"dhl": "Dhalandji",
"dhm": "Zemba",
"dhn": "Dhanki",
"dho": "Dhodia",
"dhr": "Dhargari",
"dhs": "Dhaiso",
"dhu": "Dhurga",
"dhv": "Dehu; Drehu",
"dhw": "Dhanwar (Nepal)",
"dhx": "Dhungaloo",
"dia": "Dia",
"dib": "South Central Dinka",
"dic": "Lakota Dida",
"did": "Didinga",
"dif": "Dieri; Diyari",
"dig": "Digo; Chidigo",
"dih": "Kumiai",
"dii": "Dimbong",
"dij": "Dai",
"dik": "Southwestern Dinka",
"dil": "Dilling",
"dim": "Dime",
"din": "Dinka",
"dio": "Dibo",
"dip": "Northeastern Dinka",
"diq": "Dimli (individual language)",
"dir": "Dirim",
"dis": "Dimasa",
"diu": "Diriku",
"diw": "Northwestern Dinka",
"dix": "Dixon Reef",
"diy": "Diuwe",
"diz": "Ding",
"dja": "Djadjawurrung",
"djb": "Djinba",
"djc": "Dar Daju Daju",
"djd": "Djamindjung; Ngaliwurru",
"dje": "Zarma",
"djf": "Djangun",
"dji": "Djinang",
"djj": "Djeebbana",
"djk": "Eastern Maroon Creole; Businenge Tongo; Nenge",
"djm": "Jamsay Dogon",
"djn": "Jawoyn; Djauan",
"djo": "Jangkang",
"djr": "Djambarrpuyngu",
"dju": "Kapriman",
"djw": "Djawi",
"dka": "Dakpakha",
"dkg": "Kadung",
"dkk": "Dakka",
"dkr": "Kuijau",
"dks": "Southeastern Dinka",
"dkx": "Mazagway",
"dlg": "Dolgan",
"dlk": "Dahalik",
"dlm": "Dalmatian",
"dln": "Darlong",
"dma": "Duma",
"dmb": "Mombo Dogon",
"dmc": "Gavak",
"dmd": "Madhi Madhi",
"dme": "Dugwor",
"dmf": "Medefaidrin",
"dmg": "Upper Kinabatangan",
"dmk": "Domaaki",
"dml": "Dameli",
"dmm": "Dama",
"dmn": "Mande languages",
"dmo": "Kemedzung",
"dmr": "East Damar",
"dms": "Dampelas",
"dmu": "Dubu; Tebi",
"dmv": "Dumpas",
"dmw": "Mudburra",
"dmx": "Dema",
"dmy": "Demta; Sowari",
"dna": "Upper Grand Valley Dani",
"dnd": "Daonda",
"dne": "Ndendeule",
"dng": "Dungan",
"dni": "Lower Grand Valley Dani",
"dnj": "Dan",
"dnk": "Dengka",
"dnn": "Dzùùngoo",
"dno": "Ndrulo; Northern Lendu",
"dnr": "Danaru",
"dnt": "Mid Grand Valley Dani",
"dnu": "Danau",
"dnv": "Danu",
"dnw": "Western Dani",
"dny": "Dení",
"doa": "Dom",
"dob": "Dobu",
"doc": "Northern Dong",
"doe": "Doe",
"dof": "Domu",
"doh": "Dong",
"doi": "Dogri (macrolanguage)",
"dok": "Dondo",
"dol": "Doso",
"don": "Toura (Papua New Guinea)",
"doo": "Dongo",
"dop": "Lukpa",
"doq": "Dominican Sign Language",
"dor": "Dori'o",
"dos": "Dogosé",
"dot": "Dass",
"dov": "Dombe",
"dow": "Doyayo",
"dox": "Bussa",
"doy": "Dompo",
"doz": "Dorze",
"dpp": "Papar",
"dra": "Dravidian languages",
"drb": "Dair",
"drc": "Minderico",
"drd": "Darmiya",
"dre": "Dolpo",
"drg": "Rungus",
"dri": "C'Lela",
"drl": "Paakantyi",
"drn": "West Damar",
"dro": "Daro-Matu Melanau",
"drq": "Dura",
"drs": "Gedeo",
"drt": "Drents",
"dru": "Rukai",
"dry": "Darai",
"dsb": "Lower Sorbian",
"dse": "Dutch Sign Language",
"dsh": "Daasanach",
"dsi": "Disa",
"dsl": "Danish Sign Language",
"dsn": "Dusner",
"dso": "Desiya",
"dsq": "Tadaksahak",
"dsz": "Mardin Sign Language",
"dta": "Daur",
"dtb": "Labuk-Kinabatangan Kadazan",
"dtd": "Ditidaht",
"dth": "Adithinngithigh",
"dti": "Ana Tinga Dogon",
"dtk": "Tene Kan Dogon",
"dtm": "Tomo Kan Dogon",
"dtn": "Daatsʼíin",
"dto": "Tommo So Dogon",
"dtp": "Kadazan Dusun; Central Dusun",
"dtr": "Lotud",
"dts": "Toro So Dogon",
"dtt": "Toro Tegu Dogon",
"dtu": "Tebul Ure Dogon",
"dty": "Dotyali",
"dua": "Duala",
"dub": "Dubli",
"duc": "Duna",
"due": "Umiray Dumaget Agta",
"duf": "Dumbea; Drubea",
"dug": "Duruma; Chiduruma",
"duh": "Dungra Bhil",
"dui": "Dumun",
"duk": "Uyajitaya",
"dul": "Alabat Island Agta",
"dum": "Middle Dutch (ca. 1050-1350)",
"dun": "Dusun Deyah",
"duo": "Dupaninan Agta",
"dup": "Duano",
"duq": "Dusun Malang",
"dur": "Dii",
"dus": "Dumi",
"duu": "Drung",
"duv": "Duvle",
"duw": "Dusun Witu",
"dux": "Duungooma",
"duy": "Dicamay Agta",
"duz": "Duli-Gey",
"dv": "Dhivehi; Divehi; Maldivian",
"dva": "Duau",
"dwa": "Diri",
"dwk": "Dawik Kui",
"dwr": "Dawro",
"dws": "Dutton World Speedwords",
"dwu": "Dhuwal",
"dww": "Dawawa",
"dwy": "Dhuwaya",
"dwz": "Dewas Rai",
"dya": "Dyan",
"dyb": "Dyaberdyaber",
"dyd": "Dyugun",
"dyg": "Villa Viciosa Agta",
"dyi": "Djimini Senoufo",
"dym": "Yanda Dom Dogon",
"dyn": "Dyangadi; Dhanggatti",
"dyo": "Jola-Fonyi",
"dyu": "Dyula",
"dyy": "Djabugay; Dyaabugay",
"dz": "Dzongkha",
"dza": "Tunzu",
"dze": "Djiwarli",
"dzg": "Dazaga",
"dzl": "Dzalakha",
"dzn": "Dzando",
"eaa": "Karenggapa",
"ebc": "Beginci",
"ebg": "Ebughu",
"ebk": "Eastern Bontok",
"ebo": "Teke-Ebo",
"ebr": "Ebrié",
"ebu": "Embu; Kiembu",
"ecr": "Eteocretan",
"ecs": "Ecuadorian Sign Language",
"ecy": "Eteocypriot",
"ee": "Ewe",
"eee": "E",
"efa": "Efai",
"efe": "Efe",
"efi": "Efik",
"ega": "Ega",
"egl": "Emilian",
"egm": "Benamanga",
"ego": "Eggon",
"egx": "Egyptian languages",
"egy": "Egyptian (Ancient)",
"ehs": "Miyakubo Sign Language",
"ehu": "Ehueun",
"eip": "Eipomek",
"eit": "Eitiep",
"eiv": "Askopan",
"eja": "Ejamat",
"eka": "Ekajuk",
"eke": "Ekit",
"ekg": "Ekari",
"eki": "Eki",
"ekk": "Standard Estonian",
"ekl": "Kol (Bangladesh); Kol",
"ekm": "Elip",
"eko": "Koti",
"ekp": "Ekpeye",
"ekr": "Yace",
"eky": "Eastern Kayah",
"el": "Modern Greek (1453-)",
"ele": "Elepi",
"elh": "El Hugeirat",
"eli": "Nding",
"elk": "Elkei",
"elm": "Eleme",
"elo": "El Molo",
"elu": "Elu",
"elx": "Elamite",
"ema": "Emai-Iuleha-Ora",
"emb": "Embaloh",
"eme": "Emerillon",
"emg": "Eastern Meohang",
"emi": "Mussau-Emira",
"emk": "Eastern Maninkakan",
"emm": "Mamulique",
"emn": "Eman",
"emp": "Northern Emberá",
"emq": "Eastern Minyag",
"ems": "Pacific Gulf Yupik",
"emu": "Eastern Muria",
"emw": "Emplawas",
"emx": "Erromintxela",
"emy": "Epigraphic Mayan",
"emz": "Mbessa",
"en": "English",
"ena": "Apali",
"enb": "Markweeta",
"enc": "En",
"end": "Ende",
"enf": "Forest Enets",
"enh": "Tundra Enets",
"enl": "Enlhet",
"enm": "Middle English (1100-1500)",
"enn": "Engenni",
"eno": "Enggano",
"enq": "Enga",
"enr": "Emumu; Emem",
"enu": "Enu",
"env": "Enwan (Edo State)",
"enw": "Enwan (Akwa Ibom State)",
"enx": "Enxet",
"eo": "Esperanto",
"eot": "Beti (Côte d'Ivoire)",
"epi": "Epie",
"era": "Eravallan",
"erg": "Sie",
"erh": "Eruwa",
"eri": "Ogea",
"erk": "South Efate",
"ero": "Horpa",
"err": "Erre",
"ers": "Ersu",
"ert": "Eritai",
"erw": "Erokwanas",
"es": "Spanish; Castilian",
"ese": "Ese Ejja",
"esg": "Aheri Gondi",
"esh": "Eshtehardi",
"esi": "North Alaskan Inupiatun",
"esk": "Northwest Alaska Inupiatun",
"esl": "Egypt Sign Language",
"esm": "Esuma",
"esn": "Salvadoran Sign Language",
"eso": "Estonian Sign Language",
"esq": "Esselen",
"ess": "Central Siberian Yupik",
"esu": "Central Yupik",
"esx": "Eskimo-Aleut languages",
"esy": "Eskayan",
"et": "Estonian",
"etb": "Etebi",
"etc": "Etchemin",
"eth": "Ethiopian Sign Language",
"etn": "Eton (Vanuatu)",
"eto": "Eton (Cameroon)",
"etr": "Edolo",
"ets": "Yekhee",
"ett": "Etruscan",
"etu": "Ejagham",
"etx": "Eten",
"etz": "Semimi",
"eu": "Basque",
"euq": "Basque (family)",
"eve": "Even",
"evh": "Uvbie",
"evn": "Evenki",
"ewo": "Ewondo",
"ext": "Extremaduran",
"eya": "Eyak",
"eyo": "Keiyo",
"eza": "Ezaa",
"eze": "Uzekwe",
"fa": "Persian",
"faa": "Fasu",
"fab": "Fa d'Ambu",
"fad": "Wagi",
"faf": "Fagani",
"fag": "Finongan",
"fah": "Baissa Fali",
"fai": "Faiwol",
"faj": "Faita",
"fak": "Fang (Cameroon)",
"fal": "South Fali",
"fam": "Fam",
"fan": "Fang (Equatorial Guinea)",
"fap": "Paloor",
"far": "Fataleka",
"fat": "Fanti",
"fau": "Fayu",
"fax": "Fala",
"fay": "Southwestern Fars",
"faz": "Northwestern Fars",
"fbl": "West Albay Bikol",
"fcs": "Quebec Sign Language",
"fer": "Feroge",
"ff": "Fulah",
"ffi": "Foia Foia",
"ffm": "Maasina Fulfulde",
"fgr": "Fongoro",
"fi": "Finnish",
"fia": "Nobiin",
"fie": "Fyer",
"fif": "Faifi",
"fil": "Filipino; Pilipino",
"fip": "Fipa",
"fir": "Firan",
"fit": "Tornedalen Finnish; Meänkieli",
"fiu": "Finno-Ugrian languages",
"fiw": "Fiwaga",
"fj": "Fijian",
"fkk": "Kirya-Konzəl",
"fkv": "Kven Finnish",
"fla": "Kalispel-Pend d'Oreille",
"flh": "Foau",
"fli": "Fali",
"fll": "North Fali",
"fln": "Flinders Island",
"flr": "Fuliiru",
"fly": "Flaaitaal; Tsotsitaal",
"fmp": "Fe'fe'",
"fmu": "Far Western Muria",
"fnb": "Fanbak",
"fng": "Fanagalo",
"fni": "Fania",
"fo": "Faroese",
"fod": "Foodo",
"foi": "Foi",
"fom": "Foma",
"fon": "Fon",
"for": "Fore",
"fos": "Siraya",
"fox": "Formosan languages",
"fpe": "Fernando Po Creole English",
"fqs": "Fas",
"fr": "French",
"frc": "Cajun French",
"frd": "Fordata",
"frk": "Frankish",
"frm": "Middle French (ca. 1400-1600)",
"fro": "Old French (842-ca. 1400)",
"frp": "Arpitan; Francoprovençal",
"frq": "Forak",
"frr": "Northern Frisian",
"frs": "Eastern Frisian",
"frt": "Fortsenal",
"fse": "Finnish Sign Language",
"fsl": "French Sign Language",
"fss": "Finland-Swedish Sign Language; finlandssvenskt teckenspråk; suomenruotsalainen viittomakieli",
"fub": "Adamawa Fulfulde",
"fuc": "Pulaar",
"fud": "East Futuna",
"fue": "Borgu Fulfulde",
"fuf": "Pular",
"fuh": "Western Niger Fulfulde",
"fui": "Bagirmi Fulfulde",
"fuj": "Ko",
"fum": "Fum",
"fun": "Fulniô",
"fuq": "Central-Eastern Niger Fulfulde",
"fur": "Friulian",
"fut": "Futuna-Aniwa",
"fuu": "Furu",
"fuv": "Nigerian Fulfulde",
"fuy": "Fuyug",
"fvr": "Fur",
"fwa": "Fwâi",
"fwe": "Fwe",
"fy": "Western Frisian",
"ga": "Irish",
"gaa": "Ga",
"gab": "Gabri",
"gac": "Mixed Great Andamanese",
"gad": "Gaddang",
"gae": "Guarequena",
"gaf": "Gende",
"gag": "Gagauz",
"gah": "Alekano",
"gai": "Borei",
"gaj": "Gadsup",
"gak": "Gamkonora",
"gal": "Galolen",
"gam": "Kandawo",
"gan": "Gan Chinese",
"gao": "Gants",
"gap": "Gal",
"gaq": "Gata'",
"gar": "Galeya",
"gas": "Adiwasi Garasia",
"gat": "Kenati",
"gau": "Mudhili Gadaba",
"gaw": "Nobonob",
"gax": "Borana-Arsi-Guji Oromo",
"gay": "Gayo",
"gaz": "West Central Oromo",
"gba": "Gbaya (Central African Republic)",
"gbb": "Kaytetye",
"gbd": "Karajarri",
"gbe": "Niksek",
"gbf": "Gaikundi",
"gbg": "Gbanziri",
"gbh": "Defi Gbe",
"gbi": "Galela",
"gbj": "Bodo Gadaba",
"gbk": "Gaddi",
"gbl": "Gamit",
"gbm": "Garhwali",
"gbn": "Mo'da",
"gbo": "Northern Grebo",
"gbp": "Gbaya-Bossangoa",
"gbq": "Gbaya-Bozoum",
"gbr": "Gbagyi",
"gbs": "Gbesi Gbe",
"gbu": "Gagadu",
"gbv": "Gbanu",
"gbw": "Gabi-Gabi",
"gbx": "Eastern Xwla Gbe",
"gby": "Gbari",
"gbz": "Zoroastrian Dari",
"gcc": "Mali",
"gcd": "Ganggalida",
"gce": "Galice",
"gcf": "Guadeloupean Creole French",
"gcl": "Grenadian Creole English",
"gcn": "Gaina",
"gcr": "Guianese Creole French",
"gct": "Colonia Tovar German",
"gd": "Scottish Gaelic; Gaelic",
"gda": "Gade Lohar",
"gdb": "Pottangi Ollar Gadaba",
"gdc": "Gugu Badhun",
"gdd": "Gedaged",
"gde": "Gude",
"gdf": "Guduf-Gava",
"gdg": "Ga'dang",
"gdh": "Gadjerawang; Gajirrabeng",
"gdi": "Gundi",
"gdj": "Gurdjar",
"gdk": "Gadang",
"gdl": "Dirasha",
"gdm": "Laal",
"gdn": "Umanakaina",
"gdo": "Ghodoberi",
"gdq": "Mehri",
"gdr": "Wipi",
"gds": "Ghandruk Sign Language",
"gdt": "Kungardutyi",
"gdu": "Gudu",
"gdx": "Godwari",
"gea": "Geruma",
"geb": "Kire",
"gec": "Gboloo Grebo",
"ged": "Gade",
"gef": "Gerai",
"geg": "Gengle",
"geh": "Hutterite German; Hutterisch",
"gei": "Gebe",
"gej": "Gen",
"gek": "Ywom",
"gel": "ut-Ma'in",
"gem": "Germanic languages",
"geq": "Geme",
"ges": "Geser-Gorom",
"gev": "Eviya",
"gew": "Gera",
"gex": "Garre",
"gey": "Enya",
"gez": "Geez",
"gfk": "Patpatar",
"gft": "Gafat",
"gga": "Gao",
"ggb": "Gbii",
"ggd": "Gugadj",
"gge": "Gurr-goni",
"ggg": "Gurgula",
"ggk": "Kungarakany",
"ggl": "Ganglau",
"ggt": "Gitua",
"ggu": "Gagu; Gban",
"ggw": "Gogodala",
"gha": "Ghadamès",
"ghc": "Hiberno-Scottish Gaelic",
"ghe": "Southern Ghale",
"ghh": "Northern Ghale",
"ghk": "Geko Karen",
"ghl": "Ghulfan",
"ghn": "Ghanongga",
"gho": "Ghomara",
"ghr": "Ghera",
"ghs": "Guhu-Samane",
"ght": "Kuke; Kutang Ghale",
"gia": "Kija",
"gib": "Gibanawa",
"gic": "Gail",
"gid": "Gidar",
"gie": "Gaɓogbo; Guébie",
"gig": "Goaria",
"gih": "Githabul",
"gii": "Girirra",
"gil": "Gilbertese",
"gim": "Gimi (Eastern Highlands)",
"gin": "Hinukh",
"gip": "Gimi (West New Britain)",
"giq": "Green Gelao",
"gir": "Red Gelao",
"gis": "North Giziga",
"git": "Gitxsan",
"giu": "Mulao",
"giw": "White Gelao",
"gix": "Gilima",
"giy": "Giyug",
"giz": "South Giziga",
"gjk": "Kachi Koli",
"gjm": "Gunditjmara",
"gjn": "Gonja",
"gjr": "Gurindji Kriol",
"gju": "Gujari",
"gka": "Guya",
"gkd": "Magɨ (Madang Province)",
"gke": "Ndai",
"gkn": "Gokana",
"gko": "Kok-Nar",
"gkp": "Guinea Kpelle",
"gku": "ǂUngkue",
"gl": "Galician",
"glb": "Belning",
"glc": "Bon Gula",
"gld": "Nanai",
"glh": "Northwest Pashai; Northwest Pashayi",
"glj": "Gula Iro",
"glk": "Gilaki",
"gll": "Garlali",
"glo": "Galambu",
"glr": "Glaro-Twabo",
"glu": "Gula (Chad)",
"glw": "Glavda",
"gly": "Gule",
"gma": "Gambera",
"gmb": "Gula'alaa",
"gmd": "Mághdì",
"gme": "East Germanic languages",
"gmg": "Magɨyi",
"gmh": "Middle High German (ca. 1050-1500)",
"gml": "Middle Low German",
"gmm": "Gbaya-Mbodomo",
"gmn": "Gimnime",
"gmq": "North Germanic languages",
"gmr": "Mirning; Mirniny",
"gmu": "Gumalu",
"gmv": "Gamo",
"gmw": "West Germanic languages",
"gmx": "Magoma",
"gmy": "Mycenaean Greek",
"gmz": "Mgbolizhia",
"gn": "Guarani",
"gna": "Kaansa",
"gnb": "Gangte",
"gnc": "Guanche",
"gnd": "Zulgo-Gemzek",
"gne": "Ganang",
"gng": "Ngangam",
"gnh": "Lere",
"gni": "Gooniyandi",
"gnj": "Ngen",
"gnk": "ǁGana",
"gnl": "Gangulu",
"gnm": "Ginuman",
"gnn": "Gumatj",
"gno": "Northern Gondi",
"gnq": "Gana",
"gnr": "Gureng Gureng",
"gnt": "Guntai",
"gnu": "Gnau",
"gnw": "Western Bolivian Guaraní",
"gnz": "Ganzi",
"goa": "Guro",
"gob": "Playero",
"goc": "Gorakor",
"god": "Godié",
"goe": "Gongduk",
"gof": "Gofa",
"gog": "Gogo",
"goh": "Old High German (ca. 750-1050)",
"goi": "Gobasi",
"goj": "Gowlan",
"gok": "Gowli",
"gol": "Gola",
"gom": "Goan Konkani",
"gon": "Gondi",
"goo": "Gone Dau",
"gop": "Yeretuar",
"goq": "Gorap",
"gor": "Gorontalo",
"gos": "Gronings",
"got": "Gothic",
"gou": "Gavar",
"gov": "Goo",
"gow": "Gorowa",
"gox": "Gobu",
"goy": "Goundo",
"goz": "Gozarkhani",
"gpa": "Gupa-Abawa",
"gpe": "Ghanaian Pidgin English",
"gpn": "Taiap",
"gqa": "Ga'anda",
"gqi": "Guiqiong",
"gqn": "Guana (Brazil)",
"gqr": "Gor",
"gqu": "Qau",
"gra": "Rajput Garasia",
"grb": "Grebo",
"grc": "Ancient Greek (to 1453)",
"grd": "Guruntum-Mbaaru",
"grg": "Madi",
"grh": "Gbiri-Niragu",
"gri": "Ghari",
"grj": "Southern Grebo",
"grk": "Greek languages",
"grm": "Kota Marudu Talantang",
"gro": "Groma",
"grq": "Gorovu",
"grr": "Taznatit",
"grs": "Gresi",
"grt": "Garo",
"gru": "Kistane",
"grv": "Central Grebo",
"grw": "Gweda",
"grx": "Guriaso",
"gry": "Barclayville Grebo",
"grz": "Guramalum",
"gse": "Ghanaian Sign Language",
"gsg": "German Sign Language",
"gsl": "Gusilay",
"gsm": "Guatemalan Sign Language",
"gsn": "Nema; Gusan",
"gso": "Southwest Gbaya",
"gsp": "Wasembo",
"gss": "Greek Sign Language",
"gsw": "Swiss German; Alemannic; Alsatian",
"gta": "Guató",
"gtu": "Aghu-Tharnggala",
"gu": "Gujarati",
"gua": "Shiki",
"gub": "Guajajára",
"guc": "Wayuu",
"gud": "Yocoboué Dida",
"gue": "Gurindji",
"guf": "Gupapuyngu",
"gug": "Paraguayan Guaraní",
"guh": "Guahibo",
"gui": "Eastern Bolivian Guaraní",
"guk": "Gumuz",
"gul": "Sea Island Creole English",
"gum": "Guambiano",
"gun": "Mbyá Guaraní",
"guo": "Guayabero",
"gup": "Gunwinggu",
"guq": "Aché",
"gur": "Farefare",
"gus": "Guinean Sign Language",
"gut": "Maléku Jaíka",
"guu": "Yanomamö",
"guw": "Gun",
"gux": "Gourmanchéma",
"guz": "Gusii; Ekegusii",
"gv": "Manx",
"gva": "Guana (Paraguay)",
"gvc": "Guanano",
"gve": "Duwet",
"gvf": "Golin",
"gvj": "Guajá",
"gvl": "Gulay",
"gvm": "Gurmana",
"gvn": "Kuku-Yalanji",
"gvo": "Gavião Do Jiparaná",
"gvp": "Pará Gavião",
"gvr": "Gurung",
"gvs": "Gumawana",
"gvy": "Guyani",
"gwa": "Mbato",
"gwb": "Gwa",
"gwc": "Gawri; Kalami",
"gwd": "Gawwada",
"gwe": "Gweno",
"gwf": "Gowro",
"gwg": "Moo",
"gwi": "Gwichʼin",
"gwj": "ǀGwi",
"gwm": "Awngthim",
"gwn": "Gwandara",
"gwr": "Gwere",
"gwt": "Gawar-Bati",
"gwu": "Guwamu",
"gww": "Kwini",
"gwx": "Gua",
"gxx": "Wè Southern",
"gya": "Northwest Gbaya",
"gyb": "Garus",
"gyd": "Kayardild",
"gye": "Gyem",
"gyf": "Gungabula",
"gyg": "Gbayi",
"gyi": "Gyele",
"gyl": "Gayil",
"gym": "Ngäbere",
"gyn": "Guyanese Creole English",
"gyo": "Gyalsumdo",
"gyr": "Guarayu",
"gyy": "Gunya",
"gyz": "Geji; Gyaazi",
"gza": "Ganza",
"gzi": "Gazi",
"gzn": "Gane",
"ha": "Hausa",
"haa": "Han",
"hab": "Hanoi Sign Language",
"hac": "Gurani",
"had": "Hatam",
"hae": "Eastern Oromo",
"haf": "Haiphong Sign Language",
"hag": "Hanga",
"hah": "Hahon",
"hai": "Haida",
"haj": "Hajong",
"hak": "Hakka Chinese",
"hal": "Halang",
"ham": "Hewa",
"han": "Hangaza",
"hao": "Hakö",
"hap": "Hupla",
"haq": "Ha",
"har": "Harari",
"has": "Haisla",
"hav": "Havu",
"haw": "Hawaiian",
"hax": "Southern Haida",
"hay": "Haya",
"haz": "Hazaragi",
"hba": "Hamba",
"hbb": "Huba",
"hbn": "Heiban",
"hbo": "Ancient Hebrew",
"hbu": "Habu",
"hca": "Andaman Creole Hindi",
"hch": "Huichol",
"hdn": "Northern Haida",
"hds": "Honduras Sign Language",
"hdy": "Hadiyya",
"he": "Hebrew",
"hea": "Northern Qiandong Miao",
"hed": "Herdé",
"heg": "Helong",
"heh": "Hehe",
"hei": "Heiltsuk",
"hem": "Hemba",
"hgm": "Haiǁom",
"hgw": "Haigwai",
"hhi": "Hoia Hoia",
"hhr": "Kerak",
"hhy": "Hoyahoya",
"hi": "Hindi",
"hia": "Lamang",
"hib": "Hibito",
"hid": "Hidatsa",
"hif": "Fiji Hindi",
"hig": "Kamwe",
"hih": "Pamosu",
"hii": "Hinduri",
"hij": "Hijuk",
"hik": "Seit-Kaitetu",
"hil": "Hiligaynon",
"him": "Himachali languages; Western Pahari languages",
"hio": "Tsoa",
"hir": "Himarimã",
"hit": "Hittite",
"hiw": "Hiw",
"hix": "Hixkaryána",
"hji": "Haji",
"hka": "Kahe",
"hke": "Hunde",
"hkh": "Khah; Poguli",
"hkk": "Hunjara-Kaina Ke",
"hkn": "Mel-Khaonh",
"hks": "Hong Kong Sign Language; Heung Kong Sau Yue",
"hla": "Halia",
"hlb": "Halbi",
"hld": "Halang Doan",
"hle": "Hlersu",
"hlt": "Matu Chin",
"hlu": "Hieroglyphic Luwian",
"hma": "Southern Mashan Hmong; Southern Mashan Miao",
"hmb": "Humburi Senni Songhay",
"hmc": "Central Huishui Hmong; Central Huishui Miao",
"hmd": "Large Flowery Miao; A-hmaos; Da-Hua Miao",
"hme": "Eastern Huishui Hmong; Eastern Huishui Miao",
"hmf": "Hmong Don",
"hmg": "Southwestern Guiyang Hmong",
"hmh": "Southwestern Huishui Hmong; Southwestern Huishui Miao",
"hmi": "Northern Huishui Hmong; Northern Huishui Miao",
"hmj": "Ge; Gejia",
"hmk": "Maek",
"hml": "Luopohe Hmong; Luopohe Miao",
"hmm": "Central Mashan Hmong; Central Mashan Miao",
"hmn": "Hmong; Mong",
"hmp": "Northern Mashan Hmong; Northern Mashan Miao",
"hmq": "Eastern Qiandong Miao",
"hmr": "Hmar",
"hms": "Southern Qiandong Miao",
"hmt": "Hamtai",
"hmu": "Hamap",
"hmv": "Hmong Dô",
"hmw": "Western Mashan Hmong; Western Mashan Miao",
"hmx": "Hmong-Mien languages",
"hmy": "Southern Guiyang Hmong; Southern Guiyang Miao",
"hmz": "Hmong Shua; Sinicized Miao",
"hna": "Mina (Cameroon)",
"hnd": "Southern Hindko",
"hne": "Chhattisgarhi",
"hng": "Hungu",
"hnh": "ǁAni",
"hni": "Hani",
"hnj": "Hmong Njua; Mong Leng; Mong Njua",
"hnn": "Hanunoo",
"hno": "Northern Hindko",
"hns": "Caribbean Hindustani",
"hnu": "Hung",
"ho": "Hiri Motu",
"hoa": "Hoava",
"hob": "Mari (Madang Province)",
"hoc": "Ho",
"hod": "Holma",
"hoe": "Horom",
"hoh": "Hobyót",
"hoi": "Holikachuk",
"hoj": "Hadothi; Haroti",
"hok": "Hokan languages",
"hol": "Holu",
"hom": "Homa",
"hoo": "Holoholo",
"hop": "Hopi",
"hor": "Horo",
"hos": "Ho Chi Minh City Sign Language",
"hot": "Hote; Malê",
"hov": "Hovongan",
"how": "Honi",
"hoy": "Holiya",
"hoz": "Hozo",
"hpo": "Hpon",
"hps": "Hawai'i Sign Language (HSL); Hawai'i Pidgin Sign Language",
"hr": "Croatian",
"hra": "Hrangkhol",
"hrc": "Niwer Mil",
"hre": "Hre",
"hrk": "Haruku",
"hrm": "Horned Miao",
"hro": "Haroi",
"hrp": "Nhirrpi",
"hrt": "Hértevin",
"hru": "Hruso",
"hrw": "Warwar Feni",
"hrx": "Hunsrik",
"hrz": "Harzani",
"hsb": "Upper Sorbian",
"hsh": "Hungarian Sign Language",
"hsl": "Hausa Sign Language",
"hsn": "Xiang Chinese",
"hss": "Harsusi",
"ht": "Haitian; Haitian Creole",
"hti": "Hoti",
"hto": "Minica Huitoto",
"hts": "Hadza",
"htu": "Hitu",
"htx": "Middle Hittite",
"hu": "Hungarian",
"hub": "Huambisa",
"huc": "ǂHua; ǂʼAmkhoe",
"hud": "Huaulu",
"hue": "San Francisco Del Mar Huave",
"huf": "Humene",
"hug": "Huachipaeri",
"huh": "Huilliche",
"hui": "Huli",
"huj": "Northern Guiyang Hmong; Northern Guiyang Miao",
"huk": "Hulung",
"hul": "Hula",
"hum": "Hungana",
"huo": "Hu",
"hup": "Hupa",
"huq": "Tsat",
"hur": "Halkomelem",
"hus": "Huastec",
"hut": "Humla",
"huu": "Murui Huitoto",
"huv": "San Mateo Del Mar Huave",
"huw": "Hukumina",
"hux": "Nüpode Huitoto",
"huy": "Hulaulá",
"huz": "Hunzib",
"hvc": "Haitian Vodoun Culture Language",
"hve": "San Dionisio Del Mar Huave",
"hvk": "Haveke",
"hvn": "Sabu",
"hvv": "Santa María Del Mar Huave",
"hwa": "Wané",
"hwc": "Hawai'i Creole English; Hawai'i Pidgin",
"hwo": "Hwana",
"hy": "Armenian",
"hya": "Hya",
"hyw": "Western Armenian",
"hyx": "Armenian (family)",
"hz": "Herero",
"ia": "Interlingua (International Auxiliary Language Association)",
"iai": "Iaai",
"ian": "Iatmul",
"iar": "Purari",
"iba": "Iban",
"ibb": "Ibibio",
"ibd": "Iwaidja",
"ibe": "Akpes",
"ibg": "Ibanag",
"ibh": "Bih",
"ibl": "Ibaloi",
"ibm": "Agoi",
"ibn": "Ibino",
"ibr": "Ibuoro",
"ibu": "Ibu",
"iby": "Ibani",
"ica": "Ede Ica",
"ich": "Etkywan",
"icl": "Icelandic Sign Language",
"icr": "Islander Creole English",
"id": "Indonesian",
"ida": "Idakho-Isukha-Tiriki; Luidakho-Luisukha-Lutirichi",
"idb": "Indo-Portuguese",
"idc": "Idon; Ajiya",
"idd": "Ede Idaca",
"ide": "Idere",
"idi": "Idi",
"idr": "Indri",
"ids": "Idesa",
"idt": "Idaté",
"idu": "Idoma",
"ie": "Interlingue; Occidental",
"ifa": "Amganad Ifugao",
"ifb": "Batad Ifugao; Ayangan Ifugao",
"ife": "Ifè",
"iff": "Ifo",
"ifk": "Tuwali Ifugao",
"ifm": "Teke-Fuumu",
"ifu": "Mayoyao Ifugao",
"ify": "Keley-I Kallahan",
"ig": "Igbo",
"igb": "Ebira",
"ige": "Igede",
"igg": "Igana",
"igl": "Igala",
"igm": "Kanggape",
"ign": "Ignaciano",
"igo": "Isebe",
"igs": "Interglossa",
"igw": "Igwe",
"ihb": "Iha Based Pidgin",
"ihi": "Ihievbe",
"ihp": "Iha",
"ihw": "Bidhawal",
"ii": "Sichuan Yi; Nuosu",
"iin": "Thiin",
"iir": "Indo-Iranian languages",
"ijc": "Izon",
"ije": "Biseni",
"ijj": "Ede Ije",
"ijn": "Kalabari",
"ijo": "Ijo languages",
"ijs": "Southeast Ijo",
"ik": "Inupiaq",
"ike": "Eastern Canadian Inuktitut",
"iki": "Iko",
"ikk": "Ika",
"ikl": "Ikulu",
"iko": "Olulumo-Ikom",
"ikp": "Ikpeshi",
"ikr": "Ikaranggal",
"iks": "Inuit Sign Language",
"ikt": "Inuinnaqtun; Western Canadian Inuktitut",
"ikv": "Iku-Gora-Ankwa",
"ikw": "Ikwere",
"ikx": "Ik",
"ikz": "Ikizu",
"ila": "Ile Ape",
"ilb": "Ila",
"ilg": "Garig-Ilgar",
"ili": "Ili Turki",
"ilk": "Ilongot",
"ilm": "Iranun (Malaysia)",
"ilo": "Iloko",
"ilp": "Iranun (Philippines)",
"ils": "International Sign",
"ilu": "Ili'uun",
"ilv": "Ilue",
"ima": "Mala Malasar",
"imi": "Anamgura",
"iml": "Miluk",
"imn": "Imonda",
"imo": "Imbongu",
"imr": "Imroing",
"ims": "Marsian",
"imt": "Imotong",
"imy": "Milyan",
"inb": "Inga",
"inc": "Indic languages",
"ine": "Indo-European languages",
"ing": "Degexit'an",
"inh": "Ingush",
"inj": "Jungle Inga",
"inl": "Indonesian Sign Language",
"inm": "Minaean",
"inn": "Isinai",
"ino": "Inoke-Yate",
"inp": "Iñapari",
"ins": "Indian Sign Language",
"int": "Intha",
"inz": "Ineseño",
"io": "Ido",
"ior": "Inor",
"iou": "Tuma-Irumu",
"iow": "Iowa-Oto",
"ipi": "Ipili",
"ipo": "Ipiko",
"iqu": "Iquito",
"iqw": "Ikwo",
"ira": "Iranian languages",
"ire": "Iresim",
"irh": "Irarutu",
"iri": "Rigwe; Irigwe",
"irk": "Iraqw",
"irn": "Irántxe",
"iro": "Iroquoian languages",
"irr": "Ir",
"iru": "Irula",
"irx": "Kamberau",
"iry": "Iraya",
"is": "Icelandic",
"isa": "Isabi",
"isc": "Isconahua",
"isd": "Isnag",
"ise": "Italian Sign Language",
"isg": "Irish Sign Language",
"ish": "Esan",
"isi": "Nkem-Nkum",
"isk": "Ishkashimi",
"ism": "Masimasi",
"isn": "Isanzu",
"iso": "Isoko",
"isr": "Israeli Sign Language",
"ist": "Istriot",
"isu": "Isu (Menchum Division)",
"it": "Italian",
"itb": "Binongan Itneg",
"itc": "Italic languages",
"itd": "Southern Tidung",
"ite": "Itene",
"iti": "Inlaod Itneg",
"itk": "Judeo-Italian",
"itl": "Itelmen",
"itm": "Itu Mbon Uzo",
"ito": "Itonama",
"itr": "Iteri",
"its": "Isekiri",
"itt": "Maeng Itneg",
"itv": "Itawit",
"itw": "Ito",
"itx": "Itik",
"ity": "Moyadan Itneg",
"itz": "Itzá",
"iu": "Inuktitut",
"ium": "Iu Mien",
"ivb": "Ibatan",
"ivv": "Ivatan",
"iwk": "I-Wak",
"iwm": "Iwam",
"iwo": "Iwur",
"iws": "Sepik Iwam",
"ixc": "Ixcatec",
"ixl": "Ixil",
"iya": "Iyayu",
"iyo": "Mesaka",
"iyx": "Yaka (Congo)",
"izh": "Ingrian",
"izr": "Izere",
"izz": "Izii",
"ja": "Japanese",
"jaa": "Jamamadí",
"jab": "Hyam",
"jac": "Popti'; Jakalteko",
"jad": "Jahanka",
"jae": "Yabem",
"jaf": "Jara",
"jah": "Jah Hut",
"jaj": "Zazao",
"jak": "Jakun",
"jal": "Yalahatan",
"jam": "Jamaican Creole English",
"jan": "Jandai",
"jao": "Yanyuwa",
"jaq": "Yaqay",
"jas": "New Caledonian Javanese",
"jat": "Jakati",
"jau": "Yaur",
"jax": "Jambi Malay",
"jay": "Yan-nhangu; Nhangu",
"jaz": "Jawe",
"jbe": "Judeo-Berber",
"jbi": "Badjiri",
"jbj": "Arandai",
"jbk": "Barikewa",
"jbm": "Bijim",
"jbn": "Nafusi",
"jbo": "Lojban",
"jbr": "Jofotek-Bromnya",
"jbt": "Jabutí",
"jbu": "Jukun Takum",
"jbw": "Yawijibaya",
"jcs": "Jamaican Country Sign Language",
"jct": "Krymchak",
"jda": "Jad",
"jdg": "Jadgali",
"jdt": "Judeo-Tat",
"jeb": "Jebero",
"jee": "Jerung",
"jeh": "Jeh",
"jei": "Yei",
"jek": "Jeri Kuo",
"jel": "Yelmek",
"jen": "Dza",
"jer": "Jere",
"jet": "Manem",
"jeu": "Jonkor Bourmataguil",
"jgb": "Ngbee",
"jge": "Judeo-Georgian",
"jgk": "Gwak",
"jgo": "Ngomba",
"jhi": "Jehai",
"jhs": "Jhankot Sign Language",
"jia": "Jina",
"jib": "Jibu",
"jic": "Tol",
"jid": "Bu (Kaduna State)",
"jie": "Jilbe",
"jig": "Jingulu; Djingili",
"jih": "sTodsde; Shangzhai",
"jii": "Jiiddu",
"jil": "Jilim",
"jim": "Jimi (Cameroon)",
"jio": "Jiamao",
"jiq": "Guanyinqiao; Lavrung",
"jit": "Jita",
"jiu": "Youle Jinuo",
"jiv": "Shuar",
"jiy": "Buyuan Jinuo",
"jje": "Jejueo",
"jjr": "Bankal",
"jka": "Kaera",
"jkm": "Mobwa Karen",
"jko": "Kubo",
"jkp": "Paku Karen",
"jkr": "Koro (India)",
"jks": "Amami Koniya Sign Language",
"jku": "Labir",
"jle": "Ngile",
"jls": "Jamaican Sign Language",
"jma": "Dima",
"jmb": "Zumbun",
"jmc": "Machame",
"jmd": "Yamdena",
"jmi": "Jimi (Nigeria)",
"jml": "Jumli",
"jmn": "Makuri Naga",
"jmr": "Kamara",
"jms": "Mashi (Nigeria)",
"jmw": "Mouwase",
"jmx": "Western Juxtlahuaca Mixtec",
"jna": "Jangshung",
"jnd": "Jandavra",
"jng": "Yangman",
"jni": "Janji",
"jnj": "Yemsa",
"jnl": "Rawat",
"jns": "Jaunsari",
"job": "Joba",
"jod": "Wojenaka",
"jog": "Jogi",
"jor": "Jorá",
"jos": "Jordanian Sign Language",
"jow": "Jowulu",
"jpa": "Jewish Palestinian Aramaic",
"jpr": "Judeo-Persian",
"jpx": "Japanese (family)",
"jqr": "Jaqaru",
"jra": "Jarai",
"jrb": "Judeo-Arabic",
"jrr": "Jiru",
"jrt": "Jakattoe",
"jru": "Japrería",
"jsl": "Japanese Sign Language",
"jua": "Júma",
"jub": "Wannu",
"juc": "Jurchen",
"jud": "Worodougou",
"juh": "Hõne",
"jui": "Ngadjuri",
"juk": "Wapan",
"jul": "Jirel",
"jum": "Jumjum",
"jun": "Juang",
"juo": "Jiba",
"jup": "Hupdë",
"jur": "Jurúna",
"jus": "Jumla Sign Language",
"jut": "Jutish",
"juu": "Ju",
"juw": "Wãpha",
"juy": "Juray",
"jv": "Javanese",
"jvd": "Javindo",
"jvn": "Caribbean Javanese",
"jwi": "Jwira-Pepesa",
"jya": "Jiarong",
"jye": "Judeo-Yemeni Arabic",
"jyy": "Jaya",
"ka": "Georgian",
"kaa": "Kara-Kalpak; Karakalpak",
"kab": "Kabyle",
"kac": "Kachin; Jingpho",
"kad": "Adara",
"kae": "Ketangalan",
"kaf": "Katso",
"kag": "Kajaman",
"kah": "Kara (Central African Republic)",
"kai": "Karekare",
"kaj": "Jju",
"kak": "Kalanguya; Kayapa Kallahan",
"kam": "Kamba (Kenya)",
"kao": "Xaasongaxango",
"kap": "Bezhta",
"kaq": "Capanahua",
"kar": "Karen languages",
"kav": "Katukína",
"kaw": "Kawi",
"kax": "Kao",
"kay": "Kamayurá",
"kba": "Kalarko",
"kbb": "Kaxuiâna",
"kbc": "Kadiwéu",
"kbd": "Kabardian",
"kbe": "Kanju",
"kbg": "Khamba",
"kbh": "Camsá",
"kbi": "Kaptiau",
"kbj": "Kari",
"kbk": "Grass Koiari",
"kbl": "Kanembu",
"kbm": "Iwal",
"kbn": "Kare (Central African Republic)",
"kbo": "Keliko",
"kbp": "Kabiyè",
"kbq": "Kamano",
"kbr": "Kafa",
"kbs": "Kande",
"kbt": "Abadi",
"kbu": "Kabutra",
"kbv": "Dera (Indonesia)",
"kbw": "Kaiep",
"kbx": "Ap Ma",
"kby": "Manga Kanuri",
"kbz": "Duhwa",
"kca": "Khanty",
"kcb": "Kawacha",
"kcc": "Lubila",
"kcd": "Ngkâlmpw Kanum",
"kce": "Kaivi",
"kcf": "Ukaan",
"kcg": "Tyap",
"kch": "Vono",
"kci": "Kamantan",
"kcj": "Kobiana",
"kck": "Kalanga",
"kcl": "Kela (Papua New Guinea); Kala",
"kcm": "Gula (Central African Republic)",
"kcn": "Nubi",
"kco": "Kinalakna",
"kcp": "Kanga",
"kcq": "Kamo",
"kcr": "Katla",
"kcs": "Koenoem",
"kct": "Kaian",
"kcu": "Kami (Tanzania)",
"kcv": "Kete",
"kcw": "Kabwari",
"kcx": "Kachama-Ganjule",
"kcy": "Korandje",
"kcz": "Konongo",
"kda": "Worimi",
"kdc": "Kutu",
"kdd": "Yankunytjatjara",
"kde": "Makonde",
"kdf": "Mamusi",
"kdg": "Seba",
"kdh": "Tem",
"kdi": "Kumam",
"kdj": "Karamojong",
"kdk": "Numèè; Kwényi",
"kdl": "Tsikimba",
"kdm": "Kagoma",
"kdn": "Kunda",
"kdo": "Kordofanian languages",
"kdp": "Kaningdon-Nindem",
"kdq": "Koch",
"kdr": "Karaim",
"kdt": "Kuy",
"kdu": "Kadaru",
"kdw": "Koneraw",
"kdx": "Kam",
"kdy": "Keder; Keijar",
"kdz": "Kwaja",
"kea": "Kabuverdianu",
"keb": "Kélé",
"kec": "Keiga",
"ked": "Kerewe",
"kee": "Eastern Keres",
"kef": "Kpessi",
"keg": "Tese",
"keh": "Keak",
"kei": "Kei",
"kej": "Kadar",
"kek": "Kekchí",
"kel": "Kela (Democratic Republic of Congo)",
"kem": "Kemak",
"ken": "Kenyang",
"keo": "Kakwa",
"kep": "Kaikadi",
"keq": "Kamar",
"ker": "Kera",
"kes": "Kugbo",
"ket": "Ket",
"keu": "Akebu",
"kev": "Kanikkaran",
"kew": "West Kewa",
"kex": "Kukna",
"key": "Kupia",
"kez": "Kukele",
"kfa": "Kodava",
"kfb": "Northwestern Kolami",
"kfc": "Konda-Dora",
"kfd": "Korra Koraga",
"kfe": "Kota (India)",
"kff": "Koya",
"kfg": "Kudiya",
"kfh": "Kurichiya",
"kfi": "Kannada Kurumba",
"kfj": "Kemiehua",
"kfk": "Kinnauri",
"kfl": "Kung",
"kfm": "Khunsari",
"kfn": "Kuk",
"kfo": "Koro (Côte d'Ivoire)",
"kfp": "Korwa",
"kfq": "Korku",
"kfr": "Kachhi; Kutchi",
"kfs": "Bilaspuri",
"kft": "Kanjari",
"kfu": "Katkari",
"kfv": "Kurmukar",
"kfw": "Kharam Naga",
"kfx": "Kullu Pahari",
"kfy": "Kumaoni",
"kfz": "Koromfé",
"kg": "Kongo",
"kga": "Koyaga",
"kgb": "Kawe",
"kge": "Komering",
"kgf": "Kube",
"kgg": "Kusunda",
"kgi": "Selangor Sign Language",
"kgj": "Gamale Kham",
"kgk": "Kaiwá",
"kgl": "Kunggari",
"kgm": "Karipúna",
"kgn": "Karingani",
"kgo": "Krongo",
"kgp": "Kaingang",
"kgq": "Kamoro",
"kgr": "Abun",
"kgs": "Kumbainggar",
"kgt": "Somyev",
"kgu": "Kobol",
"kgv": "Karas",
"kgw": "Karon Dori",
"kgx": "Kamaru",
"kgy": "Kyerung",
"kha": "Khasi",
"khb": "Lü",
"khc": "Tukang Besi North",
"khd": "Bädi Kanum",
"khe": "Korowai",
"khf": "Khuen",
"khg": "Khams Tibetan",
"khh": "Kehu",
"khi": "Khoisan languages",
"khj": "Kuturmi",
"khk": "Halh Mongolian",
"khl": "Lusi",
"khn": "Khandesi",
"kho": "Khotanese; Sakan",
"khp": "Kapori; Kapauri",
"khq": "Koyra Chiini Songhay",
"khr": "Kharia",
"khs": "Kasua",
"kht": "Khamti",
"khu": "Nkhumbi",
"khv": "Khvarshi",
"khw": "Khowar",
"khx": "Kanu",
"khy": "Kele (Democratic Republic of Congo)",
"khz": "Keapara",
"ki": "Kikuyu; Gikuyu",
"kia": "Kim",
"kib": "Koalib",
"kic": "Kickapoo",
"kid": "Koshin",
"kie": "Kibet",
"kif": "Eastern Parbate Kham",
"kig": "Kimaama; Kimaghima",
"kih": "Kilmeri",
"kii": "Kitsai",
"kij": "Kilivila",
"kil": "Kariya",
"kim": "Karagas",
"kio": "Kiowa",
"kip": "Sheshi Kham",
"kiq": "Kosadle; Kosare",
"kis": "Kis",
"kit": "Agob",
"kiu": "Kirmanjki (individual language)",
"kiv": "Kimbu",
"kiw": "Northeast Kiwai",
"kix": "Khiamniungan Naga",
"kiy": "Kirikiri",
"kiz": "Kisi",
"kj": "Kuanyama; Kwanyama",
"kja": "Mlap",
"kjb": "Q'anjob'al; Kanjobal",
"kjc": "Coastal Konjo",
"kjd": "Southern Kiwai",
"kje": "Kisar",
"kjg": "Khmu",
"kjh": "Khakas",
"kji": "Zabana",
"kjj": "Khinalugh",
"kjk": "Highland Konjo",
"kjl": "Western Parbate Kham",
"kjm": "Kháng",
"kjn": "Kunjen",
"kjo": "Harijan Kinnauri",
"kjp": "Pwo Eastern Karen",
"kjq": "Western Keres",
"kjr": "Kurudu",
"kjs": "East Kewa",
"kjt": "Phrae Pwo Karen",
"kju": "Kashaya",
"kjv": "Kaikavian Literary Language",
"kjx": "Ramopa",
"kjy": "Erave",
"kjz": "Bumthangkha",
"kk": "Kazakh",
"kka": "Kakanda",
"kkb": "Kwerisa",
"kkc": "Odoodee",
"kkd": "Kinuku",
"kke": "Kakabe",
"kkf": "Kalaktang Monpa",
"kkg": "Mabaka Valley Kalinga",
"kkh": "Khün",
"kki": "Kagulu",
"kkj": "Kako",
"kkk": "Kokota",
"kkl": "Kosarek Yale",
"kkm": "Kiong",
"kkn": "Kon Keu",
"kko": "Karko",
"kkp": "Gugubera; Koko-Bera",
"kkq": "Kaeku",
"kkr": "Kir-Balar",
"kks": "Giiwo",
"kkt": "Koi",
"kku": "Tumi",
"kkv": "Kangean",
"kkw": "Teke-Kukuya",
"kkx": "Kohin",
"kky": "Guugu Yimidhirr; Guguyimidjir",
"kkz": "Kaska",
"kl": "Kalaallisut; Greenlandic",
"kla": "Klamath-Modoc",
"klb": "Kiliwa",
"klc": "Kolbila",
"kld": "Gamilaraay",
"kle": "Kulung (Nepal)",
"klf": "Kendeje",
"klg": "Tagakaulo",
"klh": "Weliki",
"kli": "Kalumpang",
"klj": "Khalaj",
"klk": "Kono (Nigeria)",
"kll": "Kagan Kalagan",
"klm": "Migum",
"kln": "Kalenjin",
"klo": "Kapya",
"klp": "Kamasa",
"klq": "Rumu",
"klr": "Khaling",
"kls": "Kalasha",
"klt": "Nukna",
"klu": "Klao",
"klv": "Maskelynes",
"klw": "Tado; Lindu",
"klx": "Koluwawa",
"kly": "Kalao",
"klz": "Kabola",
"km": "Khmer; Central Khmer",
"kma": "Konni",
"kmb": "Kimbundu",
"kmc": "Southern Dong",
"kmd": "Majukayang Kalinga",
"kme": "Bakole",
"kmf": "Kare (Papua New Guinea)",
"kmg": "Kâte",
"kmh": "Kalam",
"kmi": "Kami (Nigeria)",
"kmj": "Kumarbhag Paharia",
"kmk": "Limos Kalinga",
"kml": "Tanudan Kalinga",
"kmm": "Kom (India)",
"kmn": "Awtuw",
"kmo": "Kwoma",
"kmp": "Gimme",
"kmq": "Kwama",
"kmr": "Northern Kurdish",
"kms": "Kamasau",
"kmt": "Kemtuik",
"kmu": "Kanite",
"kmv": "Karipúna Creole French",
"kmw": "Komo (Democratic Republic of Congo)",
"kmx": "Waboda",
"kmy": "Koma",
"kmz": "Khorasani Turkish",
"kn": "Kannada",
"kna": "Dera (Nigeria)",
"knb": "Lubuagan Kalinga",
"knc": "Central Kanuri",
"knd": "Konda",
"kne": "Kankanaey",
"knf": "Mankanya",
"kng": "Koongo",
"kni": "Kanufi",
"knj": "Western Kanjobal",
"knk": "Kuranko",
"knl": "Keninjal",
"knm": "Kanamarí",
"knn": "Konkani (individual language)",
"kno": "Kono (Sierra Leone)",
"knp": "Kwanja",
"knq": "Kintaq",
"knr": "Kaningra",
"kns": "Kensiu",
"knt": "Panoan Katukína",
"knu": "Kono (Guinea)",
"knv": "Tabo",
"knw": "Kung-Ekoka",
"knx": "Kendayan; Salako",
"kny": "Kanyok",
"knz": "Kalamsé",
"ko": "Korean",
"koa": "Konomala",
"koc": "Kpati",
"kod": "Kodi",
"koe": "Kacipo-Bale Suri",
"kof": "Kubi",
"kog": "Cogui; Kogi",
"koh": "Koyo",
"koi": "Komi-Permyak",
"kok": "Konkani (macrolanguage)",
"kol": "Kol (Papua New Guinea)",
"koo": "Konzo",
"kop": "Waube",
"koq": "Kota (Gabon)",
"kos": "Kosraean",
"kot": "Lagwan",
"kou": "Koke",
"kov": "Kudu-Camo",
"kow": "Kugama",
"koy": "Koyukon",
"koz": "Korak",
"kpa": "Kutto",
"kpb": "Mullu Kurumba",
"kpc": "Curripaco",
"kpd": "Koba",
"kpe": "Kpelle",
"kpf": "Komba",
"kpg": "Kapingamarangi",
"kph": "Kplang",
"kpi": "Kofei",
"kpj": "Karajá",
"kpk": "Kpan",
"kpl": "Kpala",
"kpm": "Koho",
"kpn": "Kepkiriwát",
"kpo": "Ikposo",
"kpq": "Korupun-Sela",
"kpr": "Korafe-Yegha",
"kps": "Tehit",
"kpt": "Karata",
"kpu": "Kafoa",
"kpv": "Komi-Zyrian",
"kpw": "Kobon",
"kpx": "Mountain Koiali",
"kpy": "Koryak",
"kpz": "Kupsabiny",
"kqa": "Mum",
"kqb": "Kovai",
"kqc": "Doromu-Koki",
"kqd": "Koy Sanjaq Surat",
"kqe": "Kalagan",
"kqf": "Kakabai",
"kqg": "Khe",
"kqh": "Kisankasa",
"kqi": "Koitabu",
"kqj": "Koromira",
"kqk": "Kotafon Gbe",
"kql": "Kyenele",
"kqm": "Khisa",
"kqn": "Kaonde",
"kqo": "Eastern Krahn",
"kqp": "Kimré",
"kqq": "Krenak",
"kqr": "Kimaragang",
"kqs": "Northern Kissi",
"kqt": "Klias River Kadazan",
"kqu": "Seroa",
"kqv": "Okolod",
"kqw": "Kandas",
"kqx": "Mser",
"kqy": "Koorete",
"kqz": "Korana",
"kr": "Kanuri",
"kra": "Kumhali",
"krb": "Karkin",
"krc": "Karachay-Balkar",
"krd": "Kairui-Midiki",
"kre": "Panará",
"krf": "Koro (Vanuatu)",
"krh": "Kurama",
"kri": "Krio",
"krj": "Kinaray-A",
"krk": "Kerek",
"krl": "Karelian",
"krn": "Sapo",
"kro": "Kru languages",
"krp": "Korop",
"krr": "Krung",
"krs": "Gbaya (Sudan)",
"krt": "Tumari Kanuri",
"kru": "Kurukh",
"krv": "Kavet",
"krw": "Western Krahn",
"krx": "Karon",
"kry": "Kryts",
"krz": "Sota Kanum",
"ks": "Kashmiri",
"ksa": "Shuwa-Zamani",
"ksb": "Shambala",
"ksc": "Southern Kalinga",
"ksd": "Kuanua",
"kse": "Kuni",
"ksf": "Bafia",
"ksg": "Kusaghe",
"ksh": "Kölsch",
"ksi": "Krisa; I'saka",
"ksj": "Uare",
"ksk": "Kansa",
"ksl": "Kumalu",
"ksm": "Kumba",
"ksn": "Kasiguranin",
"kso": "Kofa",
"ksp": "Kaba",
"ksq": "Kwaami",
"ksr": "Borong",
"kss": "Southern Kisi",
"kst": "Winyé",
"ksu": "Khamyang",
"ksv": "Kusu",
"ksw": "S'gaw Karen",
"ksx": "Kedang",
"ksy": "Kharia Thar",
"ksz": "Kodaku",
"kta": "Katua",
"ktb": "Kambaata",
"ktc": "Kholok",
"ktd": "Kokata; Kukatha",
"kte": "Nubri",
"ktf": "Kwami",
"ktg": "Kalkutung",
"kth": "Karanga",
"kti": "North Muyu",
"ktj": "Plapo Krumen",
"ktk": "Kaniet",
"ktl": "Koroshi",
"ktm": "Kurti",
"ktn": "Karitiâna",
"kto": "Kuot",
"ktp": "Kaduo",
"ktq": "Katabaga",
"kts": "South Muyu",
"ktt": "Ketum",
"ktu": "Kituba (Democratic Republic of Congo)",
"ktv": "Eastern Katu",
"ktw": "Kato",
"ktx": "Kaxararí",
"kty": "Kango (Bas-Uélé District)",
"ktz": "Juǀʼhoan; Juǀʼhoansi",
"ku": "Kurdish",
"kub": "Kutep",
"kuc": "Kwinsu",
"kud": "'Auhelawa",
"kue": "Kuman (Papua New Guinea)",
"kuf": "Western Katu",
"kug": "Kupa",
"kuh": "Kushi",
"kui": "Kuikúro-Kalapálo; Kalapalo",
"kuj": "Kuria",
"kuk": "Kepo'",
"kul": "Kulere",
"kum": "Kumyk",
"kun": "Kunama",
"kuo": "Kumukio",
"kup": "Kunimaipa",
"kuq": "Karipuna",
"kus": "Kusaal",
"kut": "Kutenai",
"kuu": "Upper Kuskokwim",
"kuv": "Kur",
"kuw": "Kpagua",
"kux": "Kukatja",
"kuy": "Kuuku-Ya'u",
"kuz": "Kunza",
"kv": "Komi",
"kva": "Bagvalal",
"kvb": "Kubu",
"kvc": "Kove",
"kvd": "Kui (Indonesia)",
"kve": "Kalabakan",
"kvf": "Kabalai",
"kvg": "Kuni-Boazi",
"kvh": "Komodo",
"kvi": "Kwang",
"kvj": "Psikye",
"kvk": "Korean Sign Language",
"kvl": "Kayaw",
"kvm": "Kendem",
"kvn": "Border Kuna",
"kvo": "Dobel",
"kvp": "Kompane",
"kvq": "Geba Karen",
"kvr": "Kerinci",
"kvt": "Lahta Karen; Lahta",
"kvu": "Yinbaw Karen",
"kvv": "Kola",
"kvw": "Wersing",
"kvx": "Parkari Koli",
"kvy": "Yintale Karen; Yintale",
"kvz": "Tsakwambo; Tsaukambo",
"kw": "Cornish",
"kwa": "Dâw",
"kwb": "Kwa",
"kwc": "Likwala",
"kwd": "Kwaio",
"kwe": "Kwerba",
"kwf": "Kwara'ae",
"kwg": "Sara Kaba Deme",
"kwh": "Kowiai",
"kwi": "Awa-Cuaiquer",
"kwj": "Kwanga",
"kwk": "Kwakiutl",
"kwl": "Kofyar",
"kwm": "Kwambi",
"kwn": "Kwangali",
"kwo": "Kwomtari",
"kwp": "Kodia",
"kwr": "Kwer",
"kws": "Kwese",
"kwt": "Kwesten",
"kwu": "Kwakum",
"kwv": "Sara Kaba Náà",
"kww": "Kwinti",
"kwx": "Khirwar",
"kwy": "San Salvador Kongo",
"kwz": "Kwadi",
"kxa": "Kairiru",
"kxb": "Krobu",
"kxc": "Konso; Khonso",
"kxd": "Brunei",
"kxf": "Manumanaw Karen; Manumanaw",
"kxh": "Karo (Ethiopia)",
"kxi": "Keningau Murut",
"kxj": "Kulfa",
"kxk": "Zayein Karen",
"kxm": "Northern Khmer",
"kxn": "Kanowit-Tanjong Melanau",
"kxo": "Kanoé",
"kxp": "Wadiyara Koli",
"kxq": "Smärky Kanum",
"kxr": "Koro (Papua New Guinea)",
"kxs": "Kangjia",
"kxt": "Koiwat",
"kxv": "Kuvi",
"kxw": "Konai",
"kxx": "Likuba",
"kxy": "Kayong",
"kxz": "Kerewo",
"ky": "Kirghiz; Kyrgyz",
"kya": "Kwaya",
"kyb": "Butbut Kalinga",
"kyc": "Kyaka",
"kyd": "Karey",
"kye": "Krache",
"kyf": "Kouya",
"kyg": "Keyagana",
"kyh": "Karok",
"kyi": "Kiput",
"kyj": "Karao",
"kyk": "Kamayo",
"kyl": "Kalapuya",
"kym": "Kpatili",
"kyn": "Northern Binukidnon",
"kyo": "Kelon",
"kyp": "Kang",
"kyq": "Kenga",
"kyr": "Kuruáya",
"kys": "Baram Kayan",
"kyt": "Kayagar",
"kyu": "Western Kayah",
"kyv": "Kayort",
"kyw": "Kudmali",
"kyx": "Rapoisi",
"kyy": "Kambaira",
"kyz": "Kayabí",
"kza": "Western Karaboro",
"kzb": "Kaibobo",
"kzc": "Bondoukou Kulango",
"kzd": "Kadai",
"kze": "Kosena",
"kzf": "Da'a Kaili",
"kzg": "Kikai",
"kzi": "Kelabit",
"kzk": "Kazukuru",
"kzl": "Kayeli",
"kzm": "Kais",
"kzn": "Kokola",
"kzo": "Kaningi",
"kzp": "Kaidipang",
"kzq": "Kaike",
"kzr": "Karang",
"kzs": "Sugut Dusun",
"kzu": "Kayupulau",
"kzv": "Komyandaret",
"kzw": "Karirí-Xocó",
"kzx": "Kamarian",
"kzy": "Kango (Tshopo District)",
"kzz": "Kalabra",
"la": "Latin",
"laa": "Southern Subanen",
"lab": "Linear A",
"lac": "Lacandon",
"lad": "Ladino",
"lae": "Pattani",
"laf": "Lafofa",
"lag": "Langi",
"lah": "Lahnda",
"lai": "Lambya",
"laj": "Lango (Uganda)",
"lal": "Lalia",
"lam": "Lamba",
"lan": "Laru",
"lap": "Laka (Chad)",
"laq": "Qabiao",
"lar": "Larteh",
"las": "Lama (Togo)",
"lau": "Laba",
"law": "Lauje",
"lax": "Tiwa",
"lay": "Lama Bai",
"laz": "Aribwatsa",
"lb": "Luxembourgish; Letzeburgesch",
"lbb": "Label",
"lbc": "Lakkia",
"lbe": "Lak",
"lbf": "Tinani",
"lbg": "Laopang",
"lbi": "La'bi",
"lbj": "Ladakhi",
"lbk": "Central Bontok",
"lbl": "Libon Bikol",
"lbm": "Lodhi",
"lbn": "Rmeet",
"lbo": "Laven",
"lbq": "Wampar",
"lbr": "Lohorung",
"lbs": "Libyan Sign Language",
"lbt": "Lachi",
"lbu": "Labu",
"lbv": "Lavatbura-Lamusong",
"lbw": "Tolaki",
"lbx": "Lawangan",
"lby": "Lamalama; Lamu-Lamu",
"lbz": "Lardil",
"lcc": "Legenyem",
"lcd": "Lola",
"lce": "Loncong; Sekak",
"lcf": "Lubu",
"lch": "Luchazi",
"lcl": "Lisela",
"lcm": "Tungag",
"lcp": "Western Lawa",
"lcq": "Luhu",
"lcs": "Lisabata-Nuniali",
"lda": "Kla-Dan",
"ldb": "Dũya",
"ldd": "Luri",
"ldg": "Lenyima",
"ldh": "Lamja-Dengsa-Tola",
"ldi": "Laari",
"ldj": "Lemoro",
"ldk": "Leelau",
"ldl": "Kaan",
"ldm": "Landoma",
"ldn": "Láadan",
"ldo": "Loo",
"ldp": "Tso",
"ldq": "Lufu",
"lea": "Lega-Shabunda",
"leb": "Lala-Bisa",
"lec": "Leco",
"led": "Lendu",
"lee": "Lyélé",
"lef": "Lelemi",
"leh": "Lenje",
"lei": "Lemio",
"lej": "Lengola",
"lek": "Leipon",
"lel": "Lele (Democratic Republic of Congo)",
"lem": "Nomaande",
"len": "Lenca",
"leo": "Leti (Cameroon)",
"lep": "Lepcha",
"leq": "Lembena",
"ler": "Lenkau",
"les": "Lese",
"let": "Lesing-Gelimi; Amio-Gelimi",
"leu": "Kara (Papua New Guinea)",
"lev": "Lamma",
"lew": "Ledo Kaili",
"lex": "Luang",
"ley": "Lemolang",
"lez": "Lezghian",
"lfa": "Lefa",
"lfn": "Lingua Franca Nova",
"lg": "Ganda; Luganda",
"lga": "Lungga",
"lgb": "Laghu",
"lgg": "Lugbara",
"lgh": "Laghuu",
"lgi": "Lengilu",
"lgk": "Lingarak; Neverver",
"lgl": "Wala",
"lgm": "Lega-Mwenga",
"lgn": "T'apo; Opuuo",
"lgo": "Lango (South Sudan)",
"lgq": "Logba",
"lgr": "Lengo",
"lgt": "Pahi",
"lgu": "Longgu",
"lgz": "Ligenza",
"lha": "Laha (Viet Nam)",
"lhh": "Laha (Indonesia)",
"lhi": "Lahu Shi",
"lhl": "Lahul Lohar",
"lhm": "Lhomi",
"lhn": "Lahanan",
"lhp": "Lhokpu",
"lhs": "Mlahsö",
"lht": "Lo-Toga",
"lhu": "Lahu",
"li": "Limburgan; Limburger; Limburgish",
"lia": "West-Central Limba",
"lib": "Likum",
"lic": "Hlai",
"lid": "Nyindrou",
"lie": "Likila",
"lif": "Limbu",
"lig": "Ligbi",
"lih": "Lihir",
"lij": "Ligurian",
"lik": "Lika",
"lil": "Lillooet",
"lio": "Liki",
"lip": "Sekpele",
"liq": "Libido",
"lir": "Liberian English",
"lis": "Lisu",
"liu": "Logorik",
"liv": "Liv",
"liw": "Col",
"lix": "Liabuku",
"liy": "Banda-Bambari",
"liz": "Libinza",
"lja": "Golpa",
"lje": "Rampi",
"lji": "Laiyolo",
"ljl": "Li'o",
"ljp": "Lampung Api",
"ljw": "Yirandali",
"ljx": "Yuru",
"lka": "Lakalei",
"lkb": "Kabras; Lukabaras",
"lkc": "Kucong",
"lkd": "Lakondê",
"lke": "Kenyi",
"lkh": "Lakha",
"lki": "Laki",
"lkj": "Remun",
"lkl": "Laeko-Libuat",
"lkm": "Kalaamaya",
"lkn": "Lakon; Vure",
"lko": "Khayo; Olukhayo",
"lkr": "Päri",
"lks": "Kisa; Olushisa",
"lkt": "Lakota",
"lku": "Kungkari",
"lky": "Lokoya",
"lla": "Lala-Roba",
"llb": "Lolo",
"llc": "Lele (Guinea)",
"lld": "Ladin",
"lle": "Lele (Papua New Guinea)",
"llf": "Hermit",
"llg": "Lole",
"llh": "Lamu",
"lli": "Teke-Laali",
"llj": "Ladji Ladji",
"llk": "Lelak",
"lll": "Lilau",
"llm": "Lasalimu",
"lln": "Lele (Chad)",
"llp": "North Efate",
"llq": "Lolak",
"lls": "Lithuanian Sign Language",
"llu": "Lau",
"llx": "Lauan",
"lma": "East Limba",
"lmb": "Merei",
"lmc": "Limilngan",
"lmd": "Lumun",
"lme": "Pévé",
"lmf": "South Lembata",
"lmg": "Lamogai",
"lmh": "Lambichhong",
"lmi": "Lombi",
"lmj": "West Lembata",
"lmk": "Lamkang",
"lml": "Hano",
"lmn": "Lambadi",
"lmo": "Lombard",
"lmp": "Limbum",
"lmq": "Lamatuka",
"lmr": "Lamalera",
"lmu": "Lamenu",
"lmv": "Lomaiviti",
"lmw": "Lake Miwok",
"lmx": "Laimbue",
"lmy": "Lamboya",
"ln": "Lingala",
"lna": "Langbashe",
"lnb": "Mbalanhu",
"lnd": "Lundayeh; Lun Bawang",
"lng": "Langobardic",
"lnh": "Lanoh",
"lni": "Daantanai'",
"lnj": "Leningitij",
"lnl": "South Central Banda",
"lnm": "Langam",
"lnn": "Lorediakarkar",
"lns": "Lamnso'",
"lnu": "Longuda",
"lnw": "Lanima",
"lnz": "Lonzo",
"lo": "Lao",
"loa": "Loloda",
"lob": "Lobi",
"loc": "Inonhan",
"loe": "Saluan",
"lof": "Logol",
"log": "Logo",
"loh": "Narim",
"loi": "Loma (Côte d'Ivoire)",
"loj": "Lou",
"lok": "Loko",
"lol": "Mongo",
"lom": "Loma (Liberia)",
"lon": "Malawi Lomwe",
"loo": "Lombo",
"lop": "Lopa",
"loq": "Lobala",
"lor": "Téén",
"los": "Loniu",
"lot": "Otuho",
"lou": "Louisiana Creole",
"lov": "Lopi",
"low": "Tampias Lobu",
"lox": "Loun",
"loy": "Loke",
"loz": "Lozi",
"lpa": "Lelepa",
"lpe": "Lepki",
"lpn": "Long Phuri Naga",
"lpo": "Lipo",
"lpx": "Lopit",
"lqr": "Logir",
"lra": "Rara Bakati'",
"lrc": "Northern Luri",
"lre": "Laurentian",
"lrg": "Laragia",
"lri": "Marachi; Olumarachi",
"lrk": "Loarki",
"lrl": "Lari",
"lrm": "Marama; Olumarama",
"lrn": "Lorang",
"lro": "Laro",
"lrr": "Southern Yamphu",
"lrt": "Larantuka Malay",
"lrv": "Larevat",
"lrz": "Lemerig",
"lsa": "Lasgerdi",
"lsb": "Burundian Sign Language; Langue des Signes Burundaise",
"lsc": "Albarradas Sign Language; Lengua de señas Albarradas",
"lsd": "Lishana Deni",
"lse": "Lusengo",
"lsh": "Lish",
"lsi": "Lashi",
"lsl": "Latvian Sign Language",
"lsm": "Saamia; Olusamia",
"lsn": "Tibetan Sign Language",
"lso": "Laos Sign Language",
"lsp": "Panamanian Sign Language; Lengua de Señas Panameñas",
"lsr": "Aruop",
"lss": "Lasi",
"lst": "Trinidad and Tobago Sign Language",
"lsv": "Sivia Sign Language",
"lsw": "Seychelles Sign Language; Lalang Siny Seselwa; Langue des Signes Seychelloise",
"lsy": "Mauritian Sign Language",
"lt": "Lithuanian",
"ltc": "Late Middle Chinese",
"ltg": "Latgalian",
"lth": "Thur",
"lti": "Leti (Indonesia)",
"ltn": "Latundê",
"lto": "Tsotso; Olutsotso",
"lts": "Tachoni; Lutachoni",
"ltu": "Latu",
"lu": "Luba-Katanga",
"lua": "Luba-Lulua",
"luc": "Aringa",
"lud": "Ludian",
"lue": "Luvale",
"luf": "Laua",
"lui": "Luiseno",
"luj": "Luna",
"luk": "Lunanakha",
"lul": "Olu'bo",
"lum": "Luimbi",
"lun": "Lunda",
"luo": "Luo (Kenya and Tanzania); Dholuo",
"lup": "Lumbu",
"luq": "Lucumi",
"lur": "Laura",
"lus": "Lushai",
"lut": "Lushootseed",
"luu": "Lumba-Yakkha",
"luv": "Luwati",
"luw": "Luo (Cameroon)",
"luy": "Luyia; Oluluyia",
"luz": "Southern Luri",
"lv": "Latvian",
"lva": "Maku'a",
"lvi": "Lavi",
"lvk": "Lavukaleve",
"lvs": "Standard Latvian",
"lvu": "Levuka",
"lwa": "Lwalu",
"lwe": "Lewo Eleng",
"lwg": "Wanga; Oluwanga",
"lwh": "White Lachi",
"lwl": "Eastern Lawa",
"lwm": "Laomian",
"lwo": "Luwo",
"lws": "Malawian Sign Language",
"lwt": "Lewotobi",
"lwu": "Lawu",
"lww": "Lewo",
"lxm": "Lakurumau",
"lya": "Layakha",
"lyg": "Lyngngam",
"lyn": "Luyana",
"lzh": "Literary Chinese",
"lzl": "Litzlitz",
"lzn": "Leinong Naga",
"lzz": "Laz",
"maa": "San Jerónimo Tecóatl Mazatec",
"mab": "Yutanduchi Mixtec",
"mad": "Madurese",
"mae": "Bo-Rukul",
"maf": "Mafa",
"mag": "Magahi",
"mai": "Maithili",
"maj": "Jalapa De Díaz Mazatec",
"mak": "Makasar",
"mam": "Mam",
"man": "Mandingo; Manding",
"map": "Austronesian languages",
"maq": "Chiquihuitlán Mazatec",
"mas": "Masai",
"mat": "San Francisco Matlatzinca",
"mau": "Huautla Mazatec",
"mav": "Sateré-Mawé",
"maw": "Mampruli",
"max": "North Moluccan Malay",
"maz": "Central Mazahua",
"mba": "Higaonon",
"mbb": "Western Bukidnon Manobo",
"mbc": "Macushi",
"mbd": "Dibabawon Manobo",
"mbe": "Molale",
"mbf": "Baba Malay",
"mbh": "Mangseng",
"mbi": "Ilianen Manobo",
"mbj": "Nadëb",
"mbk": "Malol",
"mbl": "Maxakalí",
"mbm": "Ombamba",
"mbn": "Macaguán",
"mbo": "Mbo (Cameroon)",
"mbp": "Malayo",
"mbq": "Maisin",
"mbr": "Nukak Makú",
"mbs": "Sarangani Manobo",
"mbt": "Matigsalug Manobo",
"mbu": "Mbula-Bwazza",
"mbv": "Mbulungish",
"mbw": "Maring",
"mbx": "Mari (East Sepik Province)",
"mby": "Memoni",
"mbz": "Amoltepec Mixtec",
"mca": "Maca",
"mcb": "Machiguenga",
"mcc": "Bitur",
"mcd": "Sharanahua",
"mce": "Itundujia Mixtec",
"mcf": "Matsés",
"mcg": "Mapoyo",
"mch": "Maquiritari",
"mci": "Mese",
"mcj": "Mvanip",
"mck": "Mbunda",
"mcl": "Macaguaje",
"mcm": "Malaccan Creole Portuguese",
"mcn": "Masana",
"mco": "Coatlán Mixe",
"mcp": "Makaa",
"mcq": "Ese",
"mcr": "Menya",
"mcs": "Mambai",
"mct": "Mengisa",
"mcu": "Cameroon Mambila",
"mcv": "Minanibai",
"mcw": "Mawa (Chad)",
"mcx": "Mpiemo",
"mcy": "South Watut",
"mcz": "Mawan",
"mda": "Mada (Nigeria)",
"mdb": "Morigi",
"mdc": "Male (Papua New Guinea)",
"mdd": "Mbum",
"mde": "Maba (Chad)",
"mdf": "Moksha",
"mdg": "Massalat",
"mdh": "Maguindanaon",
"mdi": "Mamvu",
"mdj": "Mangbetu",
"mdk": "Mangbutu",
"mdl": "Maltese Sign Language",
"mdm": "Mayogo",
"mdn": "Mbati",
"mdp": "Mbala",
"mdq": "Mbole",
"mdr": "Mandar",
"mds": "Maria (Papua New Guinea)",
"mdt": "Mbere",
"mdu": "Mboko",
"mdv": "Santa Lucía Monteverde Mixtec",
"mdw": "Mbosi",
"mdx": "Dizin",
"mdy": "Male (Ethiopia)",
"mdz": "Suruí Do Pará",
"mea": "Menka",
"meb": "Ikobi",
"mec": "Marra",
"med": "Melpa",
"mee": "Mengen",
"mef": "Megam",
"meh": "Southwestern Tlaxiaco Mixtec",
"mei": "Midob",
"mej": "Meyah",
"mek": "Mekeo",
"mel": "Central Melanau",
"mem": "Mangala",
"men": "Mende (Sierra Leone)",
"meo": "Kedah Malay",
"mep": "Miriwoong",
"meq": "Merey",
"mer": "Meru",
"mes": "Masmaje",
"met": "Mato",
"meu": "Motu",
"mev": "Mano",
"mew": "Maaka",
"mey": "Hassaniyya",
"mez": "Menominee",
"mfa": "Pattani Malay",
"mfb": "Bangka",
"mfc": "Mba",
"mfd": "Mendankwe-Nkwen",
"mfe": "Morisyen",
"mff": "Naki",
"mfg": "Mogofin",
"mfh": "Matal",
"mfi": "Wandala",
"mfj": "Mefele",
"mfk": "North Mofu",
"mfl": "Putai",
"mfm": "Marghi South",
"mfn": "Cross River Mbembe",
"mfo": "Mbe",
"mfp": "Makassar Malay",
"mfq": "Moba",
"mfr": "Marrithiyel",
"mfs": "Mexican Sign Language",
"mft": "Mokerang",
"mfu": "Mbwela",
"mfv": "Mandjak",
"mfw": "Mulaha",
"mfx": "Melo",
"mfy": "Mayo",
"mfz": "Mabaan",
"mg": "Malagasy",
"mga": "Middle Irish (900-1200)",
"mgb": "Mararit",
"mgc": "Morokodo",
"mgd": "Moru",
"mge": "Mango",
"mgf": "Maklew",
"mgg": "Mpumpong",
"mgh": "Makhuwa-Meetto",
"mgi": "Lijili",
"mgj": "Abureni",
"mgk": "Mawes",
"mgl": "Maleu-Kilenge",
"mgm": "Mambae",
"mgn": "Mbangi",
"mgo": "Meta'",
"mgp": "Eastern Magar",
"mgq": "Malila",
"mgr": "Mambwe-Lungu",
"mgs": "Manda (Tanzania)",
"mgt": "Mongol",
"mgu": "Mailu",
"mgv": "Matengo",
"mgw": "Matumbi",
"mgy": "Mbunga",
"mgz": "Mbugwe",
"mh": "Marshallese",
"mha": "Manda (India)",
"mhb": "Mahongwe",
"mhc": "Mocho",
"mhd": "Mbugu",
"mhe": "Besisi; Mah Meri",
"mhf": "Mamaa",
"mhg": "Margu",
"mhi": "Ma'di",
"mhj": "Mogholi",
"mhk": "Mungaka",
"mhl": "Mauwake",
"mhm": "Makhuwa-Moniga",
"mhn": "Mócheno",
"mho": "Mashi (Zambia)",
"mhp": "Balinese Malay",
"mhq": "Mandan",
"mhr": "Eastern Mari",
"mhs": "Buru (Indonesia)",
"mht": "Mandahuaca",
"mhu": "Digaro-Mishmi; Darang Deng",
"mhw": "Mbukushu",
"mhx": "Maru; Lhaovo",
"mhy": "Ma'anyan",
"mhz": "Mor (Mor Islands)",
"mi": "Maori",
"mia": "Miami",
"mib": "Atatláhuca Mixtec",
"mic": "Mi'kmaq; Micmac",
"mid": "Mandaic",
"mie": "Ocotepec Mixtec",
"mif": "Mofu-Gudur",
"mig": "San Miguel El Grande Mixtec",
"mih": "Chayuco Mixtec",
"mii": "Chigmecatitlán Mixtec",
"mij": "Abar; Mungbam",
"mik": "Mikasuki",
"mil": "Peñoles Mixtec",
"mim": "Alacatlatzala Mixtec",
"min": "Minangkabau",
"mio": "Pinotepa Nacional Mixtec",
"mip": "Apasco-Apoala Mixtec",
"miq": "Mískito",
"mir": "Isthmus Mixe",
"mit": "Southern Puebla Mixtec",
"miu": "Cacaloxtepec Mixtec",
"miw": "Akoye",
"mix": "Mixtepec Mixtec",
"miy": "Ayutla Mixtec",
"miz": "Coatzospan Mixtec",
"mjb": "Makalero",
"mjc": "San Juan Colorado Mixtec",
"mjd": "Northwest Maidu",
"mje": "Muskum",
"mjg": "Tu",
"mjh": "Mwera (Nyasa)",
"mji": "Kim Mun",
"mjj": "Mawak",
"mjk": "Matukar",
"mjl": "Mandeali",
"mjm": "Medebur",
"mjn": "Ma (Papua New Guinea)",
"mjo": "Malankuravan",
"mjp": "Malapandaram",
"mjq": "Malaryan",
"mjr": "Malavedan",
"mjs": "Miship",
"mjt": "Sauria Paharia",
"mju": "Manna-Dora",
"mjv": "Mannan",
"mjw": "Karbi",
"mjx": "Mahali",
"mjy": "Mahican",
"mjz": "Majhi",
"mk": "Macedonian",
"mka": "Mbre",
"mkb": "Mal Paharia",
"mkc": "Siliput",
"mke": "Mawchi",
"mkf": "Miya",
"mkg": "Mak (China)",
"mkh": "Mon-Khmer languages",
"mki": "Dhatki",
"mkj": "Mokilese",
"mkk": "Byep",
"mkl": "Mokole",
"mkm": "Moklen",
"mkn": "Kupang Malay",
"mko": "Mingang Doso",
"mkp": "Moikodi",
"mkq": "Bay Miwok",
"mkr": "Malas",
"mks": "Silacayoapan Mixtec",
"mkt": "Vamale",
"mku": "Konyanka Maninka",
"mkv": "Mafea",
"mkw": "Kituba (Congo)",
"mkx": "Kinamiging Manobo",
"mky": "East Makian",
"mkz": "Makasae",
"ml": "Malayalam",
"mla": "Malo",
"mlb": "Mbule",
"mlc": "Cao Lan",
"mle": "Manambu",
"mlf": "Mal",
"mlh": "Mape",
"mli": "Malimpung",
"mlj": "Miltu",
"mlk": "Ilwana; Kiwilwana",
"mll": "Malua Bay",
"mlm": "Mulam",
"mln": "Malango",
"mlo": "Mlomp",
"mlp": "Bargam",
"mlq": "Western Maninkakan",
"mlr": "Vame",
"mls": "Masalit",
"mlu": "To'abaita",
"mlv": "Motlav; Mwotlap",
"mlw": "Moloko",
"mlx": "Malfaxal; Naha'ai",
"mlz": "Malaynon",
"mma": "Mama",
"mmb": "Momina",
"mmc": "Michoacán Mazahua",
"mmd": "Maonan",
"mme": "Mae",
"mmf": "Mundat",
"mmg": "North Ambrym",
"mmh": "Mehináku",
"mmi": "Musar",
"mmj": "Majhwar",
"mmk": "Mukha-Dora",
"mml": "Man Met",
"mmm": "Maii",
"mmn": "Mamanwa",
"mmo": "Mangga Buang",
"mmp": "Siawi",
"mmq": "Musak",
"mmr": "Western Xiangxi Miao",
"mmt": "Malalamai",
"mmu": "Mmaala",
"mmv": "Miriti",
"mmw": "Emae",
"mmx": "Madak",
"mmy": "Migaama",
"mmz": "Mabaale",
"mn": "Mongolian",
"mna": "Mbula",
"mnb": "Muna",
"mnc": "Manchu",
"mnd": "Mondé",
"mne": "Naba",
"mnf": "Mundani",
"mng": "Eastern Mnong",
"mnh": "Mono (Democratic Republic of Congo)",
"mni": "Manipuri",
"mnj": "Munji",
"mnk": "Mandinka",
"mnl": "Tiale",
"mnm": "Mapena",
"mnn": "Southern Mnong",
"mno": "Manobo languages",
"mnp": "Min Bei Chinese",
"mnq": "Minriq",
"mnr": "Mono (USA)",
"mns": "Mansi",
"mnu": "Mer",
"mnv": "Rennell-Bellona",
"mnw": "Mon",
"mnx": "Manikion",
"mny": "Manyawa",
"mnz": "Moni",
"moa": "Mwan",
"moc": "Mocoví",
"mod": "Mobilian",
"moe": "Innu; Montagnais",
"mog": "Mongondow",
"moh": "Mohawk",
"moi": "Mboi",
"moj": "Monzombo",
"mok": "Morori",
"mom": "Mangue",
"moo": "Monom",
"mop": "Mopán Maya",
"moq": "Mor (Bomberai Peninsula)",
"mor": "Moro",
"mos": "Mossi",
"mot": "Barí",
"mou": "Mogum",
"mov": "Mohave",
"mow": "Moi (Congo)",
"mox": "Molima",
"moy": "Shekkacho",
"moz": "Mukulu; Gergiko",
"mpa": "Mpoto",
"mpb": "Malak Malak; Mullukmulluk",
"mpc": "Mangarrayi",
"mpd": "Machinere",
"mpe": "Majang",
"mpg": "Marba",
"mph": "Maung",
"mpi": "Mpade",
"mpj": "Martu Wangka; Wangkajunga",
"mpk": "Mbara (Chad)",
"mpl": "Middle Watut",
"mpm": "Yosondúa Mixtec",
"mpn": "Mindiri",
"mpo": "Miu",
"mpp": "Migabac",
"mpq": "Matís",
"mpr": "Vangunu",
"mps": "Dadibi",
"mpt": "Mian",
"mpu": "Makuráp",
"mpv": "Mungkip",
"mpw": "Mapidian",
"mpx": "Misima-Panaeati",
"mpy": "Mapia",
"mpz": "Mpi",
"mqa": "Maba (Indonesia)",
"mqb": "Mbuko",
"mqc": "Mangole",
"mqe": "Matepi",
"mqf": "Momuna",
"mqg": "Kota Bangun Kutai Malay",
"mqh": "Tlazoyaltepec Mixtec",
"mqi": "Mariri",
"mqj": "Mamasa",
"mqk": "Rajah Kabunsuwan Manobo",
"mql": "Mbelime",
"mqm": "South Marquesan",
"mqn": "Moronene",
"mqo": "Modole",
"mqp": "Manipa",
"mqq": "Minokok",
"mqr": "Mander",
"mqs": "West Makian",
"mqt": "Mok",
"mqu": "Mandari",
"mqv": "Mosimo",
"mqw": "Murupi",
"mqx": "Mamuju",
"mqy": "Manggarai",
"mqz": "Pano",
"mr": "Marathi",
"mra": "Mlabri",
"mrb": "Marino",
"mrc": "Maricopa",
"mrd": "Western Magar",
"mre": "Martha's Vineyard Sign Language",
"mrf": "Elseng",
"mrg": "Mising",
"mrh": "Mara Chin",
"mrj": "Western Mari",
"mrk": "Hmwaveke",
"mrl": "Mortlockese",
"mrm": "Merlav; Mwerlap",
"mrn": "Cheke Holo",
"mro": "Mru",
"mrp": "Morouas",
"mrq": "North Marquesan",
"mrr": "Maria (India)",
"mrs": "Maragus",
"mrt": "Marghi Central",
"mru": "Mono (Cameroon)",
"mrv": "Mangareva",
"mrw": "Maranao",
"mrx": "Maremgi; Dineor",
"mry": "Mandaya",
"mrz": "Marind",
"ms": "Malay (macrolanguage)",
"msb": "Masbatenyo",
"msc": "Sankaran Maninka",
"msd": "Yucatec Maya Sign Language",
"mse": "Musey",
"msf": "Mekwei",
"msg": "Moraid",
"msh": "Masikoro Malagasy",
"msi": "Sabah Malay",
"msj": "Ma (Democratic Republic of Congo)",
"msk": "Mansaka",
"msl": "Molof; Poule",
"msm": "Agusan Manobo",
"msn": "Vurës",
"mso": "Mombum",
"msp": "Maritsauá",
"msq": "Caac",
"msr": "Mongolian Sign Language",
"mss": "West Masela",
"msu": "Musom",
"msv": "Maslam",
"msw": "Mansoanka",
"msx": "Moresada",
"msy": "Aruamu",
"msz": "Momare",
"mt": "Maltese",
"mta": "Cotabato Manobo",
"mtb": "Anyin Morofo",
"mtc": "Munit",
"mtd": "Mualang",
"mte": "Mono (Solomon Islands)",
"mtf": "Murik (Papua New Guinea)",
"mtg": "Una",
"mth": "Munggui",
"mti": "Maiwa (Papua New Guinea)",
"mtj": "Moskona",
"mtk": "Mbe'",
"mtl": "Montol",
"mtm": "Mator",
"mtn": "Matagalpa",
"mto": "Totontepec Mixe",
"mtp": "Wichí Lhamtés Nocten",
"mtq": "Muong",
"mtr": "Mewari",
"mts": "Yora",
"mtt": "Mota",
"mtu": "Tututepec Mixtec",
"mtv": "Asaro'o",
"mtw": "Southern Binukidnon",
"mtx": "Tidaá Mixtec",
"mty": "Nabi",
"mua": "Mundang",
"mub": "Mubi",
"muc": "Ajumbu",
"mud": "Mednyj Aleut",
"mue": "Media Lengua",
"mug": "Musgu",
"muh": "Mündü",
"mui": "Musi",
"muj": "Mabire",
"muk": "Mugom",
"mum": "Maiwala",
"mun": "Munda languages",
"muo": "Nyong",
"mup": "Malvi",
"muq": "Eastern Xiangxi Miao",
"mur": "Murle",
"mus": "Creek",
"mut": "Western Muria",
"muu": "Yaaku",
"muv": "Muthuvan",
"mux": "Bo-Ung",
"muy": "Muyang",
"muz": "Mursi",
"mva": "Manam",
"mvb": "Mattole",
"mvd": "Mamboru",
"mve": "Marwari (Pakistan)",
"mvf": "Peripheral Mongolian",
"mvg": "Yucuañe Mixtec",
"mvh": "Mulgi",
"mvi": "Miyako",
"mvk": "Mekmek",
"mvl": "Mbara (Australia)",
"mvn": "Minaveha",
"mvo": "Marovo",
"mvp": "Duri",
"mvq": "Moere",
"mvr": "Marau",
"mvs": "Massep",
"mvt": "Mpotovoro",
"mvu": "Marfa",
"mvv": "Tagal Murut",
"mvw": "Machinga",
"mvx": "Meoswar",
"mvy": "Indus Kohistani",
"mvz": "Mesqan",
"mwa": "Mwatebu",
"mwb": "Juwal",
"mwc": "Are",
"mwe": "Mwera (Chimwera)",
"mwf": "Murrinh-Patha",
"mwg": "Aiklep",
"mwh": "Mouk-Aria",
"mwi": "Labo; Ninde",
"mwk": "Kita Maninkakan",
"mwl": "Mirandese",
"mwm": "Sar",
"mwn": "Nyamwanga",
"mwo": "Central Maewo",
"mwp": "Kala Lagaw Ya",
"mwq": "Mün Chin",
"mwr": "Marwari",
"mws": "Mwimbi-Muthambi",
"mwt": "Moken",
"mwu": "Mittu",
"mwv": "Mentawai",
"mww": "Hmong Daw",
"mwz": "Moingi",
"mxa": "Northwest Oaxaca Mixtec",
"mxb": "Tezoatlán Mixtec",
"mxc": "Manyika",
"mxd": "Modang",
"mxe": "Mele-Fila",
"mxf": "Malgbe",
"mxg": "Mbangala",
"mxh": "Mvuba",
"mxi": "Mozarabic",
"mxj": "Miju-Mishmi; Geman Deng",
"mxk": "Monumbo",
"mxl": "Maxi Gbe",
"mxm": "Meramera",
"mxn": "Moi (Indonesia)",
"mxo": "Mbowe",
"mxp": "Tlahuitoltepec Mixe",
"mxq": "Juquila Mixe",
"mxr": "Murik (Malaysia)",
"mxs": "Huitepec Mixtec",
"mxt": "Jamiltepec Mixtec",
"mxu": "Mada (Cameroon)",
"mxv": "Metlatónoc Mixtec",
"mxw": "Namo",
"mxx": "Mahou; Mawukakan",
"mxy": "Southeastern Nochixtlán Mixtec",
"mxz": "Central Masela",
"my": "Burmese",
"myb": "Mbay",
"myc": "Mayeka",
"mye": "Myene",
"myf": "Bambassi",
"myg": "Manta",
"myh": "Makah",
"myj": "Mangayat",
"myk": "Mamara Senoufo",
"myl": "Moma",
"mym": "Me'en",
"myn": "Mayan languages",
"myo": "Anfillo",
"myp": "Pirahã",
"myr": "Muniche",
"mys": "Mesmes",
"myu": "Mundurukú",
"myv": "Erzya",
"myw": "Muyuw",
"myx": "Masaaba",
"myy": "Macuna",
"myz": "Classical Mandaic",
"mza": "Santa María Zacatepec Mixtec",
"mzb": "Tumzabt",
"mzc": "Madagascar Sign Language",
"mzd": "Malimba",
"mze": "Morawa",
"mzg": "Monastic Sign Language",
"mzh": "Wichí Lhamtés Güisnay",
"mzi": "Ixcatlán Mazatec",
"mzj": "Manya",
"mzk": "Nigeria Mambila",
"mzl": "Mazatlán Mixe",
"mzm": "Mumuye",
"mzn": "Mazanderani",
"mzo": "Matipuhy",
"mzp": "Movima",
"mzq": "Mori Atas",
"mzr": "Marúbo",
"mzs": "Macanese",
"mzt": "Mintil",
"mzu": "Inapang",
"mzv": "Manza",
"mzw": "Deg",
"mzx": "Mawayana",
"mzy": "Mozambican Sign Language",
"mzz": "Maiadomu",
"na": "Nauru",
"naa": "Namla",
"nab": "Southern Nambikuára",
"nac": "Narak",
"nae": "Naka'ela",
"naf": "Nabak",
"nag": "Naga Pidgin",
"nah": "Nahuatl languages",
"nai": "North American Indian languages",
"naj": "Nalu",
"nak": "Nakanai",
"nal": "Nalik",
"nam": "Ngan'gityemerri",
"nan": "Min Nan Chinese",
"nao": "Naaba",
"nap": "Neapolitan",
"naq": "Khoekhoe; Nama (Namibia)",
"nar": "Iguta",
"nas": "Naasioi",
"nat": "Ca̱hungwa̱rya̱; Hungworo",
"naw": "Nawuri",
"nax": "Nakwi",
"nay": "Ngarrindjeri",
"naz": "Coatepec Nahuatl",
"nb": "Norwegian Bokmål",
"nba": "Nyemba",
"nbb": "Ndoe",
"nbc": "Chang Naga",
"nbd": "Ngbinda",
"nbe": "Konyak Naga",
"nbg": "Nagarchal",
"nbh": "Ngamo",
"nbi": "Mao Naga",
"nbj": "Ngarinyman",
"nbk": "Nake",
"nbm": "Ngbaka Ma'bo",
"nbn": "Kuri",
"nbo": "Nkukoli",
"nbp": "Nnam",
"nbq": "Nggem",
"nbr": "Numana",
"nbs": "Namibian Sign Language",
"nbt": "Na",
"nbu": "Rongmei Naga",
"nbv": "Ngamambo",
"nbw": "Southern Ngbandi",
"nby": "Ningera",
"nca": "Iyo",
"ncb": "Central Nicobarese",
"ncc": "Ponam",
"ncd": "Nachering",
"nce": "Yale",
"ncf": "Notsi",
"ncg": "Nisga'a",
"nch": "Central Huasteca Nahuatl",
"nci": "Classical Nahuatl",
"ncj": "Northern Puebla Nahuatl",
"nck": "Na-kara",
"ncl": "Michoacán Nahuatl",
"ncm": "Nambo",
"ncn": "Nauna",
"nco": "Sibe",
"ncq": "Northern Katang",
"ncr": "Ncane",
"ncs": "Nicaraguan Sign Language",
"nct": "Chothe Naga",
"ncu": "Chumburung",
"ncx": "Central Puebla Nahuatl",
"ncz": "Natchez",
"nd": "North Ndebele",
"nda": "Ndasa",
"ndb": "Kenswei Nsei",
"ndc": "Ndau",
"ndd": "Nde-Nsele-Nta",
"ndf": "Nadruvian",
"ndg": "Ndengereko",
"ndh": "Ndali",
"ndi": "Samba Leko",
"ndj": "Ndamba",
"ndk": "Ndaka",
"ndl": "Ndolo",
"ndm": "Ndam",
"ndn": "Ngundi",
"ndp": "Ndo",
"ndq": "Ndombe",
"ndr": "Ndoola",
"nds": "Low German; Low Saxon",
"ndt": "Ndunga",
"ndu": "Dugun",
"ndv": "Ndut",
"ndw": "Ndobo",
"ndx": "Nduga",
"ndy": "Lutos",
"ndz": "Ndogo",
"ne": "Nepali (macrolanguage)",
"nea": "Eastern Ngad'a",
"neb": "Toura (Côte d'Ivoire)",
"nec": "Nedebang",
"ned": "Nde-Gbite",
"nee": "Nêlêmwa-Nixumwak",
"nef": "Nefamese",
"neg": "Negidal",
"neh": "Nyenkha",
"nei": "Neo-Hittite",
"nej": "Neko",
"nek": "Neku",
"nem": "Nemi",
"nen": "Nengone",
"neo": "Ná-Meo",
"neq": "North Central Mixe",
"ner": "Yahadian",
"nes": "Bhoti Kinnauri",
"net": "Nete",
"neu": "Neo",
"nev": "Nyaheun",
"new": "Newari; Nepal Bhasa",
"nex": "Neme",
"ney": "Neyo",
"nez": "Nez Perce",
"nfa": "Dhao",
"nfd": "Ahwai",
"nfl": "Ayiwo; Äiwoo",
"nfr": "Nafaanra",
"nfu": "Mfumte",
"ng": "Ndonga",
"nga": "Ngbaka",
"ngb": "Northern Ngbandi",
"ngc": "Ngombe (Democratic Republic of Congo)",
"ngd": "Ngando (Central African Republic)",
"nge": "Ngemba",
"ngf": "Trans-New Guinea languages",
"ngg": "Ngbaka Manza",
"ngh": "Nǁng",
"ngi": "Ngizim",
"ngj": "Ngie",
"ngk": "Dalabon",
"ngl": "Lomwe",
"ngm": "Ngatik Men's Creole",
"ngn": "Ngwo",
"ngp": "Ngulu",
"ngq": "Ngurimi; Ngoreme",
"ngr": "Engdewu",
"ngs": "Gvoko",
"ngt": "Kriang; Ngeq",
"ngu": "Guerrero Nahuatl",
"ngv": "Nagumi",
"ngw": "Ngwaba",
"ngx": "Nggwahyi",
"ngy": "Tibea",
"ngz": "Ngungwel",
"nha": "Nhanda",
"nhb": "Beng",
"nhc": "Tabasco Nahuatl",
"nhd": "Chiripá; Ava Guaraní",
"nhe": "Eastern Huasteca Nahuatl",
"nhf": "Nhuwala",
"nhg": "Tetelcingo Nahuatl",
"nhh": "Nahari",
"nhi": "Zacatlán-Ahuacatlán-Tepetzintla Nahuatl",
"nhk": "Isthmus-Cosoleacaque Nahuatl",
"nhm": "Morelos Nahuatl",
"nhn": "Central Nahuatl",
"nho": "Takuu",
"nhp": "Isthmus-Pajapan Nahuatl",
"nhq": "Huaxcaleca Nahuatl",
"nhr": "Naro",
"nht": "Ometepec Nahuatl",
"nhu": "Noone",
"nhv": "Temascaltepec Nahuatl",
"nhw": "Western Huasteca Nahuatl",
"nhx": "Isthmus-Mecayapan Nahuatl",
"nhy": "Northern Oaxaca Nahuatl",
"nhz": "Santa María La Alta Nahuatl",
"nia": "Nias",
"nib": "Nakame",
"nic": "Niger-Kordofanian languages",
"nid": "Ngandi",
"nie": "Niellim",
"nif": "Nek",
"nig": "Ngalakgan",
"nih": "Nyiha (Tanzania)",
"nii": "Nii",
"nij": "Ngaju",
"nik": "Southern Nicobarese",
"nil": "Nila",
"nim": "Nilamba",
"nin": "Ninzo",
"nio": "Nganasan",
"niq": "Nandi",
"nir": "Nimboran",
"nis": "Nimi",
"nit": "Southeastern Kolami",
"niu": "Niuean",
"niv": "Gilyak",
"niw": "Nimo",
"nix": "Hema",
"niy": "Ngiti",
"niz": "Ningil",
"nja": "Nzanyi",
"njb": "Nocte Naga",
"njd": "Ndonde Hamba",
"njh": "Lotha Naga",
"nji": "Gudanji",
"njj": "Njen",
"njl": "Njalgulgule",
"njm": "Angami Naga",
"njn": "Liangmai Naga",
"njo": "Ao Naga",
"njr": "Njerep",
"njs": "Nisa",
"njt": "Ndyuka-Trio Pidgin",
"nju": "Ngadjunmaya",
"njx": "Kunyi",
"njy": "Njyem",
"njz": "Nyishi",
"nka": "Nkoya",
"nkb": "Khoibu Naga",
"nkc": "Nkongho",
"nkd": "Koireng",
"nke": "Duke",
"nkf": "Inpui Naga",
"nkg": "Nekgini",
"nkh": "Khezha Naga",
"nki": "Thangal Naga",
"nkj": "Nakai",
"nkk": "Nokuku",
"nkm": "Namat",
"nkn": "Nkangala",
"nko": "Nkonya",
"nkp": "Niuatoputapu",
"nkq": "Nkami",
"nkr": "Nukuoro",
"nks": "North Asmat",
"nkt": "Nyika (Tanzania)",
"nku": "Bouna Kulango",
"nkv": "Nyika (Malawi and Zambia)",
"nkw": "Nkutu",
"nkx": "Nkoroo",
"nkz": "Nkari",
"nl": "Dutch; Flemish",
"nla": "Ngombale",
"nlc": "Nalca",
"nle": "East Nyala",
"nlg": "Gela",
"nli": "Grangali",
"nlj": "Nyali",
"nlk": "Ninia Yali",
"nll": "Nihali",
"nlm": "Mankiyali",
"nlo": "Ngul",
"nlq": "Lao Naga",
"nlu": "Nchumbulu",
"nlv": "Orizaba Nahuatl",
"nlw": "Walangama",
"nlx": "Nahali",
"nly": "Nyamal",
"nlz": "Nalögo",
"nma": "Maram Naga",
"nmb": "Big Nambas; V'ënen Taut",
"nmc": "Ngam",
"nmd": "Ndumu",
"nme": "Mzieme Naga",
"nmf": "Tangkhul Naga (India)",
"nmg": "Kwasio",
"nmh": "Monsang Naga",
"nmi": "Nyam",
"nmj": "Ngombe (Central African Republic)",
"nmk": "Namakura",
"nml": "Ndemli",
"nmm": "Manangba",
"nmn": "ǃXóõ",
"nmo": "Moyon Naga",
"nmp": "Nimanbur",
"nmq": "Nambya",
"nmr": "Nimbari",
"nms": "Letemboi",
"nmt": "Namonuito",
"nmu": "Northeast Maidu",
"nmv": "Ngamini",
"nmw": "Nimoa; Rifao",
"nmx": "Nama (Papua New Guinea)",
"nmy": "Namuyi",
"nmz": "Nawdm",
"nn": "Norwegian Nynorsk",
"nna": "Nyangumarta",
"nnb": "Nande",
"nnc": "Nancere",
"nnd": "West Ambae",
"nne": "Ngandyera",
"nnf": "Ngaing",
"nng": "Maring Naga",
"nnh": "Ngiemboon",
"nni": "North Nuaulu",
"nnj": "Nyangatom",
"nnk": "Nankina",
"nnl": "Northern Rengma Naga",
"nnm": "Namia",
"nnn": "Ngete",
"nnp": "Wancho Naga",
"nnq": "Ngindo",
"nnr": "Narungga",
"nnt": "Nanticoke",
"nnu": "Dwang",
"nnv": "Nugunu (Australia)",
"nnw": "Southern Nuni",
"nny": "Nyangga",
"nnz": "Nda'nda'",
"no": "Norwegian",
"noa": "Woun Meu",
"noc": "Nuk",
"nod": "Northern Thai",
"noe": "Nimadi",
"nof": "Nomane",
"nog": "Nogai",
"noh": "Nomu",
"noi": "Noiri",
"noj": "Nonuya",
"nok": "Nooksack",
"nol": "Nomlaki",
"nom": "Nocamán",
"non": "Old Norse",
"nop": "Numanggang",
"noq": "Ngongo",
"nos": "Eastern Nisu",
"not": "Nomatsiguenga",
"nou": "Ewage-Notu",
"nov": "Novial",
"now": "Nyambo",
"noy": "Noy",
"noz": "Nayi",
"npa": "Nar Phu",
"npb": "Nupbikha",
"npg": "Ponyo-Gongwang Naga",
"nph": "Phom Naga",
"npi": "Nepali (individual language)",
"npl": "Southeastern Puebla Nahuatl",
"npn": "Mondropolon",
"npo": "Pochuri Naga",
"nps": "Nipsan",
"npu": "Puimei Naga",
"npx": "Noipx",
"npy": "Napu",
"nqg": "Southern Nago",
"nqk": "Kura Ede Nago",
"nql": "Ngendelengo",
"nqm": "Ndom",
"nqn": "Nen",
"nqo": "N'Ko; N’Ko",
"nqq": "Kyan-Karyaw Naga",
"nqt": "Nteng",
"nqy": "Akyaung Ari Naga",
"nr": "South Ndebele",
"nra": "Ngom",
"nrb": "Nara",
"nrc": "Noric",
"nre": "Southern Rengma Naga",
"nrf": "Jèrriais; Guernésiais",
"nrg": "Narango",
"nri": "Chokri Naga",
"nrk": "Ngarla",
"nrl": "Ngarluma",
"nrm": "Narom",
"nrn": "Norn",
"nrp": "North Picene",
"nrr": "Norra; Nora",
"nrt": "Northern Kalapuya",
"nru": "Narua",
"nrx": "Ngurmbur",
"nrz": "Lala",
"nsa": "Sangtam Naga",
"nsb": "Lower Nossob",
"nsc": "Nshi",
"nsd": "Southern Nisu",
"nse": "Nsenga",
"nsf": "Northwestern Nisu",
"nsg": "Ngasa",
"nsh": "Ngoshie",
"nsi": "Nigerian Sign Language",
"nsk": "Naskapi",
"nsl": "Norwegian Sign Language",
"nsm": "Sumi Naga",
"nsn": "Nehan",
"nso": "Pedi; Northern Sotho; Sepedi",
"nsp": "Nepalese Sign Language",
"nsq": "Northern Sierra Miwok",
"nsr": "Maritime Sign Language",
"nss": "Nali",
"nst": "Tase Naga",
"nsu": "Sierra Negra Nahuatl",
"nsv": "Southwestern Nisu",
"nsw": "Navut",
"nsx": "Nsongo",
"nsy": "Nasal",
"nsz": "Nisenan",
"ntd": "Northern Tidung",
"nte": "Nathembo",
"ntg": "Ngantangarra",
"nti": "Natioro",
"ntj": "Ngaanyatjarra",
"ntk": "Ikoma-Nata-Isenye",
"ntm": "Nateni",
"nto": "Ntomba",
"ntp": "Northern Tepehuan",
"ntr": "Delo",
"ntu": "Natügu",
"ntw": "Nottoway",
"ntx": "Tangkhul Naga (Myanmar)",
"nty": "Mantsi",
"ntz": "Natanzi",
"nua": "Yuanga",
"nub": "Nubian languages",
"nuc": "Nukuini",
"nud": "Ngala",
"nue": "Ngundu",
"nuf": "Nusu",
"nug": "Nungali",
"nuh": "Ndunda",
"nui": "Ngumbi",
"nuj": "Nyole",
"nuk": "Nuu-chah-nulth; Nuuchahnulth",
"nul": "Nusa Laut",
"num": "Niuafo'ou",
"nun": "Anong",
"nuo": "Nguôn",
"nup": "Nupe-Nupe-Tako",
"nuq": "Nukumanu",
"nur": "Nukuria",
"nus": "Nuer",
"nut": "Nung (Viet Nam)",
"nuu": "Ngbundu",
"nuv": "Northern Nuni",
"nuw": "Nguluwan",
"nux": "Mehek",
"nuy": "Nunggubuyu",
"nuz": "Tlamacazapa Nahuatl",
"nv": "Navajo; Navaho",
"nvh": "Nasarian",
"nvm": "Namiae",
"nvo": "Nyokon",
"nwa": "Nawathinehena",
"nwb": "Nyabwa",
"nwc": "Classical Newari; Classical Nepal Bhasa; Old Newari",
"nwe": "Ngwe",
"nwg": "Ngayawung",
"nwi": "Southwest Tanna",
"nwm": "Nyamusa-Molo",
"nwo": "Nauo",
"nwr": "Nawaru",
"nww": "Ndwewe",
"nwx": "Middle Newar",
"nwy": "Nottoway-Meherrin",
"nxa": "Nauete",
"nxd": "Ngando (Democratic Republic of Congo)",
"nxe": "Nage",
"nxg": "Ngad'a",
"nxi": "Nindi",
"nxk": "Koki Naga",
"nxl": "South Nuaulu",
"nxm": "Numidian",
"nxn": "Ngawun",
"nxo": "Ndambomo",
"nxq": "Naxi",
"nxr": "Ninggerum",
"nxx": "Nafri",
"ny": "Nyanja; Chewa; Chichewa",
"nyb": "Nyangbo",
"nyc": "Nyanga-li",
"nyd": "Nyore; Olunyole",
"nye": "Nyengo",
"nyf": "Giryama; Kigiryama",
"nyg": "Nyindu",
"nyh": "Nyikina",
"nyi": "Ama (Sudan)",
"nyj": "Nyanga",
"nyk": "Nyaneka",
"nyl": "Nyeu",
"nym": "Nyamwezi",
"nyn": "Nyankole",
"nyo": "Nyoro",
"nyp": "Nyang'i",
"nyq": "Nayini",
"nyr": "Nyiha (Malawi)",
"nys": "Nyungar",
"nyt": "Nyawaygi",
"nyu": "Nyungwe",
"nyv": "Nyulnyul",
"nyw": "Nyaw",
"nyx": "Nganyaywana",
"nyy": "Nyakyusa-Ngonde",
"nza": "Tigon Mbembe",
"nzb": "Njebi",
"nzd": "Nzadi",
"nzi": "Nzima",
"nzk": "Nzakara",
"nzm": "Zeme Naga",
"nzs": "New Zealand Sign Language",
"nzu": "Teke-Nzikou",
"nzy": "Nzakambay",
"nzz": "Nanga Dama Dogon",
"oaa": "Orok",
"oac": "Oroch",
"oar": "Old Aramaic (up to 700 BCE); Ancient Aramaic (up to 700 BCE)",
"oav": "Old Avar",
"obi": "Obispeño",
"obk": "Southern Bontok",
"obl": "Oblo",
"obm": "Moabite",
"obo": "Obo Manobo",
"obr": "Old Burmese",
"obt": "Old Breton",
"obu": "Obulom",
"oc": "Occitan (post 1500)",
"oca": "Ocaina",
"och": "Old Chinese",
"ocm": "Old Cham",
"oco": "Old Cornish",
"ocu": "Atzingo Matlatzinca",
"oda": "Odut",
"odk": "Od",
"odt": "Old Dutch",
"odu": "Odual",
"ofo": "Ofo",
"ofs": "Old Frisian",
"ofu": "Efutop",
"ogb": "Ogbia",
"ogc": "Ogbah",
"oge": "Old Georgian",
"ogg": "Ogbogolo",
"ogo": "Khana",
"ogu": "Ogbronuagum",
"oht": "Old Hittite",
"ohu": "Old Hungarian",
"oia": "Oirata",
"oie": "Okolie",
"oin": "Inebu One",
"oj": "Ojibwa",
"ojb": "Northwestern Ojibwa",
"ojc": "Central Ojibwa",
"ojg": "Eastern Ojibwa",
"ojp": "Old Japanese",
"ojs": "Severn Ojibwa",
"ojv": "Ontong Java",
"ojw": "Western Ojibwa",
"oka": "Okanagan",
"okb": "Okobo",
"okc": "Kobo",
"okd": "Okodia",
"oke": "Okpe (Southwestern Edo)",
"okg": "Koko Babangk",
"okh": "Koresh-e Rostam",
"oki": "Okiek",
"okj": "Oko-Juwoi",
"okk": "Kwamtim One",
"okl": "Old Kentish Sign Language",
"okm": "Middle Korean (10th-16th cent.)",
"okn": "Oki-No-Erabu",
"oko": "Old Korean (3rd-9th cent.)",
"okr": "Kirike",
"oks": "Oko-Eni-Osayen",
"oku": "Oku",
"okv": "Orokaiva",
"okx": "Okpe (Northwestern Edo)",
"okz": "Old Khmer",
"ola": "Walungge",
"old": "Mochi",
"ole": "Olekha",
"olk": "Olkol",
"olm": "Oloma",
"olo": "Livvi",
"olr": "Olrat",
"olt": "Old Lithuanian",
"olu": "Kuvale",
"om": "Oromo",
"oma": "Omaha-Ponca",
"omb": "East Ambae",
"omc": "Mochica",
"omg": "Omagua",
"omi": "Omi",
"omk": "Omok",
"oml": "Ombo",
"omn": "Minoan",
"omo": "Utarmbung",
"omp": "Old Manipuri",
"omq": "Oto-Manguean languages",
"omr": "Old Marathi",
"omt": "Omotik",
"omu": "Omurano",
"omv": "Omotic languages",
"omw": "South Tairora",
"omx": "Old Mon",
"omy": "Old Malay",
"ona": "Ona",
"onb": "Lingao",
"one": "Oneida",
"ong": "Olo",
"oni": "Onin",
"onj": "Onjob",
"onk": "Kabore One",
"onn": "Onobasulu",
"ono": "Onondaga",
"onp": "Sartang",
"onr": "Northern One",
"ons": "Ono",
"ont": "Ontenu",
"onu": "Unua",
"onw": "Old Nubian",
"onx": "Onin Based Pidgin",
"ood": "Tohono O'odham",
"oog": "Ong",
"oon": "Önge",
"oor": "Oorlams",
"oos": "Old Ossetic",
"opa": "Okpamheri",
"opk": "Kopkaka",
"opm": "Oksapmin",
"opo": "Opao",
"opt": "Opata",
"opy": "Ofayé",
"or": "Oriya (macrolanguage); Odia (macrolanguage)",
"ora": "Oroha",
"orc": "Orma",
"ore": "Orejón",
"org": "Oring",
"orh": "Oroqen",
"orn": "Orang Kanaq",
"oro": "Orokolo",
"orr": "Oruma",
"ors": "Orang Seletar",
"ort": "Adivasi Oriya",
"oru": "Ormuri",
"orv": "Old Russian",
"orw": "Oro Win",
"orx": "Oro",
"ory": "Odia (individual language); Oriya (individual language)",
"orz": "Ormu",
"os": "Ossetian; Ossetic",
"osa": "Osage",
"osc": "Oscan",
"osi": "Osing",
"osn": "Old Sundanese",
"oso": "Ososo",
"osp": "Old Spanish",
"ost": "Osatu",
"osu": "Southern One",
"osx": "Old Saxon",
"ota": "Ottoman Turkish (1500-1928)",
"otb": "Old Tibetan",
"otd": "Ot Danum",
"ote": "Mezquital Otomi",
"oti": "Oti",
"otk": "Old Turkish",
"otl": "Tilapa Otomi",
"otm": "Eastern Highland Otomi",
"otn": "Tenango Otomi",
"oto": "Otomian languages",
"otq": "Querétaro Otomi",
"otr": "Otoro",
"ots": "Estado de México Otomi",
"ott": "Temoaya Otomi",
"otu": "Otuke",
"otw": "Ottawa",
"otx": "Texcatepec Otomi",
"oty": "Old Tamil",
"otz": "Ixtenco Otomi",
"oua": "Tagargrent",
"oub": "Glio-Oubi",
"oue": "Oune",
"oui": "Old Uighur",
"oum": "Ouma",
"ovd": "Elfdalian; Övdalian",
"owi": "Owiniga",
"owl": "Old Welsh",
"oyb": "Oy",
"oyd": "Oyda",
"oym": "Wayampi",
"oyy": "Oya'oya",
"ozm": "Koonzime",
"pa": "Panjabi; Punjabi",
"paa": "Papuan languages",
"pab": "Parecís",
"pac": "Pacoh",
"pad": "Paumarí",
"pae": "Pagibete",
"paf": "Paranawát",
"pag": "Pangasinan",
"pah": "Tenharim",
"pai": "Pe",
"pak": "Parakanã",
"pal": "Pahlavi",
"pam": "Pampanga; Kapampangan",
"pao": "Northern Paiute",
"pap": "Papiamento",
"paq": "Parya",
"par": "Panamint; Timbisha",
"pas": "Papasena",
"pau": "Palauan",
"pav": "Pakaásnovos",
"paw": "Pawnee",
"pax": "Pankararé",
"pay": "Pech",
"paz": "Pankararú",
"pbb": "Páez",
"pbc": "Patamona",
"pbe": "Mezontla Popoloca",
"pbf": "Coyotepec Popoloca",
"pbg": "Paraujano",
"pbh": "E'ñapa Woromaipu",
"pbi": "Parkwa",
"pbl": "Mak (Nigeria)",
"pbm": "Puebla Mazatec",
"pbn": "Kpasam",
"pbo": "Papel",
"pbp": "Badyara",
"pbr": "Pangwa",
"pbs": "Central Pame",
"pbt": "Southern Pashto",
"pbu": "Northern Pashto",
"pbv": "Pnar",
"pby": "Pyu (Papua New Guinea)",
"pca": "Santa Inés Ahuatempan Popoloca",
"pcb": "Pear",
"pcc": "Bouyei",
"pcd": "Picard",
"pce": "Ruching Palaung",
"pcf": "Paliyan",
"pcg": "Paniya",
"pch": "Pardhan",
"pci": "Duruwa",
"pcj": "Parenga",
"pck": "Paite Chin",
"pcl": "Pardhi",
"pcm": "Nigerian Pidgin",
"pcn": "Piti",
"pcp": "Pacahuara",
"pcw": "Pyapun",
"pda": "Anam",
"pdc": "Pennsylvania German",
"pdi": "Pa Di",
"pdn": "Podena; Fedan",
"pdo": "Padoe",
"pdt": "Plautdietsch",
"pdu": "Kayan",
"pea": "Peranakan Indonesian",
"peb": "Eastern Pomo",
"ped": "Mala (Papua New Guinea)",
"pee": "Taje",
"pef": "Northeastern Pomo",
"peg": "Pengo",
"peh": "Bonan",
"pei": "Chichimeca-Jonaz",
"pej": "Northern Pomo",
"pek": "Penchal",
"pel": "Pekal",
"pem": "Phende",
"peo": "Old Persian (ca. 600-400 B.C.)",
"pep": "Kunja",
"peq": "Southern Pomo",
"pes": "Iranian Persian",
"pev": "Pémono",
"pex": "Petats",
"pey": "Petjo",
"pez": "Eastern Penan",
"pfa": "Pááfang",
"pfe": "Pere",
"pfl": "Pfaelzisch",
"pga": "Sudanese Creole Arabic",
"pgd": "Gāndhārī",
"pgg": "Pangwali",
"pgi": "Pagi",
"pgk": "Rerep",
"pgl": "Primitive Irish",
"pgn": "Paelignian",
"pgs": "Pangseng",
"pgu": "Pagu",
"pgz": "Papua New Guinean Sign Language",
"pha": "Pa-Hng",
"phd": "Phudagi",
"phg": "Phuong",
"phh": "Phukha",
"phi": "Philippine languages",
"phj": "Pahari",
"phk": "Phake",
"phl": "Phalura; Palula",
"phm": "Phimbi",
"phn": "Phoenician",
"pho": "Phunoi",
"phq": "Phana'",
"phr": "Pahari-Potwari",
"pht": "Phu Thai",
"phu": "Phuan",
"phv": "Pahlavani",
"phw": "Phangduwali",
"pi": "Pali",
"pia": "Pima Bajo",
"pib": "Yine",
"pic": "Pinji",
"pid": "Piaroa",
"pie": "Piro",
"pif": "Pingelapese",
"pig": "Pisabo",
"pih": "Pitcairn-Norfolk",
"pij": "Pijao",
"pil": "Yom",
"pim": "Powhatan",
"pin": "Piame",
"pio": "Piapoco",
"pip": "Pero",
"pir": "Piratapuyo",
"pis": "Pijin",
"pit": "Pitta Pitta",
"piu": "Pintupi-Luritja",
"piv": "Pileni; Vaeakau-Taumako",
"piw": "Pimbwe",
"pix": "Piu",
"piy": "Piya-Kwonci",
"piz": "Pije",
"pjt": "Pitjantjatjara",
"pka": "Ardhamāgadhī Prākrit",
"pkb": "Pokomo; Kipfokomo",
"pkc": "Paekche",
"pkg": "Pak-Tong",
"pkh": "Pankhu",
"pkn": "Pakanha",
"pko": "Pökoot",
"pkp": "Pukapuka",
"pkr": "Attapady Kurumba",
"pks": "Pakistan Sign Language",
"pkt": "Maleng",
"pku": "Paku",
"pl": "Polish",
"pla": "Miani",
"plb": "Polonombauk",
"plc": "Central Palawano",
"pld": "Polari",
"ple": "Palu'e",
"plf": "Central Malayo-Polynesian languages",
"plg": "Pilagá",
"plh": "Paulohi",
"plj": "Polci",
"plk": "Kohistani Shina",
"pll": "Shwe Palaung",
"pln": "Palenquero",
"plo": "Oluta Popoluca",
"plq": "Palaic",
"plr": "Palaka Senoufo",
"pls": "San Marcos Tlacoyalco Popoloca; San Marcos Tlalcoyalco Popoloca",
"plt": "Plateau Malagasy",
"plu": "Palikúr",
"plv": "Southwest Palawano",
"plw": "Brooke's Point Palawano",
"ply": "Bolyu",
"plz": "Paluan",
"pma": "Paama",
"pmb": "Pambia",
"pmd": "Pallanganmiddang",
"pme": "Pwaamei",
"pmf": "Pamona",
"pmh": "Māhārāṣṭri Prākrit",
"pmi": "Northern Pumi",
"pmj": "Southern Pumi",
"pmk": "Pamlico",
"pml": "Lingua Franca",
"pmm": "Pomo",
"pmn": "Pam",
"pmo": "Pom",
"pmq": "Northern Pame",
"pmr": "Paynamar",
"pms": "Piemontese",
"pmt": "Tuamotuan",
"pmw": "Plains Miwok",
"pmx": "Poumei Naga",
"pmy": "Papuan Malay",
"pmz": "Southern Pame",
"pna": "Punan Bah-Biau",
"pnb": "Western Panjabi",
"pnc": "Pannei",
"pnd": "Mpinda",
"pne": "Western Penan",
"png": "Pangu; Pongu",
"pnh": "Penrhyn",
"pni": "Aoheng",
"pnj": "Pinjarup",
"pnk": "Paunaka",
"pnl": "Paleni",
"pnm": "Punan Batu 1",
"pnn": "Pinai-Hagahai",
"pno": "Panobo",
"pnp": "Pancana",
"pnq": "Pana (Burkina Faso)",
"pnr": "Panim",
"pns": "Ponosakan",
"pnt": "Pontic",
"pnu": "Jiongnai Bunu",
"pnv": "Pinigura",
"pnw": "Banyjima; Panytyima",
"pnx": "Phong-Kniang",
"pny": "Pinyin",
"pnz": "Pana (Central African Republic)",
"poc": "Poqomam",
"poe": "San Juan Atzingo Popoloca",
"pof": "Poke",
"pog": "Potiguára",
"poh": "Poqomchi'",
"poi": "Highland Popoluca",
"pok": "Pokangá",
"pom": "Southeastern Pomo",
"pon": "Pohnpeian",
"poo": "Central Pomo",
"pop": "Pwapwâ",
"poq": "Texistepec Popoluca",
"pos": "Sayula Popoluca",
"pot": "Potawatomi",
"pov": "Upper Guinea Crioulo",
"pow": "San Felipe Otlaltepec Popoloca",
"pox": "Polabian",
"poy": "Pogolo",
"poz": "Malayo-Polynesian languages",
"ppe": "Papi",
"ppi": "Paipai",
"ppk": "Uma",
"ppl": "Pipil; Nicarao",
"ppm": "Papuma",
"ppn": "Papapana",
"ppo": "Folopa",
"ppp": "Pelende",
"ppq": "Pei",
"pps": "San Luís Temalacayuca Popoloca",
"ppt": "Pare",
"ppu": "Papora",
"pqa": "Pa'a",
"pqe": "Eastern Malayo-Polynesian languages",
"pqm": "Malecite-Passamaquoddy",
"pqw": "Western Malayo-Polynesian languages",
"pra": "Prakrit languages",
"prc": "Parachi",
"prd": "Parsi-Dari",
"pre": "Principense",
"prf": "Paranan",
"prg": "Prussian",
"prh": "Porohanon",
"pri": "Paicî",
"prk": "Parauk",
"prl": "Peruvian Sign Language",
"prm": "Kibiri",
"prn": "Prasuni",
"pro": "Old Provençal (to 1500); Old Occitan (to 1500)",
"prp": "Parsi",
"prq": "Ashéninka Perené",
"prr": "Puri",
"prs": "Dari; Afghan Persian",
"prt": "Phai",
"pru": "Puragi",
"prw": "Parawen",
"prx": "Purik",
"prz": "Providencia Sign Language",
"ps": "Pushto; Pashto",
"psa": "Asue Awyu",
"psc": "Iranian Sign Language; Persian Sign Language",
"psd": "Plains Indian Sign Language",
"pse": "Central Malay",
"psg": "Penang Sign Language",
"psh": "Southwest Pashai; Southwest Pashayi",
"psi": "Southeast Pashai; Southeast Pashayi",
"psl": "Puerto Rican Sign Language",
"psm": "Pauserna",
"psn": "Panasuan",
"pso": "Polish Sign Language",
"psp": "Philippine Sign Language",
"psq": "Pasi",
"psr": "Portuguese Sign Language",
"pss": "Kaulong",
"pst": "Central Pashto",
"psu": "Sauraseni Prākrit",
"psw": "Port Sandwich",
"psy": "Piscataway",
"pt": "Portuguese",
"pta": "Pai Tavytera",
"pth": "Pataxó Hã-Ha-Hãe",
"pti": "Pindiini; Wangkatha",
"ptn": "Patani",
"pto": "Zo'é",
"ptp": "Patep",
"ptq": "Pattapu",
"ptr": "Piamatsina",
"ptt": "Enrekang",
"ptu": "Bambam",
"ptv": "Port Vato",
"ptw": "Pentlatch",
"pty": "Pathiya",
"pua": "Western Highland Purepecha",
"pub": "Purum",
"puc": "Punan Merap",
"pud": "Punan Aput",
"pue": "Puelche",
"puf": "Punan Merah",
"pug": "Phuie",
"pui": "Puinave",
"puj": "Punan Tubu",
"pum": "Puma",
"puo": "Puoc",
"pup": "Pulabu",
"puq": "Puquina",
"pur": "Puruborá",
"put": "Putoh",
"puu": "Punu",
"puw": "Puluwatese",
"pux": "Puare",
"puy": "Purisimeño",
"pwa": "Pawaia",
"pwb": "Panawa",
"pwg": "Gapapaiwa",
"pwi": "Patwin",
"pwm": "Molbog",
"pwn": "Paiwan",
"pwo": "Pwo Western Karen",
"pwr": "Powari",
"pww": "Pwo Northern Karen",
"pxm": "Quetzaltepec Mixe",
"pye": "Pye Krumen",
"pym": "Fyam",
"pyn": "Poyanáwa",
"pys": "Paraguayan Sign Language; Lengua de Señas del Paraguay",
"pyu": "Puyuma",
"pyx": "Pyu (Myanmar)",
"pyy": "Pyen",
"pzh": "Pazeh",
"pzn": "Jejara Naga; Para Naga",
"qu": "Quechua",
"qua": "Quapaw",
"qub": "Huallaga Huánuco Quechua",
"quc": "K'iche'; Quiché",
"qud": "Calderón Highland Quichua",
"quf": "Lambayeque Quechua",
"qug": "Chimborazo Highland Quichua",
"quh": "South Bolivian Quechua",
"qui": "Quileute",
"quk": "Chachapoyas Quechua",
"qul": "North Bolivian Quechua",
"qum": "Sipacapense",
"qun": "Quinault",
"qup": "Southern Pastaza Quechua",
"quq": "Quinqui",
"qur": "Yanahuanca Pasco Quechua",
"qus": "Santiago del Estero Quichua",
"quv": "Sacapulteco",
"quw": "Tena Lowland Quichua",
"qux": "Yauyos Quechua",
"quy": "Ayacucho Quechua",
"quz": "Cusco Quechua",
"qva": "Ambo-Pasco Quechua",
"qvc": "Cajamarca Quechua",
"qve": "Eastern Apurímac Quechua",
"qvh": "Huamalíes-Dos de Mayo Huánuco Quechua",
"qvi": "Imbabura Highland Quichua",
"qvj": "Loja Highland Quichua",
"qvl": "Cajatambo North Lima Quechua",
"qvm": "Margos-Yarowilca-Lauricocha Quechua",
"qvn": "North Junín Quechua",
"qvo": "Napo Lowland Quechua",
"qvp": "Pacaraos Quechua",
"qvs": "San Martín Quechua",
"qvw": "Huaylla Wanca Quechua",
"qvy": "Queyu",
"qvz": "Northern Pastaza Quichua",
"qwa": "Corongo Ancash Quechua",
"qwc": "Classical Quechua",
"qwe": "Quechuan (family)",
"qwh": "Huaylas Ancash Quechua",
"qwm": "Kuman (Russia)",
"qws": "Sihuas Ancash Quechua",
"qwt": "Kwalhioqua-Tlatskanai",
"qxa": "Chiquián Ancash Quechua",
"qxc": "Chincha Quechua",
"qxh": "Panao Huánuco Quechua",
"qxl": "Salasaca Highland Quichua",
"qxn": "Northern Conchucos Ancash Quechua",
"qxo": "Southern Conchucos Ancash Quechua",
"qxp": "Puno Quechua",
"qxq": "Qashqa'i",
"qxr": "Cañar Highland Quichua",
"qxs": "Southern Qiang",
"qxt": "Santa Ana de Tusi Pasco Quechua",
"qxu": "Arequipa-La Unión Quechua",
"qxw": "Jauja Wanca Quechua",
"qya": "Quenya",
"qyp": "Quiripi",
"raa": "Dungmali",
"rab": "Camling",
"rac": "Rasawa",
"rad": "Rade",
"raf": "Western Meohang",
"rag": "Logooli; Lulogooli",
"rah": "Rabha",
"rai": "Ramoaaina",
"raj": "Rajasthani",
"rak": "Tulu-Bohuai",
"ral": "Ralte",
"ram": "Canela",
"ran": "Riantana",
"rao": "Rao",
"rap": "Rapanui",
"raq": "Saam",
"rar": "Rarotongan; Cook Islands Maori",
"ras": "Tegali",
"rat": "Razajerdi",
"rau": "Raute",
"rav": "Sampang",
"raw": "Rawang",
"rax": "Rang",
"ray": "Rapa",
"raz": "Rahambuu",
"rbb": "Rumai Palaung",
"rbk": "Northern Bontok",
"rbl": "Miraya Bikol",
"rbp": "Barababaraba",
"rcf": "Réunion Creole French",
"rdb": "Rudbari",
"rea": "Rerau",
"reb": "Rembong",
"ree": "Rejang Kayan",
"reg": "Kara (Tanzania)",
"rei": "Reli",
"rej": "Rejang",
"rel": "Rendille",
"rem": "Remo",
"ren": "Rengao",
"rer": "Rer Bare",
"res": "Reshe",
"ret": "Retta",
"rey": "Reyesano",
"rga": "Roria",
"rge": "Romano-Greek",
"rgk": "Rangkas",
"rgn": "Romagnol",
"rgr": "Resígaro",
"rgs": "Southern Roglai",
"rgu": "Ringgou",
"rhg": "Rohingya",
"rhp": "Yahang",
"ria": "Riang (India)",
"rib": "Bribri Sign Language",
"rif": "Tarifit",
"ril": "Riang Lang; Riang (Myanmar)",
"rim": "Nyaturu",
"rin": "Nungu",
"rir": "Ribun",
"rit": "Ritharrngu",
"riu": "Riung",
"rjg": "Rajong",
"rji": "Raji",
"rjs": "Rajbanshi",
"rka": "Kraol",
"rkb": "Rikbaktsa",
"rkh": "Rakahanga-Manihiki",
"rki": "Rakhine",
"rkm": "Marka",
"rkt": "Rangpuri; Kamta",
"rkw": "Arakwal",
"rm": "Romansh",
"rma": "Rama",
"rmb": "Rembarrnga",
"rmc": "Carpathian Romani",
"rmd": "Traveller Danish",
"rme": "Angloromani",
"rmf": "Kalo Finnish Romani",
"rmg": "Traveller Norwegian",
"rmh": "Murkim",
"rmi": "Lomavren",
"rmk": "Romkun",
"rml": "Baltic Romani",
"rmm": "Roma",
"rmn": "Balkan Romani",
"rmo": "Sinte Romani",
"rmp": "Rempi",
"rmq": "Caló",
"rms": "Romanian Sign Language",
"rmt": "Domari",
"rmu": "Tavringer Romani",
"rmv": "Romanova",
"rmw": "Welsh Romani",
"rmx": "Romam",
"rmy": "Vlax Romani",
"rmz": "Marma",
"rn": "Rundi",
"rnb": "Brunca Sign Language",
"rnd": "Ruund",
"rng": "Ronga",
"rnl": "Ranglong",
"rnn": "Roon",
"rnp": "Rongpo",
"rnr": "Nari Nari",
"rnw": "Rungwa",
"ro": "Romanian; Moldavian; Moldovan",
"roa": "Romance languages",
"rob": "Tae'",
"roc": "Cacgia Roglai",
"rod": "Rogo",
"roe": "Ronji",
"rof": "Rombo",
"rog": "Northern Roglai",
"rol": "Romblomanon",
"rom": "Romany",
"roo": "Rotokas",
"rop": "Kriol",
"ror": "Rongga",
"rou": "Runga",
"row": "Dela-Oenale",
"rpn": "Repanbitip",
"rpt": "Rapting",
"rri": "Ririo",
"rro": "Waima",
"rrt": "Arritinngithigh",
"rsb": "Romano-Serbian",
"rsk": "Ruthenian; Rusyn",
"rsl": "Russian Sign Language",
"rsm": "Miriwoong Sign Language",
"rsn": "Rwandan Sign Language",
"rtc": "Rungtu Chin",
"rth": "Ratahan",
"rtm": "Rotuman",
"rts": "Yurats",
"rtw": "Rathawi",
"ru": "Russian",
"rub": "Gungu",
"ruc": "Ruuli",
"rue": "Rusyn",
"ruf": "Luguru",
"rug": "Roviana",
"ruh": "Ruga",
"rui": "Rufiji",
"ruk": "Che",
"ruo": "Istro Romanian",
"rup": "Macedo-Romanian; Aromanian; Arumanian",
"ruq": "Megleno Romanian",
"rut": "Rutul",
"ruu": "Lanas Lobu",
"ruy": "Mala (Nigeria)",
"ruz": "Ruma",
"rw": "Kinyarwanda",
"rwa": "Rawo",
"rwk": "Rwa",
"rwl": "Ruwila",
"rwm": "Amba (Uganda)",
"rwo": "Rawa",
"rwr": "Marwari (India)",
"rxd": "Ngardi",
"rxw": "Karuwali; Garuwali",
"ryn": "Northern Amami-Oshima",
"rys": "Yaeyama",
"ryu": "Central Okinawan",
"rzh": "Rāziḥī",
"sa": "Sanskrit",
"saa": "Saba",
"sab": "Buglere",
"sac": "Meskwaki",
"sad": "Sandawe",
"sae": "Sabanê",
"saf": "Safaliba",
"sah": "Yakut",
"sai": "South American Indian languages",
"saj": "Sahu",
"sak": "Sake",
"sal": "Salishan languages",
"sam": "Samaritan Aramaic",
"sao": "Sause",
"saq": "Samburu",
"sar": "Saraveca",
"sas": "Sasak",
"sat": "Santali",
"sau": "Saleman",
"sav": "Saafi-Saafi",
"saw": "Sawi",
"sax": "Sa",
"say": "Saya",
"saz": "Saurashtra",
"sba": "Ngambay",
"sbb": "Simbo",
"sbc": "Kele (Papua New Guinea)",
"sbd": "Southern Samo",
"sbe": "Saliba",
"sbf": "Chabu; Shabo",
"sbg": "Seget",
"sbh": "Sori-Harengan",
"sbi": "Seti",
"sbj": "Surbakhal",
"sbk": "Safwa",
"sbl": "Botolan Sambal",
"sbm": "Sagala",
"sbn": "Sindhi Bhil",
"sbo": "Sabüm",
"sbp": "Sangu (Tanzania)",
"sbq": "Sileibi",
"sbr": "Sembakung Murut",
"sbs": "Subiya",
"sbt": "Kimki",
"sbu": "Stod Bhoti",
"sbv": "Sabine",
"sbw": "Simba",
"sbx": "Seberuang",
"sby": "Soli",
"sbz": "Sara Kaba",
"sc": "Sardinian",
"scb": "Chut",
"sce": "Dongxiang",
"scf": "San Miguel Creole French",
"scg": "Sanggau",
"sch": "Sakachep",
"sci": "Sri Lankan Creole Malay",
"sck": "Sadri",
"scl": "Shina",
"scn": "Sicilian",
"sco": "Scots",
"scp": "Hyolmo; Helambu Sherpa",
"scq": "Sa'och",
"scs": "North Slavey",
"sct": "Southern Katang",
"scu": "Shumcho",
"scv": "Sheni",
"scw": "Sha",
"scx": "Sicel",
"sd": "Sindhi",
"sda": "Toraja-Sa'dan",
"sdb": "Shabak",
"sdc": "Sassarese Sardinian",
"sde": "Surubu",
"sdf": "Sarli",
"sdg": "Savi",
"sdh": "Southern Kurdish",
"sdj": "Suundi",
"sdk": "Sos Kundi",
"sdl": "Saudi Arabian Sign Language",
"sdn": "Gallurese Sardinian",
"sdo": "Bukar-Sadung Bidayuh",
"sdp": "Sherdukpen",
"sdq": "Semandang",
"sdr": "Oraon Sadri",
"sds": "Sened",
"sdt": "Shuadit",
"sdu": "Sarudu",
"sdv": "Eastern Sudanic languages",
"sdx": "Sibu Melanau",
"sdz": "Sallands",
"se": "Northern Sami",
"sea": "Semai",
"seb": "Shempire Senoufo",
"sec": "Sechelt",
"sed": "Sedang",
"see": "Seneca",
"sef": "Cebaara Senoufo",
"seg": "Segeju",
"seh": "Sena",
"sei": "Seri",
"sej": "Sene",
"sek": "Sekani",
"sel": "Selkup",
"sem": "Semitic languages",
"sen": "Nanerigé Sénoufo",
"seo": "Suarmin",
"sep": "Sìcìté Sénoufo",
"seq": "Senara Sénoufo",
"ser": "Serrano",
"ses": "Koyraboro Senni Songhai",
"set": "Sentani",
"seu": "Serui-Laut",
"sev": "Nyarafolo Senoufo",
"sew": "Sewa Bay",
"sey": "Secoya",
"sez": "Senthang Chin",
"sfb": "Langue des signes de Belgique Francophone; French Belgian Sign Language",
"sfe": "Eastern Subanen",
"sfm": "Small Flowery Miao",
"sfs": "South African Sign Language",
"sfw": "Sehwi",
"sg": "Sango",
"sga": "Old Irish (to 900)",
"sgb": "Mag-antsi Ayta",
"sgc": "Kipsigis",
"sgd": "Surigaonon",
"sge": "Segai",
"sgg": "Swiss-German Sign Language",
"sgh": "Shughni",
"sgi": "Suga",
"sgj": "Surgujia",
"sgk": "Sangkong",
"sgm": "Singa",
"sgn": "Sign languages",
"sgp": "Singpho",
"sgr": "Sangisari",
"sgs": "Samogitian",
"sgt": "Brokpake",
"sgu": "Salas",
"sgw": "Sebat Bet Gurage",
"sgx": "Sierra Leone Sign Language",
"sgy": "Sanglechi",
"sgz": "Sursurunga",
"sh": "Serbo-Croatian",
"sha": "Shall-Zwall",
"shb": "Ninam",
"shc": "Sonde",
"shd": "Kundal Shahi",
"she": "Sheko",
"shg": "Shua",
"shh": "Shoshoni",
"shi": "Tachelhit",
"shj": "Shatt",
"shk": "Shilluk",
"shl": "Shendu",
"shm": "Shahrudi",
"shn": "Shan",
"sho": "Shanga",
"shp": "Shipibo-Conibo",
"shq": "Sala",
"shr": "Shi",
"shs": "Shuswap",
"sht": "Shasta",
"shu": "Chadian Arabic",
"shv": "Shehri",
"shw": "Shwai",
"shx": "She",
"shy": "Tachawit",
"shz": "Syenara Senoufo",
"si": "Sinhala; Sinhalese",
"sia": "Akkala Sami",
"sib": "Sebop",
"sid": "Sidamo",
"sie": "Simaa",
"sif": "Siamou",
"sig": "Paasaal",
"sih": "Zire; Sîshëë",
"sii": "Shom Peng",
"sij": "Numbami",
"sik": "Sikiana",
"sil": "Tumulung Sisaala",
"sim": "Mende (Papua New Guinea)",
"sio": "Siouan languages",
"sip": "Sikkimese",
"siq": "Sonia",
"sir": "Siri",
"sis": "Siuslaw",
"sit": "Sino-Tibetan languages",
"siu": "Sinagen",
"siv": "Sumariup",
"siw": "Siwai",
"six": "Sumau",
"siy": "Sivandi",
"siz": "Siwi",
"sja": "Epena",
"sjb": "Sajau Basap",
"sjd": "Kildin Sami",
"sje": "Pite Sami",
"sjg": "Assangori",
"sjk": "Kemi Sami",
"sjl": "Sajalong; Miji",
"sjm": "Mapun",
"sjn": "Sindarin",
"sjo": "Xibe",
"sjp": "Surjapuri",
"sjr": "Siar-Lak",
"sjs": "Senhaja De Srair",
"sjt": "Ter Sami",
"sju": "Ume Sami",
"sjw": "Shawnee",
"sk": "Slovak",
"ska": "Skagit",
"skb": "Saek",
"skc": "Ma Manda",
"skd": "Southern Sierra Miwok",
"ske": "Seke (Vanuatu)",
"skf": "Sakirabiá",
"skg": "Sakalava Malagasy",
"skh": "Sikule",
"ski": "Sika",
"skj": "Seke (Nepal)",
"skm": "Kutong",
"skn": "Kolibugan Subanon",
"sko": "Seko Tengah",
"skp": "Sekapan",
"skq": "Sininkere",
"skr": "Saraiki; Seraiki",
"sks": "Maia",
"skt": "Sakata",
"sku": "Sakao",
"skv": "Skou",
"skw": "Skepi Creole Dutch",
"skx": "Seko Padang",
"sky": "Sikaiana",
"skz": "Sekar",
"sl": "Slovenian",
"sla": "Slavic languages",
"slc": "Sáliba",
"sld": "Sissala",
"sle": "Sholaga",
"slf": "Swiss-Italian Sign Language",
"slg": "Selungai Murut",
"slh": "Southern Puget Sound Salish",
"sli": "Lower Silesian",
"slj": "Salumá",
"sll": "Salt-Yui",
"slm": "Pangutaran Sama",
"sln": "Salinan",
"slp": "Lamaholot",
"slq": "Salchuq",
"slr": "Salar",
"sls": "Singapore Sign Language",
"slt": "Sila",
"slu": "Selaru",
"slw": "Sialum",
"slx": "Salampasu",
"sly": "Selayar",
"slz": "Ma'ya",
"sm": "Samoan",
"sma": "Southern Sami",
"smb": "Simbari",
"smc": "Som",
"smf": "Auwe",
"smg": "Simbali",
"smh": "Samei",
"smi": "Sami languages",
"smj": "Lule Sami",
"smk": "Bolinao",
"sml": "Central Sama",
"smm": "Musasa",
"smn": "Inari Sami",
"smp": "Samaritan",
"smq": "Samo",
"smr": "Simeulue",
"sms": "Skolt Sami",
"smt": "Simte",
"smu": "Somray",
"smv": "Samvedi",
"smw": "Sumbawa",
"smx": "Samba",
"smy": "Semnani",
"smz": "Simeku",
"sn": "Shona",
"snc": "Sinaugoro",
"sne": "Bau Bidayuh",
"snf": "Noon",
"sng": "Sanga (Democratic Republic of Congo)",
"sni": "Sensi",
"snj": "Riverain Sango",
"snk": "Soninke",
"snl": "Sangil",
"snm": "Southern Ma'di",
"snn": "Siona",
"sno": "Snohomish",
"snp": "Siane",
"snq": "Sangu (Gabon)",
"snr": "Sihan",
"sns": "South West Bay; Nahavaq",
"snu": "Senggi; Viid",
"snv": "Sa'ban",
"snw": "Selee",
"snx": "Sam",
"sny": "Saniyo-Hiyewe",
"snz": "Kou",
"so": "Somali",
"soa": "Thai Song",
"sob": "Sobei",
"soc": "So (Democratic Republic of Congo)",
"sod": "Songoora",
"soe": "Songomeno",
"sog": "Sogdian",
"soh": "Aka",
"soi": "Sonha",
"soj": "Soi",
"sok": "Sokoro",
"sol": "Solos",
"son": "Songhai languages",
"soo": "Songo",
"sop": "Songe",
"soq": "Kanasi",
"sor": "Somrai",
"sos": "Seeku",
"sou": "Southern Thai",
"sov": "Sonsorol",
"sow": "Sowanda",
"sox": "Swo",
"soy": "Miyobe",
"soz": "Temi",
"spb": "Sepa (Indonesia)",
"spc": "Sapé",
"spd": "Saep",
"spe": "Sepa (Papua New Guinea)",
"spg": "Sian",
"spi": "Saponi",
"spk": "Sengo",
"spl": "Selepet",
"spm": "Akukem",
"spn": "Sanapaná",
"spo": "Spokane",
"spp": "Supyire Senoufo",
"spq": "Loreto-Ucayali Spanish",
"spr": "Saparua",
"sps": "Saposa",
"spt": "Spiti Bhoti",
"spu": "Sapuan",
"spv": "Sambalpuri; Kosli",
"spx": "South Picene",
"spy": "Sabaot",
"sq": "Albanian",
"sqa": "Shama-Sambuga",
"sqh": "Shau",
"sqj": "Albanian languages",
"sqk": "Albanian Sign Language",
"sqm": "Suma",
"sqn": "Susquehannock",
"sqo": "Sorkhei",
"sqq": "Sou",
"sqr": "Siculo Arabic",
"sqs": "Sri Lankan Sign Language",
"sqt": "Soqotri",
"squ": "Squamish",
"sqx": "Kufr Qassem Sign Language (KQSL)",
"sr": "Serbian",
"sra": "Saruga",
"srb": "Sora",
"src": "Logudorese Sardinian",
"sre": "Sara",
"srf": "Nafi",
"srg": "Sulod",
"srh": "Sarikoli",
"sri": "Siriano",
"srk": "Serudung Murut",
"srl": "Isirawa",
"srm": "Saramaccan",
"srn": "Sranan Tongo",
"sro": "Campidanese Sardinian",
"srq": "Sirionó",
"srr": "Serer",
"srs": "Sarsi",
"srt": "Sauri",
"sru": "Suruí",
"srv": "Southern Sorsoganon",
"srw": "Serua",
"srx": "Sirmauri",
"sry": "Sera",
"srz": "Shahmirzadi",
"ss": "Swati",
"ssa": "Nilo-Saharan languages",
"ssb": "Southern Sama",
"ssc": "Suba-Simbiti",
"ssd": "Siroi",
"sse": "Balangingi; Bangingih Sama",
"ssf": "Thao",
"ssg": "Seimat",
"ssh": "Shihhi Arabic",
"ssi": "Sansi",
"ssj": "Sausi",
"ssk": "Sunam",
"ssl": "Western Sisaala",
"ssm": "Semnam",
"ssn": "Waata",
"sso": "Sissano",
"ssp": "Spanish Sign Language",
"ssq": "So'a",
"ssr": "Swiss-French Sign Language",
"sss": "Sô",
"sst": "Sinasina",
"ssu": "Susuami",
"ssv": "Shark Bay",
"ssx": "Samberigi",
"ssy": "Saho",
"ssz": "Sengseng",
"st": "Southern Sotho",
"sta": "Settla",
"stb": "Northern Subanen",
"std": "Sentinel",
"ste": "Liana-Seti",
"stf": "Seta",
"stg": "Trieng",
"sth": "Shelta",
"sti": "Bulo Stieng",
"stj": "Matya Samo",
"stk": "Arammba",
"stl": "Stellingwerfs",
"stm": "Setaman",
"stn": "Owa",
"sto": "Stoney",
"stp": "Southeastern Tepehuan",
"stq": "Saterfriesisch",
"str": "Straits Salish",
"sts": "Shumashti",
"stt": "Budeh Stieng",
"stu": "Samtao",
"stv": "Silt'e",
"stw": "Satawalese",
"sty": "Siberian Tatar",
"su": "Sundanese",
"sua": "Sulka",
"sub": "Suku",
"suc": "Western Subanon",
"sue": "Suena",
"sug": "Suganga",
"sui": "Suki",
"suj": "Shubi",
"suk": "Sukuma",
"suo": "Bouni",
"suq": "Tirmaga-Chai Suri; Suri",
"sur": "Mwaghavul",
"sus": "Susu",
"sut": "Subtiaba",
"suv": "Puroik",
"suw": "Sumbwa",
"sux": "Sumerian",
"suy": "Suyá",
"suz": "Sunwar",
"sv": "Swedish",
"sva": "Svan",
"svb": "Ulau-Suain",
"svc": "Vincentian Creole English",
"sve": "Serili",
"svk": "Slovakian Sign Language",
"svm": "Slavomolisano",
"svs": "Savosavo",
"svx": "Skalvian",
"sw": "Swahili (macrolanguage)",
"swb": "Maore Comorian",
"swc": "Congo Swahili",
"swf": "Sere",
"swg": "Swabian",
"swh": "Swahili (individual language); Kiswahili",
"swi": "Sui",
"swj": "Sira",
"swk": "Malawi Sena",
"swl": "Swedish Sign Language",
"swm": "Samosa",
"swn": "Sawknah",
"swo": "Shanenawa",
"swp": "Suau",
"swq": "Sharwa",
"swr": "Saweru",
"sws": "Seluwasan",
"swt": "Sawila",
"swu": "Suwawa",
"swv": "Shekhawati",
"sww": "Sowa",
"swx": "Suruahá",
"swy": "Sarua",
"sxb": "Suba",
"sxc": "Sicanian",
"sxe": "Sighu",
"sxg": "Shuhi; Shixing",
"sxk": "Southern Kalapuya",
"sxl": "Selian",
"sxm": "Samre",
"sxn": "Sangir",
"sxo": "Sorothaptic",
"sxr": "Saaroa",
"sxs": "Sasaru",
"sxu": "Upper Saxon",
"sxw": "Saxwe Gbe",
"sya": "Siang",
"syb": "Central Subanen",
"syc": "Classical Syriac",
"syd": "Samoyedic languages",
"syi": "Seki",
"syk": "Sukur",
"syl": "Sylheti",
"sym": "Maya Samo",
"syn": "Senaya",
"syo": "Suoy",
"syr": "Syriac",
"sys": "Sinyar",
"syw": "Kagate",
"syx": "Samay",
"syy": "Al-Sayyid Bedouin Sign Language",
"sza": "Semelai",
"szb": "Ngalum",
"szc": "Semaq Beri",
"szd": "Seru",
"sze": "Seze",
"szg": "Sengele",
"szl": "Silesian",
"szn": "Sula",
"szp": "Suabo",
"szs": "Solomon Islands Sign Language",
"szv": "Isu (Fako Division)",
"szw": "Sawai",
"szy": "Sakizaya",
"ta": "Tamil",
"taa": "Lower Tanana",
"tab": "Tabassaran",
"tac": "Lowland Tarahumara",
"tad": "Tause",
"tae": "Tariana",
"taf": "Tapirapé",
"tag": "Tagoi",
"tai": "Tai languages",
"taj": "Eastern Tamang",
"tak": "Tala",
"tal": "Tal",
"tan": "Tangale",
"tao": "Yami",
"tap": "Taabwa",
"taq": "Tamasheq",
"tar": "Central Tarahumara",
"tas": "Tay Boi",
"tau": "Upper Tanana",
"tav": "Tatuyo",
"taw": "Tai",
"tax": "Tamki",
"tay": "Atayal",
"taz": "Tocho",
"tba": "Aikanã",
"tbc": "Takia",
"tbd": "Kaki Ae",
"tbe": "Tanimbili",
"tbf": "Mandara",
"tbg": "North Tairora",
"tbh": "Dharawal; Thurawal",
"tbi": "Gaam",
"tbj": "Tiang",
"tbk": "Calamian Tagbanwa",
"tbl": "Tboli",
"tbm": "Tagbu",
"tbn": "Barro Negro Tunebo",
"tbo": "Tawala",
"tbp": "Taworta; Diebroud",
"tbq": "Tibeto-Burman languages",
"tbr": "Tumtum",
"tbs": "Tanguat",
"tbt": "Tembo (Kitembo)",
"tbu": "Tubar",
"tbv": "Tobo",
"tbw": "Tagbanwa",
"tbx": "Kapin",
"tby": "Tabaru",
"tbz": "Ditammari",
"tca": "Ticuna",
"tcb": "Tanacross",
"tcc": "Datooga",
"tcd": "Tafi",
"tce": "Southern Tutchone",
"tcf": "Malinaltepec Me'phaa; Malinaltepec Tlapanec",
"tcg": "Tamagario",
"tch": "Turks And Caicos Creole English",
"tci": "Wára",
"tck": "Tchitchege",
"tcl": "Taman (Myanmar)",
"tcm": "Tanahmerah",
"tcn": "Tichurong",
"tco": "Taungyo",
"tcp": "Tawr Chin",
"tcq": "Kaiy",
"tcs": "Torres Strait Creole; Yumplatok",
"tct": "T'en",
"tcu": "Southeastern Tarahumara",
"tcw": "Tecpatlán Totonac",
"tcx": "Toda",
"tcy": "Tulu",
"tcz": "Thado Chin",
"tda": "Tagdal",
"tdb": "Panchpargania",
"tdc": "Emberá-Tadó",
"tdd": "Tai Nüa",
"tde": "Tiranige Diga Dogon",
"tdf": "Talieng",
"tdg": "Western Tamang",
"tdh": "Thulung",
"tdi": "Tomadino",
"tdj": "Tajio",
"tdk": "Tambas",
"tdl": "Sur",
"tdm": "Taruma",
"tdn": "Tondano",
"tdo": "Teme",
"tdq": "Tita",
"tdr": "Todrah",
"tds": "Doutai",
"tdt": "Tetun Dili",
"tdv": "Toro",
"tdx": "Tandroy-Mahafaly Malagasy",
"tdy": "Tadyawan",
"te": "Telugu",
"tea": "Temiar",
"teb": "Tetete",
"tec": "Terik",
"ted": "Tepo Krumen",
"tee": "Huehuetla Tepehua",
"tef": "Teressa",
"teg": "Teke-Tege",
"teh": "Tehuelche",
"tei": "Torricelli",
"tek": "Ibali Teke",
"tem": "Timne",
"ten": "Tama (Colombia)",
"teo": "Teso",
"tep": "Tepecano",
"teq": "Temein",
"ter": "Tereno",
"tes": "Tengger",
"tet": "Tetum",
"teu": "Soo",
"tev": "Teor",
"tew": "Tewa (USA)",
"tex": "Tennet",
"tey": "Tulishi",
"tez": "Tetserret",
"tfi": "Tofin Gbe",
"tfn": "Tanaina",
"tfo": "Tefaro",
"tfr": "Teribe",
"tft": "Ternate",
"tg": "Tajik",
"tga": "Sagalla",
"tgb": "Tobilung",
"tgc": "Tigak",
"tgd": "Ciwogai",
"tge": "Eastern Gorkha Tamang",
"tgf": "Chalikha",
"tgh": "Tobagonian Creole English",
"tgi": "Lawunuia",
"tgj": "Tagin",
"tgn": "Tandaganon",
"tgo": "Sudest",
"tgp": "Tangoa",
"tgq": "Tring",
"tgr": "Tareng",
"tgs": "Nume",
"tgt": "Central Tagbanwa",
"tgu": "Tanggu",
"tgv": "Tingui-Boto",
"tgw": "Tagwana Senoufo",
"tgx": "Tagish",
"tgy": "Togoyo",
"tgz": "Tagalaka",
"th": "Thai",
"thd": "Kuuk Thaayorre; Thayore",
"the": "Chitwania Tharu",
"thf": "Thangmi",
"thh": "Northern Tarahumara",
"thi": "Tai Long",
"thk": "Tharaka; Kitharaka",
"thl": "Dangaura Tharu",
"thm": "Aheu",
"thn": "Thachanadan",
"thp": "Thompson",
"thq": "Kochila Tharu",
"thr": "Rana Tharu",
"ths": "Thakali",
"tht": "Tahltan",
"thu": "Thuri",
"thv": "Tahaggart Tamahaq",
"thy": "Tha",
"thz": "Tayart Tamajeq",
"ti": "Tigrinya",
"tia": "Tidikelt Tamazight",
"tic": "Tira",
"tif": "Tifal",
"tig": "Tigre",
"tih": "Timugon Murut",
"tii": "Tiene",
"tij": "Tilung",
"tik": "Tikar",
"til": "Tillamook",
"tim": "Timbe",
"tin": "Tindi",
"tio": "Teop",
"tip": "Trimuris",
"tiq": "Tiéfo",
"tis": "Masadiit Itneg",
"tit": "Tinigua",
"tiu": "Adasen",
"tiv": "Tiv",
"tiw": "Tiwi",
"tix": "Southern Tiwa",
"tiy": "Tiruray",
"tiz": "Tai Hongjin",
"tja": "Tajuasohn",
"tjg": "Tunjung",
"tji": "Northern Tujia",
"tjj": "Tjungundji",
"tjl": "Tai Laing",
"tjm": "Timucua",
"tjn": "Tonjon",
"tjo": "Temacine Tamazight",
"tjp": "Tjupany",
"tjs": "Southern Tujia",
"tju": "Tjurruru",
"tjw": "Djabwurrung",
"tk": "Turkmen",
"tka": "Truká",
"tkb": "Buksa",
"tkd": "Tukudede",
"tke": "Takwane",
"tkf": "Tukumanféd",
"tkg": "Tesaka Malagasy",
"tkl": "Tokelau",
"tkm": "Takelma",
"tkn": "Toku-No-Shima",
"tkp": "Tikopia",
"tkq": "Tee",
"tkr": "Tsakhur",
"tks": "Takestani",
"tkt": "Kathoriya Tharu",
"tku": "Upper Necaxa Totonac",
"tkv": "Mur Pano",
"tkw": "Teanu",
"tkx": "Tangko",
"tkz": "Takua",
"tl": "Tagalog",
"tla": "Southwestern Tepehuan",
"tlb": "Tobelo",
"tlc": "Yecuatla Totonac",
"tld": "Talaud",
"tlf": "Telefol",
"tlg": "Tofanma",
"tlh": "Klingon; tlhIngan Hol",
"tli": "Tlingit",
"tlj": "Talinga-Bwisi",
"tlk": "Taloki",
"tll": "Tetela",
"tlm": "Tolomako",
"tln": "Talondo'",
"tlo": "Talodi",
"tlp": "Filomena Mata-Coahuitlán Totonac",
"tlq": "Tai Loi",
"tlr": "Talise",
"tls": "Tambotalo",
"tlt": "Sou Nama; Teluti",
"tlu": "Tulehu",
"tlv": "Taliabu",
"tlx": "Khehek",
"tly": "Talysh",
"tma": "Tama (Chad)",
"tmb": "Katbol; Avava",
"tmc": "Tumak",
"tmd": "Haruai",
"tme": "Tremembé",
"tmf": "Toba-Maskoy",
"tmg": "Ternateño",
"tmh": "Tamashek",
"tmi": "Tutuba",
"tmj": "Samarokena",
"tmk": "Northwestern Tamang",
"tml": "Tamnim Citak",
"tmm": "Tai Thanh",
"tmn": "Taman (Indonesia)",
"tmo": "Temoq",
"tmq": "Tumleo",
"tmr": "Jewish Babylonian Aramaic (ca. 200-1200 CE)",
"tms": "Tima",
"tmt": "Tasmate",
"tmu": "Iau",
"tmv": "Tembo (Motembo)",
"tmw": "Temuan",
"tmy": "Tami",
"tmz": "Tamanaku",
"tn": "Tswana",
"tna": "Tacana",
"tnb": "Western Tunebo",
"tnc": "Tanimuca-Retuarã",
"tnd": "Angosturas Tunebo",
"tng": "Tobanga",
"tnh": "Maiani",
"tni": "Tandia",
"tnk": "Kwamera",
"tnl": "Lenakel",
"tnm": "Tabla",
"tnn": "North Tanna",
"tno": "Toromono",
"tnp": "Whitesands",
"tnq": "Taino",
"tnr": "Ménik",
"tns": "Tenis",
"tnt": "Tontemboan",
"tnu": "Tay Khang",
"tnv": "Tangchangya",
"tnw": "Tonsawang",
"tnx": "Tanema",
"tny": "Tongwe",
"tnz": "Ten'edn",
"to": "Tonga (Tonga Islands)",
"tob": "Toba",
"toc": "Coyutla Totonac",
"tod": "Toma",
"tof": "Gizrra",
"tog": "Tonga (Nyasa)",
"toh": "Gitonga",
"toi": "Tonga (Zambia)",
"toj": "Tojolabal",
"tok": "Toki Pona",
"tol": "Tolowa",
"tom": "Tombulu",
"too": "Xicotepec De Juárez Totonac",
"top": "Papantla Totonac",
"toq": "Toposa",
"tor": "Togbo-Vara Banda",
"tos": "Highland Totonac",
"tou": "Tho",
"tov": "Upper Taromi",
"tow": "Jemez",
"tox": "Tobian",
"toy": "Topoiyo",
"toz": "To",
"tpa": "Taupota",
"tpc": "Azoyú Me'phaa; Azoyú Tlapanec",
"tpe": "Tippera",
"tpf": "Tarpia",
"tpg": "Kula",
"tpi": "Tok Pisin",
"tpj": "Tapieté",
"tpk": "Tupinikin",
"tpl": "Tlacoapa Me'phaa; Tlacoapa Tlapanec",
"tpm": "Tampulma",
"tpn": "Tupinambá",
"tpo": "Tai Pao",
"tpp": "Pisaflores Tepehua",
"tpq": "Tukpa",
"tpr": "Tuparí",
"tpt": "Tlachichilco Tepehua",
"tpu": "Tampuan",
"tpv": "Tanapag",
"tpw": "Tupí",
"tpx": "Acatepec Me'phaa; Acatepec Tlapanec",
"tpy": "Trumai",
"tpz": "Tinputz",
"tqb": "Tembé",
"tql": "Lehali",
"tqm": "Turumsa",
"tqn": "Tenino",
"tqo": "Toaripi",
"tqp": "Tomoip",
"tqq": "Tunni",
"tqr": "Torona",
"tqt": "Western Totonac",
"tqu": "Touo",
"tqw": "Tonkawa",
"tr": "Turkish",
"tra": "Tirahi",
"trb": "Terebu",
"trc": "Copala Triqui",
"trd": "Turi",
"tre": "East Tarangan",
"trf": "Trinidadian Creole English",
"trg": "Lishán Didán",
"trh": "Turaka",
"tri": "Trió",
"trj": "Toram",
"trk": "Turkic languages",
"trl": "Traveller Scottish",
"trm": "Tregami",
"trn": "Trinitario",
"tro": "Tarao Naga",
"trp": "Kok Borok",
"trq": "San Martín Itunyoso Triqui",
"trr": "Taushiro",
"trs": "Chicahuaxtla Triqui",
"trt": "Tunggare",
"tru": "Turoyo; Surayt",
"trv": "Sediq; Seediq; Taroko",
"trw": "Torwali",
"trx": "Tringgus-Sembaan Bidayuh",
"try": "Turung",
"trz": "Torá",
"ts": "Tsonga",
"tsa": "Tsaangi",
"tsb": "Tsamai",
"tsc": "Tswa",
"tsd": "Tsakonian",
"tse": "Tunisian Sign Language",
"tsg": "Tausug",
"tsh": "Tsuvan",
"tsi": "Tsimshian",
"tsj": "Tshangla",
"tsk": "Tseku",
"tsl": "Ts'ün-Lao",
"tsm": "Turkish Sign Language; Türk İşaret Dili",
"tsp": "Northern Toussian",
"tsq": "Thai Sign Language",
"tsr": "Akei",
"tss": "Taiwan Sign Language",
"tst": "Tondi Songway Kiini",
"tsu": "Tsou",
"tsv": "Tsogo",
"tsw": "Tsishingini",
"tsx": "Mubami",
"tsy": "Tebul Sign Language",
"tsz": "Purepecha",
"tt": "Tatar",
"tta": "Tutelo",
"ttb": "Gaa",
"ttc": "Tektiteko",
"ttd": "Tauade",
"tte": "Bwanabwana",
"ttf": "Tuotomb",
"ttg": "Tutong",
"tth": "Upper Ta'oih",
"tti": "Tobati",
"ttj": "Tooro",
"ttk": "Totoro",
"ttl": "Totela",
"ttm": "Northern Tutchone",
"ttn": "Towei",
"tto": "Lower Ta'oih",
"ttp": "Tombelala",
"ttq": "Tawallammat Tamajaq",
"ttr": "Tera",
"tts": "Northeastern Thai",
"ttt": "Muslim Tat",
"ttu": "Torau",
"ttv": "Titan",
"ttw": "Long Wat",
"tty": "Sikaritai",
"ttz": "Tsum",
"tua": "Wiarumus",
"tub": "Tübatulabal",
"tuc": "Mutu",
"tud": "Tuxá",
"tue": "Tuyuca",
"tuf": "Central Tunebo",
"tug": "Tunia",
"tuh": "Taulil",
"tui": "Tupuri",
"tuj": "Tugutil",
"tul": "Tula",
"tum": "Tumbuka",
"tun": "Tunica",
"tuo": "Tucano",
"tup": "Tupi languages",
"tuq": "Tedaga",
"tus": "Tuscarora",
"tut": "Altaic languages",
"tuu": "Tututni",
"tuv": "Turkana",
"tuw": "Tungus languages",
"tux": "Tuxináwa",
"tuy": "Tugen",
"tuz": "Turka",
"tva": "Vaghua",
"tvd": "Tsuvadi",
"tve": "Te'un",
"tvk": "Southeast Ambrym",
"tvl": "Tuvalu",
"tvm": "Tela-Masbuar",
"tvn": "Tavoyan",
"tvo": "Tidore",
"tvs": "Taveta",
"tvt": "Tutsa Naga",
"tvu": "Tunen",
"tvw": "Sedoa",
"tvx": "Taivoan",
"tvy": "Timor Pidgin",
"tw": "Twi",
"twa": "Twana",
"twb": "Western Tawbuid",
"twc": "Teshenawa",
"twd": "Twents",
"twe": "Tewa (Indonesia)",
"twf": "Northern Tiwa",
"twg": "Tereweng",
"twh": "Tai Dón",
"twl": "Tawara",
"twm": "Tawang Monpa",
"twn": "Twendi",
"two": "Tswapong",
"twp": "Ere",
"twq": "Tasawaq",
"twr": "Southwestern Tarahumara",
"twt": "Turiwára",
"twu": "Termanu",
"tww": "Tuwari",
"twx": "Tewe",
"twy": "Tawoyan",
"txa": "Tombonuo",
"txb": "Tokharian B",
"txc": "Tsetsaut",
"txe": "Totoli",
"txg": "Tangut",
"txh": "Thracian",
"txi": "Ikpeng",
"txj": "Tarjumo",
"txm": "Tomini",
"txn": "West Tarangan",
"txo": "Toto",
"txq": "Tii",
"txr": "Tartessian",
"txs": "Tonsea",
"txt": "Citak",
"txu": "Kayapó",
"txx": "Tatana",
"txy": "Tanosy Malagasy",
"ty": "Tahitian",
"tya": "Tauya",
"tye": "Kyanga",
"tyh": "O'du",
"tyi": "Teke-Tsaayi",
"tyj": "Tai Do; Tai Yo",
"tyl": "Thu Lao",
"tyn": "Kombai",
"typ": "Thaypan",
"tyr": "Tai Daeng",
"tys": "Tày Sa Pa",
"tyt": "Tày Tac",
"tyu": "Kua",
"tyv": "Tuvinian",
"tyx": "Teke-Tyee",
"tyy": "Tiyaa",
"tyz": "Tày",
"tza": "Tanzanian Sign Language",
"tzh": "Tzeltal",
"tzj": "Tz'utujil",
"tzl": "Talossan",
"tzm": "Central Atlas Tamazight",
"tzn": "Tugun",
"tzo": "Tzotzil",
"tzx": "Tabriak",
"uam": "Uamué",
"uan": "Kuan",
"uar": "Tairuma",
"uba": "Ubang",
"ubi": "Ubi",
"ubl": "Buhi'non Bikol",
"ubr": "Ubir",
"ubu": "Umbu-Ungu",
"uby": "Ubykh",
"uda": "Uda",
"ude": "Udihe",
"udg": "Muduga",
"udi": "Udi",
"udj": "Ujir",
"udl": "Wuzlam",
"udm": "Udmurt",
"udu": "Uduk",
"ues": "Kioko",
"ufi": "Ufim",
"ug": "Uighur; Uyghur",
"uga": "Ugaritic",
"ugb": "Kuku-Ugbanh",
"uge": "Ughele",
"ugh": "Kubachi",
"ugn": "Ugandan Sign Language",
"ugo": "Ugong",
"ugy": "Uruguayan Sign Language",
"uha": "Uhami",
"uhn": "Damal",
"uis": "Uisai",
"uiv": "Iyive",
"uji": "Tanjijili",
"uk": "Ukrainian",
"uka": "Kaburi",
"ukg": "Ukuriguma",
"ukh": "Ukhwejo",
"uki": "Kui (India)",
"ukk": "Muak Sa-aak",
"ukl": "Ukrainian Sign Language",
"ukp": "Ukpe-Bayobiri",
"ukq": "Ukwa",
"uks": "Urubú-Kaapor Sign Language; Kaapor Sign Language",
"uku": "Ukue",
"ukv": "Kuku",
"ukw": "Ukwuani-Aboh-Ndoni",
"uky": "Kuuk-Yak",
"ula": "Fungwa",
"ulb": "Ulukwumi",
"ulc": "Ulch",
"ule": "Lule",
"ulf": "Usku; Afra",
"uli": "Ulithian",
"ulk": "Meriam Mir",
"ull": "Ullatan",
"ulm": "Ulumanda'",
"uln": "Unserdeutsch",
"ulu": "Uma' Lung",
"ulw": "Ulwa",
"uma": "Umatilla",
"umb": "Umbundu",
"umc": "Marrucinian",
"umd": "Umbindhamu",
"umg": "Morrobalama; Umbuygamu",
"umi": "Ukit",
"umm": "Umon",
"umn": "Makyan Naga",
"umo": "Umotína",
"ump": "Umpila",
"umr": "Umbugarla",
"ums": "Pendau",
"umu": "Munsee",
"una": "North Watut",
"und": "Undetermined",
"une": "Uneme",
"ung": "Ngarinyin",
"uni": "Uni",
"unk": "Enawené-Nawé",
"unm": "Unami",
"unn": "Kurnai",
"unr": "Mundari",
"unu": "Unubahe",
"unx": "Munda",
"unz": "Unde Kaili",
"uon": "Kulon",
"upi": "Umeda",
"upv": "Uripiv-Wala-Rano-Atchin",
"ur": "Urdu",
"ura": "Urarina",
"urb": "Urubú-Kaapor; Kaapor",
"urc": "Urningangg",
"ure": "Uru",
"urf": "Uradhi",
"urg": "Urigina",
"urh": "Urhobo",
"uri": "Urim",
"urj": "Uralic languages",
"urk": "Urak Lawoi'",
"url": "Urali",
"urm": "Urapmin",
"urn": "Uruangnirin",
"uro": "Ura (Papua New Guinea)",
"urp": "Uru-Pa-In",
"urr": "Lehalurup; Löyöp",
"urt": "Urat",
"uru": "Urumi",
"urv": "Uruava",
"urw": "Sop",
"urx": "Urimo",
"ury": "Orya",
"urz": "Uru-Eu-Wau-Wau",
"usa": "Usarufa",
"ush": "Ushojo",
"usi": "Usui",
"usk": "Usaghade",
"usp": "Uspanteco",
"uss": "us-Saare",
"usu": "Uya",
"uta": "Otank",
"ute": "Ute-Southern Paiute",
"uth": "ut-Hun",
"utp": "Amba (Solomon Islands)",
"utr": "Etulo",
"utu": "Utu",
"uum": "Urum",
"uur": "Ura (Vanuatu)",
"uuu": "U",
"uve": "West Uvean; Fagauvea",
"uvh": "Uri",
"uvl": "Lote",
"uwa": "Kuku-Uwanh",
"uya": "Doko-Uyanga",
"uz": "Uzbek",
"uzn": "Northern Uzbek",
"uzs": "Southern Uzbek",
"vaa": "Vaagri Booli",
"vae": "Vale",
"vaf": "Vafsi",
"vag": "Vagla",
"vah": "Varhadi-Nagpuri",
"vai": "Vai",
"vaj": "Sekele; Northwestern ǃKung; Vasekele",
"val": "Vehes",
"vam": "Vanimo",
"van": "Valman",
"vao": "Vao",
"vap": "Vaiphei",
"var": "Huarijio",
"vas": "Vasavi",
"vau": "Vanuma",
"vav": "Varli",
"vay": "Wayu",
"vbb": "Southeast Babar",
"vbk": "Southwestern Bontok",
"ve": "Venda",
"vec": "Venetian",
"ved": "Veddah",
"vel": "Veluws",
"vem": "Vemgo-Mabas",
"veo": "Ventureño",
"vep": "Veps",
"ver": "Mom Jango",
"vgr": "Vaghri",
"vgt": "Vlaamse Gebarentaal; Flemish Sign Language",
"vi": "Vietnamese",
"vic": "Virgin Islands Creole English",
"vid": "Vidunda",
"vif": "Vili",
"vig": "Viemo",
"vil": "Vilela",
"vin": "Vinza",
"vis": "Vishavan",
"vit": "Viti",
"viv": "Iduna",
"vka": "Kariyarra",
"vkj": "Kujarge",
"vkk": "Kaur",
"vkl": "Kulisusu",
"vkm": "Kamakan",
"vkn": "Koro Nulu",
"vko": "Kodeoha",
"vkp": "Korlai Creole Portuguese",
"vkt": "Tenggarong Kutai Malay",
"vku": "Kurrama",
"vkz": "Koro Zuba",
"vlp": "Valpei",
"vls": "Vlaams",
"vma": "Martuyhunira",
"vmb": "Barbaram",
"vmc": "Juxtlahuaca Mixtec",
"vmd": "Mudu Koraga",
"vme": "East Masela",
"vmf": "Mainfränkisch",
"vmg": "Lungalunga",
"vmh": "Maraghei",
"vmi": "Miwa",
"vmj": "Ixtayutla Mixtec",
"vmk": "Makhuwa-Shirima",
"vml": "Malgana",
"vmm": "Mitlatongo Mixtec",
"vmp": "Soyaltepec Mazatec",
"vmq": "Soyaltepec Mixtec",
"vmr": "Marenje",
"vms": "Moksela",
"vmu": "Muluridyi",
"vmv": "Valley Maidu",
"vmw": "Makhuwa",
"vmx": "Tamazola Mixtec",
"vmy": "Ayautla Mazatec",
"vmz": "Mazatlán Mazatec",
"vnk": "Vano; Lovono",
"vnm": "Vinmavis; Neve'ei",
"vnp": "Vunapu",
"vo": "Volapük",
"vor": "Voro",
"vot": "Votic",
"vra": "Vera'a",
"vro": "Võro",
"vrs": "Varisi",
"vrt": "Burmbar; Banam Bay",
"vsi": "Moldova Sign Language",
"vsl": "Venezuelan Sign Language",
"vsv": "Valencian Sign Language; Llengua de signes valenciana",
"vto": "Vitou",
"vum": "Vumbu",
"vun": "Vunjo",
"vut": "Vute",
"vwa": "Awa (China)",
"wa": "Walloon",
"waa": "Walla Walla",
"wab": "Wab",
"wac": "Wasco-Wishram",
"wad": "Wamesa; Wondama",
"wae": "Walser",
"waf": "Wakoná",
"wag": "Wa'ema",
"wah": "Watubela",
"wai": "Wares",
"waj": "Waffa",
"wak": "Wakashan languages",
"wal": "Wolaytta; Wolaitta",
"wam": "Wampanoag",
"wan": "Wan",
"wao": "Wappo",
"wap": "Wapishana",
"waq": "Wagiman",
"war": "Waray (Philippines)",
"was": "Washo",
"wat": "Kaninuwa",
"wau": "Waurá",
"wav": "Waka",
"waw": "Waiwai",
"wax": "Watam; Marangis",
"way": "Wayana",
"waz": "Wampur",
"wba": "Warao",
"wbb": "Wabo",
"wbe": "Waritai",
"wbf": "Wara",
"wbh": "Wanda",
"wbi": "Vwanji",
"wbj": "Alagwa",
"wbk": "Waigali",
"wbl": "Wakhi",
"wbm": "Wa",
"wbp": "Warlpiri",
"wbq": "Waddar",
"wbr": "Wagdi",
"wbs": "West Bengal Sign Language",
"wbt": "Warnman",
"wbv": "Wajarri",
"wbw": "Woi",
"wca": "Yanomámi",
"wci": "Waci Gbe",
"wdd": "Wandji",
"wdg": "Wadaginam",
"wdj": "Wadjiginy",
"wdk": "Wadikali",
"wdt": "Wendat",
"wdu": "Wadjigu",
"wdy": "Wadjabangayi",
"wea": "Wewaw",
"wec": "Wè Western",
"wed": "Wedau",
"weg": "Wergaia",
"weh": "Weh",
"wei": "Kiunum",
"wem": "Weme Gbe",
"wen": "Sorbian languages",
"weo": "Wemale",
"wep": "Westphalien",
"wer": "Weri",
"wes": "Cameroon Pidgin",
"wet": "Perai",
"weu": "Rawngtu Chin",
"wew": "Wejewa",
"wfg": "Yafi; Zorop",
"wga": "Wagaya",
"wgb": "Wagawaga",
"wgg": "Wangkangurru; Wangganguru",
"wgi": "Wahgi",
"wgo": "Waigeo",
"wgu": "Wirangu",
"wgy": "Warrgamay",
"wha": "Sou Upaa; Manusela",
"whg": "North Wahgi",
"whk": "Wahau Kenyah",
"whu": "Wahau Kayan",
"wib": "Southern Toussian",
"wic": "Wichita",
"wie": "Wik-Epa",
"wif": "Wik-Keyangan",
"wig": "Wik Ngathan",
"wih": "Wik-Me'anha",
"wii": "Minidien",
"wij": "Wik-Iiyanh",
"wik": "Wikalkan",
"wil": "Wilawila",
"wim": "Wik-Mungkan",
"win": "Ho-Chunk",
"wir": "Wiraféd",
"wiu": "Wiru",
"wiv": "Vitu",
"wiy": "Wiyot",
"wja": "Waja",
"wji": "Warji",
"wka": "Kw'adza",
"wkb": "Kumbaran",
"wkd": "Wakde; Mo",
"wkl": "Kalanadi",
"wkr": "Keerray-Woorroong",
"wku": "Kunduvadi",
"wkw": "Wakawaka",
"wky": "Wangkayutyuru",
"wla": "Walio",
"wlc": "Mwali Comorian",
"wle": "Wolane",
"wlg": "Kunbarlang",
"wlh": "Welaun",
"wli": "Waioli",
"wlk": "Wailaki",
"wll": "Wali (Sudan)",
"wlm": "Middle Welsh",
"wlo": "Wolio",
"wlr": "Wailapa",
"wls": "Wallisian",
"wlu": "Wuliwuli",
"wlv": "Wichí Lhamtés Vejoz",
"wlw": "Walak",
"wlx": "Wali (Ghana)",
"wly": "Waling",
"wma": "Mawa (Nigeria)",
"wmb": "Wambaya",
"wmc": "Wamas",
"wmd": "Mamaindé",
"wme": "Wambule",
"wmg": "Western Minyag",
"wmh": "Waima'a",
"wmi": "Wamin",
"wmm": "Maiwa (Indonesia)",
"wmn": "Waamwang",
"wmo": "Wom (Papua New Guinea)",
"wms": "Wambon",
"wmt": "Walmajarri",
"wmw": "Mwani",
"wmx": "Womo",
"wnb": "Wanambre",
"wnc": "Wantoat",
"wnd": "Wandarang",
"wne": "Waneci",
"wng": "Wanggom",
"wni": "Ndzwani Comorian",
"wnk": "Wanukaka",
"wnm": "Wanggamala",
"wnn": "Wunumara",
"wno": "Wano",
"wnp": "Wanap",
"wnu": "Usan",
"wnw": "Wintu",
"wny": "Wanyi; Waanyi",
"wo": "Wolof",
"woa": "Kuwema; Tyaraity",
"wob": "Wè Northern",
"woc": "Wogeo",
"wod": "Wolani",
"woe": "Woleaian",
"wof": "Gambian Wolof",
"wog": "Wogamusin",
"woi": "Kamang",
"wok": "Longto",
"wom": "Wom (Nigeria)",
"won": "Wongo",
"woo": "Manombai",
"wor": "Woria",
"wos": "Hanga Hundi",
"wow": "Wawonii",
"woy": "Weyto",
"wpc": "Maco",
"wrb": "Waluwarra; Warluwara",
"wrg": "Warungu; Gudjal",
"wrh": "Wiradjuri",
"wri": "Wariyangga",
"wrk": "Garrwa",
"wrl": "Warlmanpa",
"wrm": "Warumungu",
"wrn": "Warnang",
"wro": "Worrorra",
"wrp": "Waropen",
"wrr": "Wardaman",
"wrs": "Waris",
"wru": "Waru",
"wrv": "Waruna",
"wrw": "Gugu Warra",
"wrx": "Wae Rana",
"wry": "Merwari",
"wrz": "Waray (Australia)",
"wsa": "Warembori",
"wsg": "Adilabad Gondi",
"wsi": "Wusi",
"wsk": "Waskia",
"wsr": "Owenia",
"wss": "Wasa",
"wsu": "Wasu",
"wsv": "Wotapuri-Katarqalai",
"wtf": "Watiwa",
"wth": "Wathawurrung",
"wti": "Berta",
"wtk": "Watakataui",
"wtm": "Mewati",
"wtw": "Wotu",
"wua": "Wikngenchera",
"wub": "Wunambal",
"wud": "Wudu",
"wuh": "Wutunhua",
"wul": "Silimo",
"wum": "Wumbvu",
"wun": "Bungu",
"wur": "Wurrugu",
"wut": "Wutung",
"wuu": "Wu Chinese",
"wuv": "Wuvulu-Aua",
"wux": "Wulna",
"wuy": "Wauyai",
"wwa": "Waama",
"wwb": "Wakabunga",
"wwo": "Wetamut; Dorig",
"wwr": "Warrwa",
"www": "Wawa",
"wxa": "Waxianghua",
"wxw": "Wardandi",
"wyb": "Wangaaybuwan-Ngiyambaa",
"wyi": "Woiwurrung",
"wym": "Wymysorys",
"wyn": "Wyandot",
"wyr": "Wayoró",
"wyy": "Western Fijian",
"xaa": "Andalusian Arabic",
"xab": "Sambe",
"xac": "Kachari",
"xad": "Adai",
"xae": "Aequian",
"xag": "Aghwan",
"xai": "Kaimbé",
"xaj": "Ararandewára",
"xak": "Máku",
"xal": "Kalmyk; Oirat",
"xam": "ǀXam",
"xan": "Xamtanga",
"xao": "Khao",
"xap": "Apalachee",
"xaq": "Aquitanian",
"xar": "Karami",
"xas": "Kamas",
"xat": "Katawixi",
"xau": "Kauwera",
"xav": "Xavánte",
"xaw": "Kawaiisu",
"xay": "Kayan Mahakam",
"xbb": "Lower Burdekin",
"xbc": "Bactrian",
"xbd": "Bindal",
"xbe": "Bigambal",
"xbg": "Bunganditj",
"xbi": "Kombio",
"xbj": "Birrpayi",
"xbm": "Middle Breton",
"xbn": "Kenaboi",
"xbo": "Bolgarian",
"xbp": "Bibbulman",
"xbr": "Kambera",
"xbw": "Kambiwá",
"xby": "Batjala; Batyala",
"xcb": "Cumbric",
"xcc": "Camunic",
"xce": "Celtiberian",
"xcg": "Cisalpine Gaulish",
"xch": "Chemakum; Chimakum",
"xcl": "Classical Armenian",
"xcm": "Comecrudo",
"xcn": "Cotoname",
"xco": "Chorasmian",
"xcr": "Carian",
"xct": "Classical Tibetan",
"xcu": "Curonian",
"xcv": "Chuvantsy",
"xcw": "Coahuilteco",
"xcy": "Cayuse",
"xda": "Darkinyung",
"xdc": "Dacian",
"xdk": "Dharuk",
"xdm": "Edomite",
"xdo": "Kwandu",
"xdq": "Kaitag",
"xdy": "Malayic Dayak",
"xeb": "Eblan",
"xed": "Hdi",
"xeg": "ǁXegwi",
"xel": "Kelo",
"xem": "Kembayan",
"xep": "Epi-Olmec",
"xer": "Xerénte",
"xes": "Kesawai",
"xet": "Xetá",
"xeu": "Keoru-Ahia",
"xfa": "Faliscan",
"xga": "Galatian",
"xgb": "Gbin",
"xgd": "Gudang",
"xgf": "Gabrielino-Fernandeño",
"xgg": "Goreng",
"xgi": "Garingbal",
"xgl": "Galindan",
"xgm": "Dharumbal; Guwinmal",
"xgn": "Mongolian languages",
"xgr": "Garza",
"xgu": "Unggumi",
"xgw": "Guwa",
"xh": "Xhosa",
"xha": "Harami",
"xhc": "Hunnic",
"xhd": "Hadrami",
"xhe": "Khetrani",
"xhm": "Middle Khmer (1400 to 1850 CE)",
"xhr": "Hernican",
"xht": "Hattic",
"xhu": "Hurrian",
"xhv": "Khua",
"xib": "Iberian",
"xii": "Xiri",
"xil": "Illyrian",
"xin": "Xinca",
"xir": "Xiriâna",
"xis": "Kisan",
"xiv": "Indus Valley Language",
"xiy": "Xipaya",
"xjb": "Minjungbal",
"xjt": "Jaitmatang",
"xka": "Kalkoti",
"xkb": "Northern Nago",
"xkc": "Kho'ini",
"xkd": "Mendalam Kayan",
"xke": "Kereho",
"xkf": "Khengkha",
"xkg": "Kagoro",
"xki": "Kenyan Sign Language",
"xkj": "Kajali",
"xkk": "Kachok; Kaco'",
"xkl": "Mainstream Kenyah",
"xkn": "Kayan River Kayan",
"xko": "Kiorr",
"xkp": "Kabatei",
"xkq": "Koroni",
"xkr": "Xakriabá",
"xks": "Kumbewaha",
"xkt": "Kantosi",
"xku": "Kaamba",
"xkv": "Kgalagadi",
"xkw": "Kembra",
"xkx": "Karore",
"xky": "Uma' Lasan",
"xkz": "Kurtokha",
"xla": "Kamula",
"xlb": "Loup B",
"xlc": "Lycian",
"xld": "Lydian",
"xle": "Lemnian",
"xlg": "Ligurian (Ancient)",
"xli": "Liburnian",
"xln": "Alanic",
"xlo": "Loup A",
"xlp": "Lepontic",
"xls": "Lusitanian",
"xlu": "Cuneiform Luwian",
"xly": "Elymian",
"xma": "Mushungulu",
"xmb": "Mbonga",
"xmc": "Makhuwa-Marrevone",
"xmd": "Mbudum",
"xme": "Median",
"xmf": "Mingrelian",
"xmg": "Mengaka",
"xmh": "Kugu-Muminh",
"xmj": "Majera",
"xmk": "Ancient Macedonian",
"xml": "Malaysian Sign Language",
"xmm": "Manado Malay",
"xmn": "Manichaean Middle Persian",
"xmo": "Morerebi",
"xmp": "Kuku-Mu'inh",
"xmq": "Kuku-Mangk",
"xmr": "Meroitic",
"xms": "Moroccan Sign Language",
"xmt": "Matbat",
"xmu": "Kamu",
"xmv": "Antankarana Malagasy; Tankarana Malagasy",
"xmw": "Tsimihety Malagasy",
"xmx": "Salawati; Maden",
"xmy": "Mayaguduna",
"xmz": "Mori Bawah",
"xna": "Ancient North Arabian",
"xnb": "Kanakanabu",
"xnd": "Na-Dene languages",
"xng": "Middle Mongolian",
"xnh": "Kuanhua",
"xni": "Ngarigu",
"xnj": "Ngoni (Tanzania)",
"xnk": "Nganakarti",
"xnm": "Ngumbarl",
"xnn": "Northern Kankanay",
"xno": "Anglo-Norman",
"xnq": "Ngoni (Mozambique)",
"xnr": "Kangri",
"xns": "Kanashi",
"xnt": "Narragansett",
"xnu": "Nukunul",
"xny": "Nyiyaparli",
"xnz": "Kenzi; Mattoki",
"xoc": "O'chi'chi'",
"xod": "Kokoda",
"xog": "Soga",
"xoi": "Kominimung",
"xok": "Xokleng",
"xom": "Komo (Sudan)",
"xon": "Konkomba",
"xoo": "Xukurú",
"xop": "Kopar",
"xor": "Korubo",
"xow": "Kowaki",
"xpa": "Pirriya",
"xpb": "Northeastern Tasmanian; Pyemmairrener",
"xpc": "Pecheneg",
"xpd": "Oyster Bay Tasmanian",
"xpe": "Liberia Kpelle",
"xpf": "Southeast Tasmanian; Nuenonne",
"xpg": "Phrygian",
"xph": "North Midlands Tasmanian; Tyerrenoterpanner",
"xpi": "Pictish",
"xpj": "Mpalitjanh",
"xpk": "Kulina Pano",
"xpl": "Port Sorell Tasmanian",
"xpm": "Pumpokol",
"xpn": "Kapinawá",
"xpo": "Pochutec",
"xpp": "Puyo-Paekche",
"xpq": "Mohegan-Pequot",
"xpr": "Parthian",
"xps": "Pisidian",
"xpt": "Punthamara",
"xpu": "Punic",
"xpv": "Northern Tasmanian; Tommeginne",
"xpw": "Northwestern Tasmanian; Peerapper",
"xpx": "Southwestern Tasmanian; Toogee",
"xpy": "Puyo",
"xpz": "Bruny Island Tasmanian",
"xqa": "Karakhanid",
"xqt": "Qatabanian",
"xra": "Krahô",
"xrb": "Eastern Karaboro",
"xrd": "Gundungurra",
"xre": "Kreye",
"xrg": "Minang",
"xri": "Krikati-Timbira",
"xrm": "Armazic",
"xrn": "Arin",
"xrr": "Raetic",
"xrt": "Aranama-Tamique",
"xru": "Marriammu",
"xrw": "Karawa",
"xsa": "Sabaean",
"xsb": "Sambal",
"xsc": "Scythian",
"xsd": "Sidetic",
"xse": "Sempan",
"xsh": "Shamang",
"xsi": "Sio",
"xsj": "Subi",
"xsl": "South Slavey",
"xsm": "Kasem",
"xsn": "Sanga (Nigeria)",
"xso": "Solano",
"xsp": "Silopi",
"xsq": "Makhuwa-Saka",
"xsr": "Sherpa",
"xss": "Assan",
"xsu": "Sanumá",
"xsv": "Sudovian",
"xsy": "Saisiyat",
"xta": "Alcozauca Mixtec",
"xtb": "Chazumba Mixtec",
"xtc": "Katcha-Kadugli-Miri",
"xtd": "Diuxi-Tilantongo Mixtec",
"xte": "Ketengban",
"xtg": "Transalpine Gaulish",
"xth": "Yitha Yitha",
"xti": "Sinicahua Mixtec",
"xtj": "San Juan Teita Mixtec",
"xtl": "Tijaltepec Mixtec",
"xtm": "Magdalena Peñasco Mixtec",
"xtn": "Northern Tlaxiaco Mixtec",
"xto": "Tokharian A",
"xtp": "San Miguel Piedras Mixtec",
"xtq": "Tumshuqese",
"xtr": "Early Tripuri",
"xts": "Sindihui Mixtec",
"xtt": "Tacahua Mixtec",
"xtu": "Cuyamecalco Mixtec",
"xtv": "Thawa",
"xtw": "Tawandê",
"xty": "Yoloxochitl Mixtec",
"xua": "Alu Kurumba",
"xub": "Betta Kurumba",
"xud": "Umiida",
"xug": "Kunigami",
"xuj": "Jennu Kurumba",
"xul": "Ngunawal; Nunukul",
"xum": "Umbrian",
"xun": "Unggaranggu",
"xuo": "Kuo",
"xup": "Upper Umpqua",
"xur": "Urartian",
"xut": "Kuthant",
"xuu": "Kxoe; Khwedam",
"xve": "Venetic",
"xvi": "Kamviri",
"xvn": "Vandalic",
"xvo": "Volscian",
"xvs": "Vestinian",
"xwa": "Kwaza",
"xwc": "Woccon",
"xwd": "Wadi Wadi",
"xwe": "Xwela Gbe",
"xwg": "Kwegu",
"xwj": "Wajuk",
"xwk": "Wangkumara",
"xwl": "Western Xwla Gbe",
"xwo": "Written Oirat",
"xwr": "Kwerba Mamberamo",
"xwt": "Wotjobaluk",
"xww": "Wemba Wemba",
"xxb": "Boro (Ghana)",
"xxk": "Ke'o",
"xxm": "Minkin",
"xxr": "Koropó",
"xxt": "Tambora",
"xya": "Yaygir",
"xyb": "Yandjibara",
"xyj": "Mayi-Yapi",
"xyk": "Mayi-Kulan",
"xyl": "Yalakalore",
"xyt": "Mayi-Thakurti",
"xyy": "Yorta Yorta",
"xzh": "Zhang-Zhung",
"xzm": "Zemgalian",
"xzp": "Ancient Zapotec",
"yaa": "Yaminahua",
"yab": "Yuhup",
"yac": "Pass Valley Yali",
"yad": "Yagua",
"yae": "Pumé",
"yaf": "Yaka (Democratic Republic of Congo)",
"yag": "Yámana",
"yah": "Yazgulyam",
"yai": "Yagnobi",
"yaj": "Banda-Yangere",
"yak": "Yakama",
"yal": "Yalunka",
"yam": "Yamba",
"yan": "Mayangna",
"yao": "Yao",
"yap": "Yapese",
"yaq": "Yaqui",
"yar": "Yabarana",
"yas": "Nugunu (Cameroon)",
"yat": "Yambeta",
"yau": "Yuwana",
"yav": "Yangben",
"yaw": "Yawalapití",
"yax": "Yauma",
"yay": "Agwagwune",
"yaz": "Lokaa",
"yba": "Yala",
"ybb": "Yemba",
"ybe": "West Yugur",
"ybh": "Yakha",
"ybi": "Yamphu",
"ybj": "Hasha",
"ybk": "Bokha",
"ybl": "Yukuben",
"ybm": "Yaben",
"ybn": "Yabaâna",
"ybo": "Yabong",
"ybx": "Yawiyo",
"yby": "Yaweyuha",
"ych": "Chesu",
"ycl": "Lolopo",
"ycn": "Yucuna",
"ycp": "Chepya",
"yda": "Yanda",
"ydd": "Eastern Yiddish",
"yde": "Yangum Dey",
"ydg": "Yidgha",
"ydk": "Yoidik",
"yea": "Ravula",
"yec": "Yeniche",
"yee": "Yimas",
"yei": "Yeni",
"yej": "Yevanic",
"yel": "Yela",
"yer": "Tarok",
"yes": "Nyankpa",
"yet": "Yetfa",
"yeu": "Yerukula",
"yev": "Yapunda",
"yey": "Yeyi",
"yga": "Malyangapa",
"ygi": "Yiningayi",
"ygl": "Yangum Gel",
"ygm": "Yagomi",
"ygp": "Gepo",
"ygr": "Yagaria",
"ygs": "Yolŋu Sign Language",
"ygu": "Yugul",
"ygw": "Yagwoia",
"yha": "Baha Buyang",
"yhd": "Judeo-Iraqi Arabic",
"yhl": "Hlepho Phowa",
"yhs": "Yan-nhaŋu Sign Language",
"yi": "Yiddish",
"yia": "Yinggarda",
"yif": "Ache",
"yig": "Wusa Nasu",
"yih": "Western Yiddish",
"yii": "Yidiny",
"yij": "Yindjibarndi",
"yik": "Dongshanba Lalo",
"yil": "Yindjilandji",
"yim": "Yimchungru Naga",
"yin": "Riang Lai; Yinchia",
"yip": "Pholo",
"yiq": "Miqie",
"yir": "North Awyu",
"yis": "Yis",
"yit": "Eastern Lalu",
"yiu": "Awu",
"yiv": "Northern Nisu",
"yix": "Axi Yi",
"yiz": "Azhe",
"yka": "Yakan",
"ykg": "Northern Yukaghir",
"yki": "Yoke",
"ykk": "Yakaikeke",
"ykl": "Khlula",
"ykm": "Kap",
"ykn": "Kua-nsi",
"yko": "Yasa",
"ykr": "Yekora",
"ykt": "Kathu",
"yku": "Kuamasi",
"yky": "Yakoma",
"yla": "Yaul",
"ylb": "Yaleba",
"yle": "Yele",
"ylg": "Yelogu",
"yli": "Angguruk Yali",
"yll": "Yil",
"ylm": "Limi",
"yln": "Langnian Buyang",
"ylo": "Naluo Yi",
"ylr": "Yalarnnga",
"ylu": "Aribwaung",
"yly": "Nyâlayu; Nyelâyu",
"ymb": "Yambes",
"ymc": "Southern Muji",
"ymd": "Muda",
"yme": "Yameo",
"ymg": "Yamongeri",
"ymh": "Mili",
"ymi": "Moji",
"ymk": "Makwe",
"yml": "Iamalele",
"ymm": "Maay",
"ymn": "Yamna; Sunum",
"ymo": "Yangum Mon",
"ymp": "Yamap",
"ymq": "Qila Muji",
"ymr": "Malasar",
"yms": "Mysian",
"ymx": "Northern Muji",
"ymz": "Muzi",
"yna": "Aluo",
"ynd": "Yandruwandha",
"yne": "Lang'e",
"yng": "Yango",
"ynk": "Naukan Yupik",
"ynl": "Yangulam",
"ynn": "Yana",
"yno": "Yong",
"ynq": "Yendang",
"yns": "Yansi",
"ynu": "Yahuna",
"yo": "Yoruba",
"yob": "Yoba",
"yog": "Yogad",
"yoi": "Yonaguni",
"yok": "Yokuts",
"yol": "Yola",
"yom": "Yombe",
"yon": "Yongkom",
"yot": "Yotti",
"yox": "Yoron",
"yoy": "Yoy",
"ypa": "Phala",
"ypb": "Labo Phowa",
"ypg": "Phola",
"yph": "Phupha",
"ypk": "Yupik languages",
"ypm": "Phuma",
"ypn": "Ani Phowa",
"ypo": "Alo Phola",
"ypp": "Phupa",
"ypz": "Phuza",
"yra": "Yerakai",
"yrb": "Yareba",
"yre": "Yaouré",
"yrk": "Nenets",
"yrl": "Nhengatu",
"yrm": "Yirrk-Mel",
"yrn": "Yerong",
"yro": "Yaroamë",
"yrs": "Yarsun",
"yrw": "Yarawata",
"yry": "Yarluyandi",
"ysc": "Yassic",
"ysd": "Samatao",
"ysg": "Sonaga",
"ysl": "Yugoslavian Sign Language",
"ysm": "Myanmar Sign Language",
"ysn": "Sani",
"yso": "Nisi (China)",
"ysp": "Southern Lolopo",
"ysr": "Sirenik Yupik",
"yss": "Yessan-Mayo",
"ysy": "Sanie",
"yta": "Talu",
"ytl": "Tanglang",
"ytp": "Thopho",
"ytw": "Yout Wam",
"yty": "Yatay",
"yua": "Yucateco; Yucatec Maya",
"yub": "Yugambal",
"yuc": "Yuchi",
"yud": "Judeo-Tripolitanian Arabic",
"yue": "Yue Chinese; Cantonese",
"yuf": "Havasupai-Walapai-Yavapai",
"yug": "Yug",
"yui": "Yurutí",
"yuj": "Karkar-Yuri",
"yuk": "Yuki",
"yul": "Yulu",
"yum": "Quechan",
"yun": "Bena (Nigeria)",
"yup": "Yukpa",
"yuq": "Yuqui",
"yur": "Yurok",
"yut": "Yopno",
"yuw": "Yau (Morobe Province)",
"yux": "Southern Yukaghir",
"yuy": "East Yugur",
"yuz": "Yuracare",
"yva": "Yawa",
"yvt": "Yavitero",
"ywa": "Kalou",
"ywg": "Yinhawangka",
"ywl": "Western Lalu",
"ywn": "Yawanawa",
"ywq": "Wuding-Luquan Yi",
"ywr": "Yawuru",
"ywt": "Xishanba Lalo; Central Lalo",
"ywu": "Wumeng Nasu",
"yww": "Yawarawarga",
"yxa": "Mayawali",
"yxg": "Yagara",
"yxl": "Yardliyawarra",
"yxm": "Yinwum",
"yxu": "Yuyu",
"yxy": "Yabula Yabula",
"yyr": "Yir Yoront",
"yyu": "Yau (Sandaun Province)",
"yyz": "Ayizi",
"yzg": "E'ma Buyang",
"yzk": "Zokhuo",
"za": "Zhuang; Chuang",
"zaa": "Sierra de Juárez Zapotec",
"zab": "Western Tlacolula Valley Zapotec; San Juan Guelavía Zapotec",
"zac": "Ocotlán Zapotec",
"zad": "Cajonos Zapotec",
"zae": "Yareni Zapotec",
"zaf": "Ayoquesco Zapotec",
"zag": "Zaghawa",
"zah": "Zangwal",
"zai": "Isthmus Zapotec",
"zaj": "Zaramo",
"zak": "Zanaki",
"zal": "Zauzou",
"zam": "Miahuatlán Zapotec",
"zao": "Ozolotepec Zapotec",
"zap": "Zapotec",
"zaq": "Aloápam Zapotec",
"zar": "Rincón Zapotec",
"zas": "Santo Domingo Albarradas Zapotec",
"zat": "Tabaa Zapotec",
"zau": "Zangskari",
"zav": "Yatzachi Zapotec",
"zaw": "Mitla Zapotec",
"zax": "Xadani Zapotec",
"zay": "Zayse-Zergulla; Zaysete",
"zaz": "Zari",
"zba": "Balaibalan",
"zbc": "Central Berawan",
"zbe": "East Berawan",
"zbl": "Blissymbols; Bliss; Blissymbolics",
"zbt": "Batui",
"zbu": "Bu (Bauchi State)",
"zbw": "West Berawan",
"zca": "Coatecas Altas Zapotec",
"zcd": "Las Delicias Zapotec",
"zch": "Central Hongshuihe Zhuang",
"zdj": "Ngazidja Comorian",
"zea": "Zeeuws",
"zeg": "Zenag",
"zeh": "Eastern Hongshuihe Zhuang",
"zen": "Zenaga",
"zga": "Kinga",
"zgb": "Guibei Zhuang",
"zgh": "Standard Moroccan Tamazight",
"zgm": "Minz Zhuang",
"zgn": "Guibian Zhuang",
"zgr": "Magori",
"zh": "Chinese",
"zhb": "Zhaba",
"zhd": "Dai Zhuang",
"zhi": "Zhire",
"zhn": "Nong Zhuang",
"zhw": "Zhoa",
"zhx": "Chinese (family)",
"zia": "Zia",
"zib": "Zimbabwe Sign Language",
"zik": "Zimakani",
"zil": "Zialo",
"zim": "Mesme",
"zin": "Zinza",
"ziw": "Zigula",
"ziz": "Zizilivakan",
"zka": "Kaimbulawa",
"zkb": "Koibal",
"zkd": "Kadu",
"zkg": "Koguryo",
"zkh": "Khorezmian",
"zkk": "Karankawa",
"zkn": "Kanan",
"zko": "Kott",
"zkp": "São Paulo Kaingáng",
"zkr": "Zakhring",
"zkt": "Kitan",
"zku": "Kaurna",
"zkv": "Krevinian",
"zkz": "Khazar",
"zla": "Zula",
"zle": "East Slavic languages",
"zlj": "Liujiang Zhuang",
"zlm": "Malay (individual language)",
"zln": "Lianshan Zhuang",
"zlq": "Liuqian Zhuang",
"zls": "South Slavic languages",
"zlw": "West Slavic languages",
"zma": "Manda (Australia)",
"zmb": "Zimba",
"zmc": "Margany",
"zmd": "Maridan",
"zme": "Mangerr",
"zmf": "Mfinu",
"zmg": "Marti Ke",
"zmh": "Makolkol",
"zmi": "Negeri Sembilan Malay",
"zmj": "Maridjabin",
"zmk": "Mandandanyi",
"zml": "Matngala",
"zmm": "Marimanindji; Marramaninyshi",
"zmn": "Mbangwe",
"zmo": "Molo",
"zmp": "Mpuono",
"zmq": "Mituku",
"zmr": "Maranunggu",
"zms": "Mbesa",
"zmt": "Maringarr",
"zmu": "Muruwari",
"zmv": "Mbariman-Gudhinma",
"zmw": "Mbo (Democratic Republic of Congo)",
"zmx": "Bomitaba",
"zmy": "Mariyedi",
"zmz": "Mbandja",
"zna": "Zan Gula",
"znd": "Zande languages",
"zne": "Zande (individual language)",
"zng": "Mang",
"znk": "Manangkari",
"zns": "Mangas",
"zoc": "Copainalá Zoque",
"zoh": "Chimalapa Zoque",
"zom": "Zou",
"zoo": "Asunción Mixtepec Zapotec",
"zoq": "Tabasco Zoque",
"zor": "Rayón Zoque",
"zos": "Francisco León Zoque",
"zpa": "Lachiguiri Zapotec",
"zpb": "Yautepec Zapotec",
"zpc": "Choapan Zapotec",
"zpd": "Southeastern Ixtlán Zapotec",
"zpe": "Petapa Zapotec",
"zpf": "San Pedro Quiatoni Zapotec",
"zpg": "Guevea De Humboldt Zapotec",
"zph": "Totomachapan Zapotec",
"zpi": "Santa María Quiegolani Zapotec",
"zpj": "Quiavicuzas Zapotec",
"zpk": "Tlacolulita Zapotec",
"zpl": "Lachixío Zapotec",
"zpm": "Mixtepec Zapotec",
"zpn": "Santa Inés Yatzechi Zapotec",
"zpo": "Amatlán Zapotec",
"zpp": "El Alto Zapotec",
"zpq": "Zoogocho Zapotec",
"zpr": "Santiago Xanica Zapotec",
"zps": "Coatlán Zapotec",
"zpt": "San Vicente Coatlán Zapotec",
"zpu": "Yalálag Zapotec",
"zpv": "Chichicapan Zapotec",
"zpw": "Zaniza Zapotec",
"zpx": "San Baltazar Loxicha Zapotec",
"zpy": "Mazaltepec Zapotec",
"zpz": "Texmelucan Zapotec",
"zqe": "Qiubei Zhuang",
"zra": "Kara (Korea)",
"zrg": "Mirgan",
"zrn": "Zerenkel",
"zro": "Záparo",
"zrp": "Zarphatic",
"zrs": "Mairasi",
"zsa": "Sarasira",
"zsk": "Kaskean",
"zsl": "Zambian Sign Language",
"zsm": "Standard Malay",
"zsr": "Southern Rincon Zapotec",
"zsu": "Sukurum",
"zte": "Elotepec Zapotec",
"ztg": "Xanaguía Zapotec",
"ztl": "Lapaguía-Guivini Zapotec",
"ztm": "San Agustín Mixtepec Zapotec",
"ztn": "Santa Catarina Albarradas Zapotec",
"ztp": "Loxicha Zapotec",
"ztq": "Quioquitani-Quierí Zapotec",
"zts": "Tilquiapan Zapotec",
"ztt": "Tejalapan Zapotec",
"ztu": "Güilá Zapotec",
"ztx": "Zaachila Zapotec",
"zty": "Yatee Zapotec",
"zu": "Zulu",
"zua": "Zeem",
"zuh": "Tokano",
"zum": "Kumzari",
"zun": "Zuni",
"zuy": "Zumaya",
"zwa": "Zay",
"zyb": "Yongbei Zhuang",
"zyg": "Yang Zhuang",
"zyj": "Youjiang Zhuang",
"zyn": "Yongnan Zhuang",
"zyp": "Zyphe Chin",
"zza": "Zaza; Dimili; Dimli (macrolanguage); Kirdki; Kirmanjki (macrolanguage); Zazaki",
"zzj": "Zuojiang Zhuang"
} | datasets/src/datasets/utils/resources/languages.json/0 | {
"file_path": "datasets/src/datasets/utils/resources/languages.json",
"repo_id": "datasets",
"token_count": 111198
} | 68 |
import os
import tarfile
import pyarrow as pa
import pytest
from datasets import Dataset, concatenate_datasets, load_dataset
from datasets.features import Audio, Features, Sequence, Value
from ..utils import (
require_sndfile,
)
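# The fixtures below package the shared WAV/MP3 test files into tar archives,
# so the tests can exercise audio data read from archive members as well as plain paths.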
@pytest.fixture()
def tar_wav_path(shared_datadir, tmp_path_factory):
audio_path = str(shared_datadir / "test_audio_44100.wav")
path = tmp_path_factory.mktemp("data") / "audio_data.wav.tar"
with tarfile.TarFile(path, "w") as f:
f.add(audio_path, arcname=os.path.basename(audio_path))
return path
@pytest.fixture()
def tar_mp3_path(shared_datadir, tmp_path_factory):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
path = tmp_path_factory.mktemp("data") / "audio_data.mp3.tar"
with tarfile.TarFile(path, "w") as f:
f.add(audio_path, arcname=os.path.basename(audio_path))
return path
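# Local helper that yields (member name, file object) pairs from a tar archive.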
def iter_archive(archive_path):
with tarfile.open(archive_path) as tar:
for tarinfo in tar:
file_path = tarinfo.name
file_obj = tar.extractfile(tarinfo)
yield file_path, file_obj
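# The default Audio feature has no fixed sampling rate, is mono, and is stored in Arrow
# as a {"bytes", "path"} struct.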
def test_audio_instantiation():
audio = Audio()
assert audio.sampling_rate is None
assert audio.mono is True
assert audio.id is None
assert audio.dtype == "dict"
assert audio.pa_type == pa.struct({"bytes": pa.binary(), "path": pa.string()})
assert audio._type == "Audio"
def test_audio_feature_type_to_arrow():
features = Features({"audio": Audio()})
assert features.arrow_schema == pa.schema({"audio": Audio().pa_type})
features = Features({"struct_containing_an_audio": {"audio": Audio()}})
assert features.arrow_schema == pa.schema({"struct_containing_an_audio": pa.struct({"audio": Audio().pa_type})})
features = Features({"sequence_of_audios": Sequence(Audio())})
assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
@pytest.mark.parametrize(
"build_example",
[
lambda audio_path: audio_path,
lambda audio_path: open(audio_path, "rb").read(),
lambda audio_path: {"path": audio_path},
lambda audio_path: {"path": audio_path, "bytes": None},
lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read()},
lambda audio_path: {"path": None, "bytes": open(audio_path, "rb").read()},
lambda audio_path: {"bytes": open(audio_path, "rb").read()},
lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
)
def test_audio_feature_encode_example(shared_datadir, build_example):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio()
encoded_example = audio.encode_example(build_example(audio_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = audio.decode_example(encoded_example)
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
@pytest.mark.parametrize(
"build_example",
[
lambda audio_path: {"path": audio_path, "sampling_rate": 16_000},
lambda audio_path: {"path": audio_path, "bytes": None, "sampling_rate": 16_000},
lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read(), "sampling_rate": 16_000},
lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
)
def test_audio_feature_encode_example_pcm(shared_datadir, build_example):
audio_path = str(shared_datadir / "test_audio_16000.pcm")
audio = Audio(sampling_rate=16_000)
encoded_example = audio.encode_example(build_example(audio_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = audio.decode_example(encoded_example)
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
@require_sndfile
def test_audio_decode_example(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (202311,)
assert decoded_example["sampling_rate"] == 44100
with pytest.raises(RuntimeError):
Audio(decode=False).decode_example(audio_path)
@require_sndfile
def test_audio_resampling(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio(sampling_rate=16000)
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (73401,)
assert decoded_example["sampling_rate"] == 16000
@require_sndfile
def test_audio_decode_example_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (110592,)
assert decoded_example["sampling_rate"] == 44100
@require_sndfile
def test_audio_decode_example_opus(shared_datadir):
audio_path = str(shared_datadir / "test_audio_48000.opus")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (48000,)
assert decoded_example["sampling_rate"] == 48000
@pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
audio_path = str(shared_datadir / "test_audio_16000.pcm")
audio_input = {"path": audio_path, "sampling_rate": 16_000}
audio = Audio(sampling_rate=sampling_rate)
decoded_example = audio.decode_example(audio.encode_example(audio_input))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] is None
assert decoded_example["array"].shape == (16208 * sampling_rate // 16_000,)
assert decoded_example["sampling_rate"] == sampling_rate
@require_sndfile
def test_audio_resampling_mp3_different_sampling_rates(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
audio_path2 = str(shared_datadir / "test_audio_16000.mp3")
audio = Audio(sampling_rate=48000)
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (120373,)
assert decoded_example["sampling_rate"] == 48000
decoded_example = audio.decode_example(audio.encode_example(audio_path2))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path2
assert decoded_example["array"].shape == (122688,)
assert decoded_example["sampling_rate"] == 48000
@require_sndfile
def test_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_tar_wav(tar_wav_path):
audio_filename = "test_audio_44100.wav"
data = {"audio": []}
for file_path, file_obj in iter_archive(tar_wav_path):
data["audio"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_filename
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_filename
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_filename
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_tar_mp3(tar_mp3_path):
audio_filename = "test_audio_44100.mp3"
data = {"audio": []}
for file_path, file_obj in iter_archive(tar_mp3_path):
data["audio"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_filename
assert item["audio"]["array"].shape == (110592,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_filename
assert batch["audio"][0]["array"].shape == (110592,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_filename
assert column[0]["array"].shape == (110592,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_with_none():
data = {"audio": [None]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] is None
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"audio"}
assert isinstance(batch["audio"], list) and all(item is None for item in batch["audio"])
column = dset["audio"]
assert len(column) == 1
assert isinstance(column, list) and all(item is None for item in column)
    # Nested cases: None audio values inside a Sequence and inside a struct should also come back as None
data = {"audio": [[None]]}
features = Features({"audio": Sequence(Audio())})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert all(i is None for i in item["audio"])
data = {"nested": [{"audio": None}]}
features = Features({"nested": {"audio": Audio()}})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"nested"}
assert item["nested"].keys() == {"audio"}
assert item["nested"]["audio"] is None
@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(sampling_rate=16000)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (73401,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (73401,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (73401,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(sampling_rate=16000)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (40125,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (40125,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (40125,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item["audio"]["sampling_rate"] == 44100
dset = dset.cast_column("audio", Audio(sampling_rate=16000))
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (73401,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (73401,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (73401,)
assert column[0]["sampling_rate"] == 16000
@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item["audio"]["sampling_rate"] == 44100
dset = dset.cast_column("audio", Audio(sampling_rate=16000))
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (40125,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (40125,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (40125,)
assert column[0]["sampling_rate"] == 16000
@pytest.mark.parametrize(
"build_data",
[
lambda audio_path: {"audio": [audio_path]},
lambda audio_path: {"audio": [open(audio_path, "rb").read()]},
lambda audio_path: {"audio": [{"path": audio_path}]},
lambda audio_path: {"audio": [{"path": audio_path, "bytes": None}]},
lambda audio_path: {"audio": [{"path": audio_path, "bytes": open(audio_path, "rb").read()}]},
lambda audio_path: {"audio": [{"path": None, "bytes": open(audio_path, "rb").read()}]},
lambda audio_path: {"audio": [{"bytes": open(audio_path, "rb").read()}]},
],
)
def test_dataset_cast_to_audio_features(shared_datadir, build_data):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = build_data(audio_path)
dset = Dataset.from_dict(data)
item = dset.cast(Features({"audio": Audio()}))[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
item = dset.cast_column("audio", Audio())[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
def test_dataset_concatenate_audio_features(shared_datadir):
    # dset1 and dset2 use different input structures (path string vs. raw bytes) to make sure they are compatible with each other
audio_path = str(shared_datadir / "test_audio_44100.wav")
data1 = {"audio": [audio_path]}
dset1 = Dataset.from_dict(data1, features=Features({"audio": Audio()}))
data2 = {"audio": [{"bytes": open(audio_path, "rb").read()}]}
dset2 = Dataset.from_dict(data2, features=Features({"audio": Audio()}))
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert concatenated_dataset[0]["audio"]["array"].shape == dset1[0]["audio"]["array"].shape
assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
def test_dataset_concatenate_nested_audio_features(shared_datadir):
    # dset1 and dset2 use different input structures (path string vs. raw bytes) to make sure they are compatible with each other
audio_path = str(shared_datadir / "test_audio_44100.wav")
features = Features({"list_of_structs_of_audios": [{"audio": Audio()}]})
data1 = {"list_of_structs_of_audios": [[{"audio": audio_path}]]}
dset1 = Dataset.from_dict(data1, features=features)
data2 = {"list_of_structs_of_audios": [[{"audio": {"bytes": open(audio_path, "rb").read()}}]]}
dset2 = Dataset.from_dict(data2, features=features)
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert (
concatenated_dataset[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
== dset1[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
)
assert (
concatenated_dataset[1]["list_of_structs_of_audios"][0]["audio"]["array"].shape
== dset2[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
)
@require_sndfile
def test_dataset_with_audio_feature_map_is_not_decoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path], "text": ["Hello"]}
features = Features({"audio": Audio(), "text": Value("string")})
dset = Dataset.from_dict(data, features=features)
expected_audio = features.encode_batch(data)["audio"][0]
for item in dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text"}
assert item == {"audio": expected_audio, "text": "Hello"}
def process_text(example):
example["text"] = example["text"] + " World!"
return example
processed_dset = dset.map(process_text)
for item in processed_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text"}
assert item == {"audio": expected_audio, "text": "Hello World!"}
@require_sndfile
def test_dataset_with_audio_feature_map_is_decoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path], "text": ["Hello"]}
features = Features({"audio": Audio(), "text": Value("string")})
dset = Dataset.from_dict(data, features=features)
def process_audio_sampling_rate_by_example(example):
example["double_sampling_rate"] = 2 * example["audio"]["sampling_rate"]
return example
decoded_dset = dset.map(process_audio_sampling_rate_by_example)
for item in decoded_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text", "double_sampling_rate"}
assert item["double_sampling_rate"] == 88200
def process_audio_sampling_rate_by_batch(batch):
double_sampling_rates = []
for audio in batch["audio"]:
double_sampling_rates.append(2 * audio["sampling_rate"])
batch["double_sampling_rate"] = double_sampling_rates
return batch
decoded_dset = dset.map(process_audio_sampling_rate_by_batch, batched=True)
for item in decoded_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text", "double_sampling_rate"}
assert item["double_sampling_rate"] == 88200
@require_sndfile
def test_formatted_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path, audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 2
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["audio"]
assert item["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert item["audio"][0]["path"] == audio_path
assert item["audio"][0]["array"].shape == (202311,)
assert item["audio"][0]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["audio"]
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 2
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
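# Fixture writing a one-line JSON Lines file that references the 44.1 kHz test audio, used by the load_dataset test below.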
@pytest.fixture
def jsonl_audio_dataset_path(shared_datadir, tmp_path_factory):
import json
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = [{"audio": audio_path, "text": "Hello world!"}]
path = str(tmp_path_factory.mktemp("data") / "audio_dataset.jsonl")
with open(path, "w") as f:
for item in data:
f.write(json.dumps(item) + "\n")
return path
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_with_audio_feature(streaming, jsonl_audio_dataset_path, shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data_files = jsonl_audio_dataset_path
features = Features({"audio": Audio(), "text": Value("string")})
dset = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming)
item = dset[0] if not streaming else next(iter(dset))
assert item.keys() == {"audio", "text"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
@require_sndfile
@pytest.mark.integration
def test_dataset_with_audio_feature_loaded_from_cache():
# load first time
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean")
# load from cache
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
assert isinstance(ds, Dataset)
def test_dataset_with_audio_feature_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["audio"]
assert item["audio"][0] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["audio"]
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
def assert_audio_example_undecoded(example):
assert example["audio"] == {"path": audio_path, "bytes": None}
dset.map(assert_audio_example_undecoded)
def assert_audio_batch_undecoded(batch):
for audio in batch["audio"]:
assert audio == {"path": audio_path, "bytes": None}
dset.map(assert_audio_batch_undecoded, batched=True)
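# embed_storage should read the referenced file into the "bytes" field and keep only the base file name as the "path".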
def test_audio_embed_storage(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
example = {"bytes": None, "path": audio_path}
storage = pa.array([example], type=pa.struct({"bytes": pa.binary(), "path": pa.string()}))
embedded_storage = Audio().embed_storage(storage)
embedded_example = embedded_storage.to_pylist()[0]
assert embedded_example == {"bytes": open(audio_path, "rb").read(), "path": "test_audio_44100.wav"}
| datasets/tests/features/test_audio.py/0 | {
"file_path": "datasets/tests/features/test_audio.py",
"repo_id": "datasets",
"token_count": 11528
} | 69 |
import os
import tempfile
from unittest import TestCase
import numpy as np
import pandas as pd
import pytest
from datasets import load_from_disk
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.features import ClassLabel, Features, Sequence, Value
from datasets.iterable_dataset import IterableDataset
from datasets.splits import NamedSplit
from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_tf, require_torch
class DatasetDictTest(TestCase):
def _create_dummy_dataset(self, multiple_columns=False):
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
dset = Dataset.from_dict(data)
else:
dset = Dataset.from_dict(
{"filename": ["my_name-train" + "_" + f"{x:03d}" for x in np.arange(30).tolist()]}
)
return dset
def _create_dummy_dataset_dict(self, multiple_columns=False) -> DatasetDict:
return DatasetDict(
{
"train": self._create_dummy_dataset(multiple_columns=multiple_columns),
"test": self._create_dummy_dataset(multiple_columns=multiple_columns),
}
)
def _create_dummy_iterable_dataset(self, multiple_columns=False) -> IterableDataset:
def gen():
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
for v1, v2 in zip(data["col_1"], data["col_2"]):
yield {"col_1": v1, "col_2": v2}
else:
for x in range(30):
yield {"filename": "my_name-train" + "_" + f"{x:03d}"}
return IterableDataset.from_generator(gen)
def _create_dummy_iterable_dataset_dict(self, multiple_columns=False) -> IterableDatasetDict:
return IterableDatasetDict(
{
"train": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns),
"test": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns),
}
)
def test_flatten(self):
dset_split = Dataset.from_dict(
{"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10},
features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}),
)
dset = DatasetDict({"train": dset_split, "test": dset_split})
dset = dset.flatten()
self.assertDictEqual(dset.column_names, {"train": ["a.b.c", "foo"], "test": ["a.b.c", "foo"]})
self.assertListEqual(sorted(dset["train"].features.keys()), ["a.b.c", "foo"])
self.assertDictEqual(
dset["train"].features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")})
)
del dset
def test_set_format_numpy(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="numpy", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], np.int64)
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.reset_format()
with dset.formatted_as(type="numpy", columns=["col_1"]):
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], np.int64)
self.assertEqual(dset_split[0]["col_1"].item(), 3)
for dset_split in dset.values():
self.assertEqual(dset_split.format["type"], None)
self.assertEqual(dset_split.format["format_kwargs"], {})
self.assertEqual(dset_split.format["columns"], dset_split.column_names)
self.assertEqual(dset_split.format["output_all_columns"], False)
dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="numpy", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], np.str_)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
@require_torch
def test_set_format_torch(self):
import torch
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="torch", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="torch", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="torch")
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
del dset
@require_tf
def test_set_format_tf(self):
import tensorflow as tf
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="tensorflow", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 1)
self.assertIsInstance(dset_split[0]["col_1"], tf.Tensor)
self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
self.assertEqual(dset_split[0]["col_1"].numpy().item(), 3)
dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True)
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertIsInstance(dset_split[0]["col_2"], str)
self.assertEqual(dset_split[0]["col_2"], "a")
dset.set_format(type="tensorflow", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0]), 2)
self.assertEqual(dset_split[0]["col_2"].numpy().decode("utf-8"), "a")
del dset
def test_set_format_pandas(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_format(type="pandas", columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 1)
self.assertIsInstance(dset_split[0], pd.DataFrame)
self.assertListEqual(list(dset_split[0].shape), [1, 1])
self.assertEqual(dset_split[0]["col_1"].item(), 3)
dset.set_format(type="pandas", columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].columns), 2)
self.assertEqual(dset_split[0]["col_2"].item(), "a")
del dset
def test_set_transform(self):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.set_transform(transform=transform, columns=["col_1"])
for dset_split in dset.values():
self.assertEqual(dset_split.format["type"], "custom")
self.assertEqual(len(dset_split[0].keys()), 1)
self.assertEqual(dset_split[0]["col_1"], "3")
self.assertEqual(dset_split[:2]["col_1"], ["3", "2"])
self.assertEqual(dset_split["col_1"][:2], ["3", "2"])
prev_format = dset[list(dset.keys())[0]].format
for dset_split in dset.values():
dset_split.set_format(**dset_split.format)
self.assertEqual(prev_format, dset_split.format)
dset.set_transform(transform=transform, columns=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(len(dset_split[0].keys()), 2)
self.assertEqual(dset_split[0]["col_2"], "A")
del dset
def test_with_format(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_format("numpy", columns=["col_1"])
dset.set_format("numpy", columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
def test_with_transform(self):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_transform(transform, columns=["col_1"])
dset.set_transform(transform, columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
def test_cast(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
features = dset["train"].features
features["col_1"] = Value("float64")
dset = dset.cast(features)
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertEqual(dset_split.features["col_1"], Value("float64"))
self.assertIsInstance(dset_split[0]["col_1"], float)
del dset
def test_remove_columns(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names="col_1")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_2"])
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 0)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
for dset_split in dset.values():
dset_split._format_columns = ["col_1", "col_2"]
dset = dset.remove_columns(column_names=["col_1"])
for dset_split in dset.values():
self.assertListEqual(dset_split._format_columns, ["col_2"])
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_2"])
del dset
def test_rename_column(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.rename_column(original_column_name="col_1", new_column_name="new_name")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertListEqual(list(dset_split.column_names), ["new_name", "col_2"])
del dset
def test_select_columns(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names=[])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 0)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names="col_1")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_1"])
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.select_columns(column_names=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
dset = self._create_dummy_dataset_dict(multiple_columns=True)
for dset_split in dset.values():
dset_split._format_columns = ["col_1", "col_2"]
dset = dset.select_columns(column_names=["col_1"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_1"])
self.assertListEqual(dset_split._format_columns, ["col_1"])
def test_map(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
mapped_dsets_1: DatasetDict = dsets.map(lambda ex: {"foo": ["bar"] * len(ex["filename"])}, batched=True)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_1.keys()))
self.assertListEqual(mapped_dsets_1["train"].column_names, ["filename", "foo"])
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
mapped_dsets_2: DatasetDict = mapped_dsets_1.map(
lambda ex: {"bar": ["foo"] * len(ex["filename"])}, batched=True, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_2.keys()))
self.assertListEqual(sorted(mapped_dsets_2["train"].column_names), sorted(["filename", "foo", "bar"]))
del dsets, mapped_dsets_1, mapped_dsets_2
def test_iterable_map(self):
dsets = self._create_dummy_iterable_dataset_dict()
fn_kwargs = {"n": 3}
mapped_dsets: IterableDatasetDict = dsets.map(
lambda x, n: {"foo": [n] * len(x["filename"])},
batched=True,
fn_kwargs=fn_kwargs,
)
mapped_example = next(iter(mapped_dsets["train"]))
self.assertListEqual(sorted(mapped_example.keys()), sorted(["filename", "foo"]))
self.assertLessEqual(mapped_example["foo"], 3)
del dsets, mapped_dsets
def test_filter(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
filtered_dsets_1: DatasetDict = dsets.filter(lambda ex: int(ex["filename"].split("_")[-1]) < 10)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_1.keys()))
self.assertEqual(len(filtered_dsets_1["train"]), 10)
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
filtered_dsets_2: DatasetDict = filtered_dsets_1.filter(
lambda ex: int(ex["filename"].split("_")[-1]) < 5, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_2.keys()))
self.assertEqual(len(filtered_dsets_2["train"]), 5)
filtered_dsets_3: DatasetDict = dsets.filter(
lambda examples: [int(ex.split("_")[-1]) < 10 for ex in examples["filename"]], batched=True
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_3.keys()))
self.assertEqual(len(filtered_dsets_3["train"]), 10)
del dsets, filtered_dsets_1, filtered_dsets_2, filtered_dsets_3
def test_iterable_filter(self):
dsets = self._create_dummy_iterable_dataset_dict()
example = next(iter(dsets["train"]))
fn_kwargs = {"n": 3}
filtered_dsets: IterableDatasetDict = dsets.filter(
lambda ex, n: n < int(ex["filename"].split("_")[-1]), fn_kwargs=fn_kwargs
)
filtered_example = next(iter(filtered_dsets["train"]))
self.assertListEqual(list(example.keys()), list(filtered_example.keys()))
        self.assertEqual(int(filtered_example["filename"].split("_")[-1]), 4) # only ids greater than n=3 pass the filter, so the first kept id is 4
del dsets, filtered_dsets
def test_sort(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
sorted_dsets_1: DatasetDict = dsets.sort("filename")
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_1.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_1["train"]["filename"]],
sorted(f"{x:03d}" for x in range(30)),
)
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
sorted_dsets_2: DatasetDict = sorted_dsets_1.sort(
"filename", indices_cache_file_names=indices_cache_file_names, reverse=True
)
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_2.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_2["train"]["filename"]],
sorted((f"{x:03d}" for x in range(30)), reverse=True),
)
del dsets, sorted_dsets_1, sorted_dsets_2
def test_shuffle(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
seeds = {
"train": 1234,
"test": 1234,
}
dsets_shuffled = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled["test"]["filename"])
self.assertEqual(len(dsets_shuffled["train"]), 30)
self.assertEqual(dsets_shuffled["train"][0]["filename"], "my_name-train_028")
self.assertEqual(dsets_shuffled["train"][2]["filename"], "my_name-train_010")
self.assertDictEqual(dsets["train"].features, Features({"filename": Value("string")}))
self.assertDictEqual(dsets_shuffled["train"].features, Features({"filename": Value("string")}))
# Reproducibility
indices_cache_file_names_2 = {
"train": os.path.join(tmp_dir, "train_2.arrow"),
"test": os.path.join(tmp_dir, "test_2.arrow"),
}
dsets_shuffled_2 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_2, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled_2["train"]["filename"])
seeds = {
"train": 1234,
"test": 1,
}
indices_cache_file_names_3 = {
"train": os.path.join(tmp_dir, "train_3.arrow"),
"test": os.path.join(tmp_dir, "test_3.arrow"),
}
dsets_shuffled_3 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_3, load_from_cache_file=False
)
self.assertNotEqual(dsets_shuffled_3["train"]["filename"], dsets_shuffled_3["test"]["filename"])
# other input types
dsets_shuffled_int = dsets.shuffle(42)
dsets_shuffled_alias = dsets.shuffle(seed=42)
dsets_shuffled_none = dsets.shuffle()
self.assertEqual(len(dsets_shuffled_int["train"]), 30)
self.assertEqual(len(dsets_shuffled_alias["train"]), 30)
self.assertEqual(len(dsets_shuffled_none["train"]), 30)
del dsets, dsets_shuffled, dsets_shuffled_2, dsets_shuffled_3
del dsets_shuffled_int, dsets_shuffled_alias, dsets_shuffled_none
def test_flatten_indices(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
dsets_shuffled = dsets.shuffle(
seed=42, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False
)
self.assertIsNotNone(dsets_shuffled["train"]._indices)
self.assertIsNotNone(dsets_shuffled["test"]._indices)
dsets_flat = dsets_shuffled.flatten_indices()
self.assertIsNone(dsets_flat["train"]._indices)
self.assertIsNone(dsets_flat["test"]._indices)
del dsets, dsets_shuffled, dsets_flat
def test_check_values_type(self):
dsets = self._create_dummy_dataset_dict()
dsets["bad_split"] = None
self.assertRaises(TypeError, dsets.map, lambda x: x)
self.assertRaises(TypeError, dsets.filter, lambda x: True)
self.assertRaises(TypeError, dsets.shuffle)
self.assertRaises(TypeError, dsets.sort, "filename")
del dsets
def test_serialization(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
del reloaded_dsets
del dsets["test"]
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
del dsets, reloaded_dsets
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir, num_shards={"train": 3, "test": 2})
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["train"].cache_files), 3)
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"].cache_files), 2)
del reloaded_dsets
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir, num_proc=2)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["train"].cache_files), 2)
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"].cache_files), 2)
del reloaded_dsets
def test_load_from_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
del dsets
dsets = load_from_disk(tmp_dir)
self.assertListEqual(sorted(dsets), ["test", "train"])
self.assertEqual(len(dsets["train"]), 30)
self.assertListEqual(dsets["train"].column_names, ["filename"])
self.assertEqual(len(dsets["test"]), 30)
self.assertListEqual(dsets["test"].column_names, ["filename"])
del dsets
def test_align_labels_with_mapping(self):
train_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]),
}
)
test_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "contradiction", "neutral"]),
}
)
train_data = {"input_text": ["a", "a", "b", "b", "c", "c"], "input_labels": [0, 0, 1, 1, 2, 2]}
test_data = {"input_text": ["a", "a", "c", "c", "b", "b"], "input_labels": [0, 0, 1, 1, 2, 2]}
label2id = {"CONTRADICTION": 0, "ENTAILMENT": 2, "NEUTRAL": 1}
id2label = {v: k for k, v in label2id.items()}
train_expected_labels = [2, 2, 1, 1, 0, 0]
test_expected_labels = [2, 2, 0, 0, 1, 1]
train_expected_label_names = [id2label[idx] for idx in train_expected_labels]
test_expected_label_names = [id2label[idx] for idx in test_expected_labels]
dsets = DatasetDict(
{
"train": Dataset.from_dict(train_data, features=train_features),
"test": Dataset.from_dict(test_data, features=test_features),
}
)
dsets = dsets.align_labels_with_mapping(label2id, "input_labels")
self.assertListEqual(train_expected_labels, dsets["train"]["input_labels"])
self.assertListEqual(test_expected_labels, dsets["test"]["input_labels"])
train_aligned_label_names = [
dsets["train"].features["input_labels"].int2str(idx) for idx in dsets["train"]["input_labels"]
]
test_aligned_label_names = [
dsets["test"].features["input_labels"].int2str(idx) for idx in dsets["test"]["input_labels"]
]
self.assertListEqual(train_expected_label_names, train_aligned_label_names)
self.assertListEqual(test_expected_label_names, test_aligned_label_names)
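# Module-level pytest tests start here; the `mockfs` fixture (defined in the test suite's conftest, not shown here) provides a mock fsspec filesystem accessed through the "mock://" protocol.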
def test_dummy_datasetdict_serialize_fs(mockfs):
dataset_dict = DatasetDict(
{
"train": Dataset.from_dict({"a": range(30)}),
"test": Dataset.from_dict({"a": range(10)}),
}
)
dataset_path = "mock://my_dataset"
dataset_dict.save_to_disk(dataset_path, storage_options=mockfs.storage_options)
assert mockfs.isdir(dataset_path)
assert mockfs.glob(dataset_path + "/*")
reloaded = dataset_dict.load_from_disk(dataset_path, storage_options=mockfs.storage_options)
assert list(reloaded) == list(dataset_dict)
for k in dataset_dict:
assert reloaded[k].features == dataset_dict[k].features
assert reloaded[k].to_dict() == dataset_dict[k].to_dict()
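# Helper for the DatasetDict.from_csv tests below: checks row/column counts, column names and feature dtypes for each split. Similar _check_*_datasetdict helpers follow for json, parquet and text.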
def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_csv({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_csv_features(features, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
    # the CSV file loses the string dtype information for col_1: it is inferred as "int64" instead of "string"
default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_csv({"train": csv_path}, features=features, cache_dir=cache_dir)
_check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_csv_split(split, csv_path, tmp_path):
if split:
path = {split: csv_path}
else:
split = "train"
path = {"train": csv_path, "test": csv_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_csv(path, cache_dir=cache_dir)
_check_csv_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_json({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_json({"train": jsonl_path}, features=features, cache_dir=cache_dir)
_check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
if split:
path = {split: jsonl_path}
else:
split = "train"
path = {"train": jsonl_path, "test": jsonl_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_json(path, cache_dir=cache_dir)
_check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_parquet({"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_parquet({"train": parquet_path}, features=features, cache_dir=cache_dir)
_check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_parquet_split(split, parquet_path, tmp_path):
if split:
path = {split: parquet_path}
else:
split = "train"
path = {"train": parquet_path, "test": parquet_path}
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = DatasetDict.from_parquet(path, cache_dir=cache_dir)
_check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = DatasetDict.from_text({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = DatasetDict.from_text({"train": text_path}, features=features, cache_dir=cache_dir)
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
if split:
path = {split: text_path}
else:
split = "train"
path = {"train": text_path, "test": text_path}
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = DatasetDict.from_text(path, cache_dir=cache_dir)
_check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
| datasets/tests/test_dataset_dict.py/0 | {
"file_path": "datasets/tests/test_dataset_dict.py",
"repo_id": "datasets",
"token_count": 17402
} | 70 |
import pickle
from copy import deepcopy
from itertools import chain, islice
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pytest
from datasets import Dataset, load_dataset
from datasets.combine import concatenate_datasets, interleave_datasets
from datasets.features import (
ClassLabel,
Features,
Image,
Value,
)
from datasets.formatting import get_format_type_from_alias
from datasets.info import DatasetInfo
from datasets.iterable_dataset import (
ArrowExamplesIterable,
BufferShuffledExamplesIterable,
CyclingMultiSourcesExamplesIterable,
ExamplesIterable,
FilteredExamplesIterable,
FormattingConfig,
HorizontallyConcatenatedMultiSourcesExamplesIterable,
IterableDataset,
MappedExamplesIterable,
RandomlyCyclingMultiSourcesExamplesIterable,
SelectColumnsIterable,
ShuffledDataSourcesArrowExamplesIterable,
ShuffledDataSourcesExamplesIterable,
ShufflingConfig,
SkipExamplesIterable,
StepExamplesIterable,
TakeExamplesIterable,
TypedExamplesIterable,
VerticallyConcatenatedMultiSourcesExamplesIterable,
_BaseExamplesIterable,
_batch_arrow_tables,
_batch_to_examples,
_convert_to_arrow,
_examples_to_batch,
)
from .utils import (
assert_arrow_memory_doesnt_increase,
is_rng_equal,
require_dill_gt_0_3_2,
require_not_windows,
require_pyspark,
require_tf,
require_torch,
)
DEFAULT_N_EXAMPLES = 20
DEFAULT_BATCH_SIZE = 4
DEFAULT_FILEPATH = "file.txt"
SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script
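# Dummy generation functions used throughout: generate_examples_fn yields (key, example) pairs, while generate_tables_fn yields (key, pyarrow.Table) batches of size DEFAULT_BATCH_SIZE.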
def generate_examples_fn(**kwargs):
kwargs = kwargs.copy()
n = kwargs.pop("n", DEFAULT_N_EXAMPLES)
filepaths = kwargs.pop("filepaths", None)
for filepath in filepaths or [DEFAULT_FILEPATH]:
if filepaths is not None:
kwargs["filepath"] = filepath
for i in range(n):
yield f"{filepath}_{i}", {"id": i, **kwargs}
def generate_tables_fn(**kwargs):
kwargs = kwargs.copy()
n = kwargs.pop("n", DEFAULT_N_EXAMPLES)
batch_size = kwargs.pop("batch_size", DEFAULT_BATCH_SIZE)
filepaths = kwargs.pop("filepaths", None)
for filepath in filepaths or [DEFAULT_FILEPATH]:
buffer = []
batch_idx = 0
if filepaths is not None:
kwargs["filepath"] = filepath
for i in range(n):
buffer.append({"id": i, **kwargs})
if len(buffer) == batch_size:
yield f"{filepath}_{batch_idx}", pa.Table.from_pylist(buffer)
buffer = []
batch_idx += 1
yield batch_idx, pa.Table.from_pylist(buffer)
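# Fixtures: `dataset` and `dataset_with_several_columns` wrap the dummy example generator in an IterableDataset; `arrow_file` materializes `dataset` to an Arrow cache file on disk.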
@pytest.fixture
def dataset():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train")
@pytest.fixture
def dataset_with_several_columns():
ex_iterable = ExamplesIterable(
generate_examples_fn,
{"filepath": ["data0.txt", "data1.txt", "data2.txt"], "metadata": {"sources": ["https://foo.bar"]}},
)
return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train")
@pytest.fixture
def arrow_file(tmp_path_factory, dataset: IterableDataset):
filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
Dataset.from_generator(dataset.__iter__).map(cache_file_name=filename)
return filename
################################
#
# Utilities tests
#
################################
@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_convert_to_arrow(batch_size, drop_last_batch):
examples = [{"foo": i} for i in range(10)]
full_table = pa.Table.from_pylist(examples)
num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(
_convert_to_arrow(
list(enumerate(examples)),
batch_size=batch_size,
drop_last_batch=drop_last_batch,
)
)
assert len(subtables) == num_batches
if drop_last_batch:
assert all(len(subtable) == batch_size for _, subtable in subtables)
else:
assert all(len(subtable) == batch_size for _, subtable in subtables[:-1])
assert len(subtables[-1][1]) <= batch_size
if num_rows > 0:
reloaded = pa.concat_tables([subtable for _, subtable in subtables])
assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
@pytest.mark.parametrize(
"tables",
[
[pa.table({"foo": range(10)})],
[pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})],
[pa.table({"foo": [i]}) for i in range(10)],
],
)
@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_batch_arrow_tables(tables, batch_size, drop_last_batch):
full_table = pa.concat_tables(tables)
num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(
_batch_arrow_tables(list(enumerate(tables)), batch_size=batch_size, drop_last_batch=drop_last_batch)
)
assert len(subtables) == num_batches
if drop_last_batch:
assert all(len(subtable) == batch_size for _, subtable in subtables)
else:
assert all(len(subtable) == batch_size for _, subtable in subtables[:-1])
assert len(subtables[-1][1]) <= batch_size
if num_rows > 0:
reloaded = pa.concat_tables([subtable for _, subtable in subtables])
assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
################################
#
# _BaseExampleIterable tests
#
################################
def test_examples_iterable():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
expected = list(generate_examples_fn())
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert ex_iterable.iter_arrow is None
def test_examples_iterable_with_kwargs():
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"})
expected = list(generate_examples_fn(filepaths=["0.txt", "1.txt"], split="train"))
assert list(ex_iterable) == expected
assert all("split" in ex for _, ex in ex_iterable)
assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"]
def test_examples_iterable_shuffle_data_sources():
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]})
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40))
expected = list(generate_examples_fn(filepaths=["1.txt", "0.txt"])) # shuffle the filepaths
assert list(ex_iterable) == expected
def test_examples_iterable_shuffle_shards_and_metadata():
def gen(filepaths, all_metadata):
for i, (filepath, metadata) in enumerate(zip(filepaths, all_metadata)):
yield i, {"filepath": filepath, "metadata": metadata}
ex_iterable = ExamplesIterable(
gen,
{
"filepaths": [f"{i}.txt" for i in range(100)],
"all_metadata": [{"id": str(i)} for i in range(100)],
},
)
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(42))
out = list(ex_iterable)
filepaths_ids = [x["filepath"].split(".")[0] for _, x in out]
metadata_ids = [x["metadata"]["id"] for _, x in out]
assert filepaths_ids == metadata_ids, "entangled lists of shards/metadata should be shuffled the same way"
def test_arrow_examples_iterable():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {})
expected = sum([pa_table.to_pylist() for _, pa_table in generate_tables_fn()], [])
assert next(iter(ex_iterable))[1] == expected[0]
assert [example for _, example in ex_iterable] == expected
expected = list(generate_tables_fn())
assert list(ex_iterable.iter_arrow()) == expected
def test_arrow_examples_iterable_with_kwargs():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"})
expected = sum(
[pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train")], []
)
assert [example for _, example in ex_iterable] == expected
assert all("split" in ex for _, ex in ex_iterable)
assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"]
expected = list(generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train"))
assert list(ex_iterable.iter_arrow()) == expected
def test_arrow_examples_iterable_shuffle_data_sources():
ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"]})
ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40))
expected = sum(
[pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["1.txt", "0.txt"])], []
) # shuffle the filepaths
assert [example for _, example in ex_iterable] == expected
expected = list(generate_tables_fn(filepaths=["1.txt", "0.txt"]))
assert list(ex_iterable.iter_arrow()) == expected
@pytest.mark.parametrize("seed", [42, 1337, 101010, 123456])
def test_buffer_shuffled_examples_iterable(seed):
n, buffer_size = 100, 30
generator = np.random.default_rng(seed)
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = BufferShuffledExamplesIterable(base_ex_iterable, buffer_size=buffer_size, generator=generator)
rng = deepcopy(generator)
expected_indices_used_for_shuffling = list(
islice(BufferShuffledExamplesIterable._iter_random_indices(rng, buffer_size=buffer_size), n - buffer_size)
)
# indices to pick in the shuffle buffer should all be in the right range
assert all(0 <= index_to_pick < buffer_size for index_to_pick in expected_indices_used_for_shuffling)
# it should be random indices
assert expected_indices_used_for_shuffling != list(range(buffer_size))
# The final order of examples is the result of a shuffle buffer.
all_examples = list(generate_examples_fn(n=n))
# We create a buffer and we pick random examples from it.
buffer, rest = all_examples[:buffer_size], all_examples[buffer_size:]
expected = []
for i, index_to_pick in enumerate(expected_indices_used_for_shuffling):
expected.append(buffer[index_to_pick])
# The picked examples are directly replaced by the next examples from the iterable.
buffer[index_to_pick] = rest.pop(0)
# Once we have reached the end of the iterable, we shuffle the buffer and return the remaining examples.
rng.shuffle(buffer)
expected += buffer
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert sorted(ex_iterable) == sorted(all_examples)
def test_cycling_multi_sources_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"})
ex_iterable = CyclingMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = list(chain(*zip(generate_examples_fn(text="foo"), generate_examples_fn(text="bar"))))
    # The cycling stops as soon as one iterable is out of examples (here ex_iterable1), so the last sample from ex_iterable2 is unnecessary
expected = expected[:-1]
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
assert all((x["id"], x["text"]) == (i // 2, "bar" if i % 2 else "foo") for i, (_, x) in enumerate(ex_iterable))
@pytest.mark.parametrize("probabilities", [None, (0.5, 0.5), (0.9, 0.1)])
def test_randomly_cycling_multi_sources_examples_iterable(probabilities):
seed = 42
generator = np.random.default_rng(seed)
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"})
ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(
[ex_iterable1, ex_iterable2], generator=generator, probabilities=probabilities
)
    # The source is picked at random for each example. Iteration stops as soon as one of the iterators is exhausted.
rng = deepcopy(generator)
iterators = (generate_examples_fn(text="foo"), generate_examples_fn(text="bar"))
indices_iterator = RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(
rng, len(iterators), p=probabilities
)
expected = []
lengths = [len(list(ex_iterable1)), len(list(ex_iterable2))]
for i in indices_iterator:
if lengths[0] == 0 or lengths[1] == 0:
break
for key, example in iterators[i]:
expected.append((key, example))
lengths[i] -= 1
break
else:
break
assert next(iter(ex_iterable)) == expected[0]
assert list(ex_iterable) == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id
(3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0
(3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [{**x, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id
(3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0
(3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_drop_last_batch(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, drop_last_batch=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
is_empty = False
if batched is False:
# `drop_last_batch` has no effect here
expected = [{**x, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
if len(examples) < batch_size: # ignore last batch
break
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size]
if all_examples:
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
else:
is_empty = True
if not is_empty:
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
else:
with pytest.raises(StopIteration):
next(iter(ex_iterable))
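
# With `with_indices=True`, the mapping function receives the absolute example index as an
# extra argument (or, in batched mode, the list of indices covered by the batch), which is
# exactly how the expected output is computed below.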
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x, index: {"id+idx": x["id"] + index}, False, None), # add the index to the id
(
25,
lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]},
True,
10,
), # add the index to the id
(5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, None), # same with bs=None
(5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, -1), # same with bs<=0
],
)
def test_mapped_examples_iterable_with_indices(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [{**x, **func(x, idx)} for idx, x in enumerate(all_examples)]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
indices = list(range(batch_offset, batch_offset + len(examples)))
transformed_batch = func(batch, indices)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
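
# `remove_columns` drops the listed columns from the input examples while keeping every
# column produced by the mapping function, which is how the expected output is assembled
# below.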
@pytest.mark.parametrize(
"n, func, batched, batch_size, remove_columns",
[
(3, lambda x: {"id+1": x["id"] + 1}, False, None, ["extra_column"]), # just add 1 to the id
(25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10, ["extra_column"]), # same with bs=10
(
50,
lambda x: {"foo": ["bar"] * np.random.default_rng(x["id"][0]).integers(0, 10)},
True,
8,
["extra_column", "id"],
        ),  # return a random number of rows
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None, ["extra_column"]), # same with bs=None
(5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1, ["extra_column"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_remove_columns(n, func, batched, batch_size, remove_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, remove_columns=remove_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns]
if batched is False:
expected = [{**{k: v for k, v in x.items() if k not in columns_to_remove}, **func(x)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = {k: v for k, v in _examples_to_batch(all_examples).items() if k not in columns_to_remove}
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
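
# `fn_kwargs` are forwarded to the mapping function as extra keyword arguments on every
# call, both per example and per batch.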
@pytest.mark.parametrize(
"n, func, batched, batch_size, fn_kwargs",
[
(3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, None),
(3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, {"y": 3}),
(25, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, 10, {"y": 3}),
(5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, None, {"y": 3}), # same with bs=None
(5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, -1, {"y": 3}), # same with bs<=0
],
)
def test_mapped_examples_iterable_fn_kwargs(n, func, batched, batch_size, fn_kwargs):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if fn_kwargs is None:
fn_kwargs = {}
if batched is False:
expected = [{**x, **func(x, **fn_kwargs)} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(batch, **fn_kwargs)
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
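
# With `input_columns`, the mapping function no longer receives the full example (or batch)
# dict: only the selected columns are passed, as positional arguments in the given order.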
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
(3, lambda id_: {"id+1": id_ + 1}, False, None, ["id"]), # just add 1 to the id
(25, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, 10, ["id"]), # same with bs=10
(5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, None, ["id"]), # same with bs=None
(5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, -1, ["id"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_input_columns(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [{**x, **func(*[x[col] for col in columns_to_input])} for x in all_examples]
else:
# For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
transformed_batch = func(*[batch[col] for col in columns_to_input])
all_transformed_examples.extend(_batch_to_examples(transformed_batch))
expected = _examples_to_batch(all_examples)
expected.update(_examples_to_batch(all_transformed_examples))
expected = list(_batch_to_examples(expected))
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
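
# The next group of tests runs map with formatting=FormattingConfig(format_type="arrow"):
# the mapping function then receives a pyarrow.Table (a single row per example, or one
# table per batch) and must return a pyarrow.Table, and its output is not merged with the
# input columns. A typical function under test looks like:
#
#   func = lambda t: t.append_column("id+1", pc.add(t["id"], 1))  # pa.Table -> pa.Table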
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0
(3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id
(3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0
(3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example
],
)
def test_mapped_examples_iterable_drop_last_batch_and_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
drop_last_batch=True,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
is_empty = False
if batched is False:
# `drop_last_batch` has no effect here
expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples]
else:
all_transformed_examples = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
if len(examples) < batch_size: # ignore last batch
break
batch = pa.Table.from_pylist(examples)
out = func(batch)
all_transformed_examples.extend(
out.to_pylist()
) # we don't merge with input since they're arrow tables and not dictionaries
all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size]
if all_examples:
expected = all_transformed_examples
else:
is_empty = True
if not is_empty:
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
else:
with pytest.raises(StopIteration):
next(iter(ex_iterable))
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(
3,
lambda t, index: t.append_column("id+idx", pc.add(t["id"], index)),
False,
None,
), # add the index to the id
(
25,
lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)),
True,
10,
), # add the index to the id
(5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, None), # same with bs=None
(5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, -1), # same with bs<=0
],
)
def test_mapped_examples_iterable_with_indices_and_arrow_format(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
with_indices=True,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [func(pa.Table.from_pylist([x]), i).to_pylist()[0] for i, x in enumerate(all_examples)]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch, list(range(batch_offset, batch_offset + len(batch)))).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, remove_columns",
[
(
3,
lambda t: t.append_column("id+1", pc.add(t["id"], 1)),
False,
None,
["extra_column"],
), # just add 1 to the id
(25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10, ["extra_column"]), # same with bs=10
(
50,
lambda t: pa.table({"foo": ["bar"] * np.random.default_rng(t["id"][0].as_py()).integers(0, 10)}),
True,
8,
["extra_column", "id"],
        ),  # return a random number of rows
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None, ["extra_column"]), # same with bs=None
(5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1, ["extra_column"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_remove_columns_arrow_format(n, func, batched, batch_size, remove_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
remove_columns=remove_columns,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns]
if batched is False:
expected = [
            {k: v for k, v in func(pa.Table.from_pylist([x])).to_pylist()[0].items() if k not in columns_to_remove}
for x in all_examples
]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(
[{k: v for k, v in x.items() if k not in columns_to_remove} for x in func(batch).to_pylist()]
)
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, fn_kwargs",
[
(3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, None),
(3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, {"y": 3}),
(25, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, 10, {"y": 3}),
(5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, None, {"y": 3}), # same with bs=None
(5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, -1, {"y": 3}), # same with bs<=0
],
)
def test_mapped_examples_iterable_fn_kwargs_and_arrow_format(n, func, batched, batch_size, fn_kwargs):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
fn_kwargs=fn_kwargs,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if fn_kwargs is None:
fn_kwargs = {}
if batched is False:
expected = [func(pa.Table.from_pylist([x]), **fn_kwargs).to_pylist()[0] for x in all_examples]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(batch, **fn_kwargs).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
(3, lambda id_: pa.table({"id+1": pc.add(id_, 1)}), False, None, ["id"]), # just add 1 to the id
(25, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, 10, ["id"]), # same with bs=10
(5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, None, ["id"]), # same with bs=None
(5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, -1, ["id"]), # same with bs<=0
],
)
def test_mapped_examples_iterable_input_columns_and_arrow_format(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = MappedExamplesIterable(
base_ex_iterable,
func,
batched=batched,
batch_size=batch_size,
input_columns=input_columns,
formatting=FormattingConfig(format_type="arrow"),
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [
func(*[pa.Table.from_pylist([x])[col] for col in columns_to_input]).to_pylist()[0] for x in all_examples
]
else:
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = pa.Table.from_pylist(examples)
expected.extend(func(*[batch[col] for col in columns_to_input]).to_pylist())
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
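
# FilteredExamplesIterable tests: the predicate returns a boolean per example (or a list of
# booleans per batch) and only the examples whose mask entry is truthy are kept. The same
# batch_size convention as for map applies (None or <= 0 means a single batch).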
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x: x["id"] % 2 == 0, False, None), # keep even number
(3, lambda x: [x["id"][0] % 2 == 0], True, 1), # same with bs=1
(25, lambda x: [i % 2 == 0 for i in x["id"]], True, 10), # same with bs=10
(5, lambda x: [i % 2 == 0 for i in x["id"]], True, None), # same with bs=None
(5, lambda x: [i % 2 == 0 for i in x["id"]], True, -1), # same with bs<=0
(3, lambda x: False, False, None), # return 0 examples
(3, lambda x: [False] * len(x["id"]), True, 10), # same with bs=10
],
)
def test_filtered_examples_iterable(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [x for x in all_examples if func(x)]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
mask = func(batch)
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
if expected:
assert next(iter(ex_iterable))[1] == expected[0]
    assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size",
[
(3, lambda x, index: index % 2 == 0, False, None), # keep even number
(25, lambda x, indices: [idx % 2 == 0 for idx in indices], True, 10), # same with bs=10
(5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, None), # same with bs=None
(5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, -1), # same with bs<=0
],
)
def test_filtered_examples_iterable_with_indices(n, func, batched, batch_size):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
if batched is False:
expected = [x for idx, x in enumerate(all_examples) if func(x, idx)]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
indices = list(range(batch_offset, batch_offset + len(examples)))
mask = func(batch, indices)
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
@pytest.mark.parametrize(
"n, func, batched, batch_size, input_columns",
[
(3, lambda id_: id_ % 2 == 0, False, None, ["id"]), # keep even number
(25, lambda ids_: [i % 2 == 0 for i in ids_], True, 10, ["id"]), # same with bs=10
(3, lambda ids_: [i % 2 == 0 for i in ids_], True, None, ["id"]), # same with bs=None
        (3, lambda ids_: [i % 2 == 0 for i in ids_], True, -1, ["id"]),  # same with bs<=0
],
)
def test_filtered_examples_iterable_input_columns(n, func, batched, batch_size, input_columns):
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
ex_iterable = FilteredExamplesIterable(
base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns
)
all_examples = [x for _, x in generate_examples_fn(n=n)]
columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
if batched is False:
expected = [x for x in all_examples if func(*[x[col] for col in columns_to_input])]
else:
# For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
expected = []
# If batch_size is None or <=0, we use the whole dataset as a single batch
if batch_size is None or batch_size <= 0:
batch_size = len(all_examples)
for batch_offset in range(0, len(all_examples), batch_size):
examples = all_examples[batch_offset : batch_offset + batch_size]
batch = _examples_to_batch(examples)
mask = func(*[batch[col] for col in columns_to_input])
expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
assert next(iter(ex_iterable))[1] == expected[0]
assert [x for _, x in ex_iterable] == expected
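
# Skip/Take: SkipExamplesIterable drops the first `n` examples and TakeExamplesIterable
# yields only the first `n`. Both keep the shard order fixed, i.e. shuffle_data_sources
# returns the iterable unchanged, as asserted below.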
def test_skip_examples_iterable():
total, count = 10, 2
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total})
skip_ex_iterable = SkipExamplesIterable(base_ex_iterable, n=count)
expected = list(generate_examples_fn(n=total))[count:]
assert list(skip_ex_iterable) == expected
assert (
skip_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is skip_ex_iterable
), "skip examples makes the shards order fixed"
def test_take_examples_iterable():
total, count = 10, 2
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total})
take_ex_iterable = TakeExamplesIterable(base_ex_iterable, n=count)
expected = list(generate_examples_fn(n=total))[:count]
assert list(take_ex_iterable) == expected
assert (
take_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is take_ex_iterable
), "skip examples makes the shards order fixed"
def test_vertically_concatenated_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2]
assert [x for _, x in concatenated_ex_iterable] == expected
def test_vertically_concatenated_examples_iterable_with_different_columns():
    # having different columns is supported, though iterable datasets fill the missing data with nulls
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2]
assert [x for _, x in concatenated_ex_iterable] == expected
def test_vertically_concatenated_examples_iterable_shuffle_data_sources():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
rng = np.random.default_rng(42)
shuffled_ex_iterable = concatenated_ex_iterable.shuffle_data_sources(rng)
# make sure the list of examples iterables is shuffled, and each examples iterable is shuffled
expected = [x for _, x in ex_iterable2.shuffle_data_sources(rng)] + [
x for _, x in ex_iterable1.shuffle_data_sources(rng)
]
assert [x for _, x in shuffled_ex_iterable] == expected
def test_horizontally_concatenated_examples_iterable():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
with pytest.raises(ValueError): # column "id" is duplicated -> raise an error
list(concatenated_ex_iterable)
ex_iterable2 = MappedExamplesIterable(ex_iterable2, lambda x: x, remove_columns=["id"])
concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
expected = [{**x, **y} for (_, x), (_, y) in zip(ex_iterable1, ex_iterable2)]
assert [x for _, x in concatenated_ex_iterable] == expected
assert (
concatenated_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is concatenated_ex_iterable
), "horizontally concatenated examples makes the shards order fixed"
@pytest.mark.parametrize(
"ex_iterable",
[
ExamplesIterable(generate_examples_fn, {}),
ShuffledDataSourcesExamplesIterable(generate_examples_fn, {}, np.random.default_rng(42)),
SelectColumnsIterable(ExamplesIterable(generate_examples_fn, {}), ["id"]),
StepExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 2, 0),
CyclingMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
VerticallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
HorizontallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]),
RandomlyCyclingMultiSourcesExamplesIterable(
[ExamplesIterable(generate_examples_fn, {})], np.random.default_rng(42)
),
MappedExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: x),
MappedExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: x),
FilteredExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: True),
FilteredExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: True),
BufferShuffledExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10, np.random.default_rng(42)),
SkipExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10),
TakeExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10),
TypedExamplesIterable(
ExamplesIterable(generate_examples_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={}
),
],
)
def test_no_iter_arrow(ex_iterable: _BaseExamplesIterable):
assert ex_iterable.iter_arrow is None
@pytest.mark.parametrize(
"ex_iterable",
[
ArrowExamplesIterable(generate_tables_fn, {}),
ShuffledDataSourcesArrowExamplesIterable(generate_tables_fn, {}, np.random.default_rng(42)),
SelectColumnsIterable(ArrowExamplesIterable(generate_tables_fn, {}), ["id"]),
# StepExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 2, 0), # not implemented
# CyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented
VerticallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]),
# HorizontallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented
# RandomlyCyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})], np.random.default_rng(42)), # not implemented
MappedExamplesIterable(
ExamplesIterable(generate_examples_fn, {}), lambda t: t, formatting=FormattingConfig(format_type="arrow")
),
MappedExamplesIterable(
ArrowExamplesIterable(generate_tables_fn, {}),
lambda t: t,
formatting=FormattingConfig(format_type="arrow"),
),
FilteredExamplesIterable(
ExamplesIterable(generate_examples_fn, {}),
lambda t: True,
formatting=FormattingConfig(format_type="arrow"),
),
FilteredExamplesIterable(
ArrowExamplesIterable(generate_tables_fn, {}),
lambda t: True,
formatting=FormattingConfig(format_type="arrow"),
),
# BufferShuffledExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10, np.random.default_rng(42)), # not implemented
# SkipExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented
# TakeExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented
TypedExamplesIterable(
ArrowExamplesIterable(generate_tables_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={}
),
],
)
def test_iter_arrow(ex_iterable: _BaseExamplesIterable):
assert ex_iterable.iter_arrow is not None
key, pa_table = next(ex_iterable.iter_arrow())
assert isinstance(pa_table, pa.Table)
############################
#
# IterableDataset tests
#
############################
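
# A minimal usage sketch of the API exercised in this block, using the module's shared
# example generator (generate_examples_fn):
#
#   dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {}))
#   first_example = next(iter(dataset))  # examples are produced lazily, one dict at a time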
def test_iterable_dataset():
dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {}))
expected = [x for _, x in generate_examples_fn()]
assert next(iter(dataset)) == expected[0]
assert list(dataset) == expected
def test_iterable_dataset_from_generator():
data = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
def gen():
yield from data
dataset = IterableDataset.from_generator(gen)
assert isinstance(dataset, IterableDataset)
assert list(dataset) == data
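
# When a value in gen_kwargs is a list, from_generator can split it across shards, so the
# dataset built below reports n_shards == len(shard_names).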
def test_iterable_dataset_from_generator_with_shards():
def gen(shard_names):
for shard_name in shard_names:
for i in range(10):
yield {"shard_name": shard_name, "i": i}
shard_names = [f"data{shard_idx}.txt" for shard_idx in range(4)]
dataset = IterableDataset.from_generator(gen, gen_kwargs={"shard_names": shard_names})
assert isinstance(dataset, IterableDataset)
assert dataset.n_shards == len(shard_names)
def test_iterable_dataset_from_file(dataset: IterableDataset, arrow_file: str):
with assert_arrow_memory_doesnt_increase():
dataset_from_file = IterableDataset.from_file(arrow_file)
expected_features = dataset._resolve_features().features
assert dataset_from_file.features.type == expected_features.type
assert dataset_from_file.features == expected_features
assert isinstance(dataset_from_file, IterableDataset)
assert list(dataset_from_file) == list(dataset)
@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_streaming():
import pyspark
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
data = [
("0", 0, 0.0),
("1", 1, 1.0),
("2", 2, 2.0),
("3", 3, 3.0),
]
df = spark.createDataFrame(data, "col_1: string, col_2: int, col_3: float")
dataset = IterableDataset.from_spark(df)
assert isinstance(dataset, IterableDataset)
results = []
for ex in dataset:
results.append(ex)
assert results == [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_streaming_features():
import PIL.Image
import pyspark
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
data = [(0, np.arange(4 * 4 * 3).reshape(4, 4, 3).tolist())]
df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>")
features = Features({"idx": Value("int64"), "image": Image()})
dataset = IterableDataset.from_spark(
df,
features=features,
)
assert isinstance(dataset, IterableDataset)
results = []
for ex in dataset:
results.append(ex)
assert len(results) == 1
    assert isinstance(results[0]["image"], PIL.Image.Image)
@require_torch
def test_iterable_dataset_torch_integration():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
import torch.utils.data
assert isinstance(dataset, torch.utils.data.IterableDataset)
assert isinstance(dataset, IterableDataset)
assert dataset._ex_iterable is ex_iterable
@require_torch
def test_iterable_dataset_torch_picklable():
import pickle
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable, formatting=FormattingConfig(format_type="torch"))
reloaded_dataset = pickle.loads(pickle.dumps(dataset))
import torch.utils.data
assert isinstance(reloaded_dataset, IterableDataset)
assert isinstance(reloaded_dataset, torch.utils.data.IterableDataset)
assert reloaded_dataset._formatting.format_type == "torch"
assert len(list(dataset)) == len(list(reloaded_dataset))
@require_torch
def test_iterable_dataset_with_format_torch():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
from torch.utils.data import DataLoader
dataloader = DataLoader(dataset)
assert len(list(dataloader)) == len(list(ex_iterable))
@require_torch
def test_iterable_dataset_torch_dataloader_parallel():
from torch.utils.data import DataLoader
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
dataloader = DataLoader(dataset, num_workers=2, batch_size=None)
result = list(dataloader)
expected = [example for _, example in ex_iterable]
assert len(result) == len(expected)
assert {str(x) for x in result} == {str(x) for x in expected}
@require_torch
@pytest.mark.filterwarnings("ignore:This DataLoader will create:UserWarning")
@pytest.mark.parametrize("n_shards, num_workers", [(2, 1), (2, 2), (3, 2), (2, 3)])
def test_sharded_iterable_dataset_torch_dataloader_parallel(n_shards, num_workers):
from torch.utils.data import DataLoader
ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}.txt" for i in range(n_shards)]})
dataset = IterableDataset(ex_iterable)
dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers)
result = list(dataloader)
expected = [example for _, example in ex_iterable]
assert len(result) == len(expected)
assert {str(x) for x in result} == {str(x) for x in expected}
@require_torch
@pytest.mark.integration
@pytest.mark.parametrize("num_workers", [1, 2])
def test_iterable_dataset_from_hub_torch_dataloader_parallel(num_workers, tmp_path):
from torch.utils.data import DataLoader
dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=str(tmp_path), streaming=True, split="train")
dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers)
result = list(dataloader)
assert len(result) == 2
@pytest.mark.parametrize("batch_size", [4, 5])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_iterable_dataset_iter_batch(batch_size, drop_last_batch):
n = 25
dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {"n": n}))
all_examples = [ex for _, ex in generate_examples_fn(n=n)]
expected = []
for i in range(0, len(all_examples), batch_size):
if len(all_examples[i : i + batch_size]) < batch_size and drop_last_batch:
continue
expected.append(_examples_to_batch(all_examples[i : i + batch_size]))
assert next(iter(dataset.iter(batch_size, drop_last_batch=drop_last_batch))) == expected[0]
assert list(dataset.iter(batch_size, drop_last_batch=drop_last_batch)) == expected
def test_iterable_dataset_info():
info = DatasetInfo(description="desc", citation="@article{}", size_in_bytes=42)
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable, info=info)
assert dataset.info == info
assert dataset.description == info.description
assert dataset.citation == info.citation
assert dataset.size_in_bytes == info.size_in_bytes
def test_iterable_dataset_set_epoch(dataset: IterableDataset):
assert dataset._epoch == 0
dataset.set_epoch(42)
assert dataset._epoch == 42
@pytest.mark.parametrize("seed", [None, 42, 1337])
@pytest.mark.parametrize("epoch", [None, 0, 1, 10])
def test_iterable_dataset_set_epoch_of_shuffled_dataset(dataset: IterableDataset, seed, epoch):
buffer_size = 10
shuffled_dataset = dataset.shuffle(seed, buffer_size=buffer_size)
base_generator = shuffled_dataset._shuffling.generator
if epoch is not None:
shuffled_dataset.set_epoch(epoch)
effective_generator = shuffled_dataset._effective_generator()
assert effective_generator is not None
if epoch is None or epoch == 0:
assert is_rng_equal(base_generator, shuffled_dataset._effective_generator())
else:
assert not is_rng_equal(base_generator, shuffled_dataset._effective_generator())
effective_seed = deepcopy(base_generator).integers(0, 1 << 63) - epoch
assert is_rng_equal(np.random.default_rng(effective_seed), shuffled_dataset._effective_generator())
def test_iterable_dataset_map(
dataset: IterableDataset,
):
func = lambda x: {"id+1": x["id"] + 1} # noqa: E731
mapped_dataset = dataset.map(func)
assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable)
assert mapped_dataset._ex_iterable.function is func
assert mapped_dataset._ex_iterable.batched is False
assert next(iter(mapped_dataset)) == {**next(iter(dataset)), **func(next(iter(generate_examples_fn()))[1])}
def test_iterable_dataset_map_batched(
dataset: IterableDataset,
):
func = lambda x: {"id+1": [i + 1 for i in x["id"]]} # noqa: E731
batch_size = 3
dataset = dataset.map(func, batched=True, batch_size=batch_size)
assert isinstance(dataset._ex_iterable, MappedExamplesIterable)
assert dataset._ex_iterable.function is func
assert dataset._ex_iterable.batch_size == batch_size
assert next(iter(dataset)) == {"id": 0, "id+1": 1}
def test_iterable_dataset_map_complex_features(
dataset: IterableDataset,
):
# https://github.com/huggingface/datasets/issues/3505
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"})
features = Features(
{
"id": Value("int64"),
"label": Value("string"),
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
dataset = dataset.cast_column("label", ClassLabel(names=["negative", "positive"]))
dataset = dataset.map(lambda x: {"id+1": x["id"] + 1, **x})
assert isinstance(dataset._ex_iterable, MappedExamplesIterable)
features["label"] = ClassLabel(names=["negative", "positive"])
assert [{k: v for k, v in ex.items() if k != "id+1"} for ex in dataset] == [
features.encode_example(ex) for _, ex in ex_iterable
]
def test_iterable_dataset_map_with_features(dataset: IterableDataset) -> None:
# https://github.com/huggingface/datasets/issues/3888
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"})
features_before_map = Features(
{
"id": Value("int64"),
"label": Value("string"),
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features_before_map))
assert dataset.info.features is not None
assert dataset.info.features == features_before_map
features_after_map = Features(
{
"id": Value("int64"),
"label": Value("string"),
"target": Value("string"),
}
)
dataset = dataset.map(lambda x: {"target": x["label"]}, features=features_after_map)
assert dataset.info.features is not None
assert dataset.info.features == features_after_map
def test_iterable_dataset_map_with_fn_kwargs(dataset: IterableDataset) -> None:
fn_kwargs = {"y": 1}
mapped_dataset = dataset.map(lambda x, y: {"id+y": x["id"] + y}, fn_kwargs=fn_kwargs)
assert mapped_dataset._ex_iterable.batched is False
assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1}
batch_size = 3
mapped_dataset = dataset.map(
lambda x, y: {"id+y": [i + y for i in x["id"]]}, batched=True, batch_size=batch_size, fn_kwargs=fn_kwargs
)
assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable)
assert mapped_dataset._ex_iterable.batch_size == batch_size
assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1}
def test_iterable_dataset_filter(dataset: IterableDataset) -> None:
fn_kwargs = {"y": 1}
filtered_dataset = dataset.filter(lambda x, y: x["id"] == y, fn_kwargs=fn_kwargs)
assert filtered_dataset._ex_iterable.batched is False
assert next(iter(filtered_dataset)) == {"id": 1}
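
# dataset.shuffle(seed, buffer_size=...) does two things that the test below checks: it
# shuffles the order of the underlying shards (data sources) and wraps the examples in a
# BufferShuffledExamplesIterable, which performs an approximate shuffle using a buffer of
# `buffer_size` examples; set_epoch(...) then re-seeds the effective generator.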
@pytest.mark.parametrize("seed", [42, 1337, 101010, 123456])
@pytest.mark.parametrize("epoch", [None, 0, 1])
def test_iterable_dataset_shuffle(dataset: IterableDataset, seed, epoch):
buffer_size = 3
dataset = deepcopy(dataset)
dataset._ex_iterable.kwargs["filepaths"] = ["0.txt", "1.txt"]
dataset = dataset.shuffle(seed, buffer_size=buffer_size)
assert isinstance(dataset._shuffling, ShufflingConfig)
assert isinstance(dataset._shuffling.generator, np.random.Generator)
assert is_rng_equal(dataset._shuffling.generator, np.random.default_rng(seed))
    # The effective seed is drawn from the seeded generator and offset by the epoch (subtracted to avoid overflow)
if epoch is None or epoch == 0:
effective_seed = seed
else:
dataset.set_epoch(epoch)
effective_seed = np.random.default_rng(seed).integers(0, 1 << 63) - epoch
# Shuffling adds a shuffle buffer
expected_first_example_index = next(
iter(BufferShuffledExamplesIterable._iter_random_indices(np.random.default_rng(effective_seed), buffer_size))
)
assert isinstance(dataset._ex_iterable, BufferShuffledExamplesIterable)
# It also shuffles the underlying examples iterable
expected_ex_iterable = ExamplesIterable(
generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]}
).shuffle_data_sources(np.random.default_rng(effective_seed))
assert isinstance(dataset._ex_iterable.ex_iterable, ExamplesIterable)
assert next(iter(dataset)) == list(islice(expected_ex_iterable, expected_first_example_index + 1))[-1][1]
@pytest.mark.parametrize(
"features",
[
None,
Features(
{
"id": Value("int64"),
"label": Value("int64"),
}
),
Features(
{
"id": Value("int64"),
"label": ClassLabel(names=["negative", "positive"]),
}
),
],
)
def test_iterable_dataset_features(features):
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
if features:
expected = [features.encode_example(x) for _, x in ex_iterable]
else:
expected = [x for _, x in ex_iterable]
assert list(dataset) == expected
def test_iterable_dataset_features_cast_to_python():
ex_iterable = ExamplesIterable(
generate_examples_fn, {"timestamp": pd.Timestamp(2020, 1, 1), "array": np.ones(5), "n": 1}
)
features = Features(
{
"id": Value("int64"),
"timestamp": Value("timestamp[us]"),
"array": [Value("int64")],
}
)
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
assert list(dataset) == [{"timestamp": pd.Timestamp(2020, 1, 1).to_pydatetime(), "array": [1] * 5, "id": 0}]
@pytest.mark.parametrize("format_type", [None, "torch", "python", "tf", "tensorflow", "np", "numpy", "jax"])
def test_iterable_dataset_with_format(dataset: IterableDataset, format_type):
formatted_dataset = dataset.with_format(format_type)
assert formatted_dataset._formatting.format_type == get_format_type_from_alias(format_type)
@require_torch
def test_iterable_dataset_is_torch_iterable_dataset(dataset: IterableDataset):
from torch.utils.data import DataLoader, _DatasetKind
dataloader = DataLoader(dataset)
assert dataloader._dataset_kind == _DatasetKind.Iterable
out = list(dataloader)
assert len(out) == DEFAULT_N_EXAMPLES
@pytest.mark.parametrize("n", [0, 2, int(1e10)])
def test_iterable_dataset_skip(dataset: IterableDataset, n):
skip_dataset = dataset.skip(n)
assert isinstance(skip_dataset._ex_iterable, SkipExamplesIterable)
assert skip_dataset._ex_iterable.n == n
assert list(skip_dataset) == list(dataset)[n:]
@pytest.mark.parametrize("n", [0, 2, int(1e10)])
def test_iterable_dataset_take(dataset: IterableDataset, n):
take_dataset = dataset.take(n)
assert isinstance(take_dataset._ex_iterable, TakeExamplesIterable)
assert take_dataset._ex_iterable.n == n
assert list(take_dataset) == list(dataset)[:n]
@pytest.mark.parametrize("method", ["skip", "take"])
def test_iterable_dataset_shuffle_after_skip_or_take(method):
seed = 42
n, n_shards = 3, 10
count = 7
ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "filepaths": [f"{i}.txt" for i in range(n_shards)]})
dataset = IterableDataset(ex_iterable)
dataset = dataset.skip(n) if method == "skip" else dataset.take(count)
shuffled_dataset = dataset.shuffle(seed, buffer_size=DEFAULT_N_EXAMPLES)
    # shuffling a skip/take dataset should keep the same examples and not shuffle the shards
key = lambda x: f"{x['filepath']}_{x['id']}" # noqa: E731
assert sorted(dataset, key=key) == sorted(shuffled_dataset, key=key)
def test_iterable_dataset_add_column(dataset_with_several_columns):
new_column = list(range(DEFAULT_N_EXAMPLES))
new_dataset = dataset_with_several_columns.add_column("new_column", new_column)
assert list(new_dataset) == [
{**example, "new_column": idx} for idx, example in enumerate(dataset_with_several_columns)
]
new_dataset = new_dataset._resolve_features()
assert "new_column" in new_dataset.column_names
def test_iterable_dataset_rename_column(dataset_with_several_columns):
new_dataset = dataset_with_several_columns.rename_column("id", "new_id")
assert list(new_dataset) == [
{("new_id" if k == "id" else k): v for k, v in example.items()} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
# rename the column if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().rename_column("id", "new_id")
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert "id" not in new_dataset.column_names
assert "new_id" in new_dataset.column_names
def test_iterable_dataset_rename_columns(dataset_with_several_columns):
column_mapping = {"id": "new_id", "filepath": "filename"}
new_dataset = dataset_with_several_columns.rename_columns(column_mapping)
assert list(new_dataset) == [
{column_mapping.get(k, k): v for k, v in example.items()} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
# rename the columns if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().rename_columns(column_mapping)
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c not in new_dataset.column_names for c in ["id", "filepath"])
assert all(c in new_dataset.column_names for c in ["new_id", "filename"])
def test_iterable_dataset_remove_columns(dataset_with_several_columns):
new_dataset = dataset_with_several_columns.remove_columns("id")
assert list(new_dataset) == [
{k: v for k, v in example.items() if k != "id"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
new_dataset = dataset_with_several_columns.remove_columns(["id", "filepath"])
assert list(new_dataset) == [
{k: v for k, v in example.items() if k != "id" and k != "filepath"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
assert new_dataset.column_names is None
# remove the columns if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().remove_columns(["id", "filepath"])
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c not in new_dataset.features for c in ["id", "filepath"])
assert all(c not in new_dataset.column_names for c in ["id", "filepath"])
def test_iterable_dataset_select_columns(dataset_with_several_columns):
new_dataset = dataset_with_several_columns.select_columns("id")
assert list(new_dataset) == [
{k: v for k, v in example.items() if k == "id"} for example in dataset_with_several_columns
]
assert new_dataset.features is None
new_dataset = dataset_with_several_columns.select_columns(["id", "filepath"])
assert list(new_dataset) == [
{k: v for k, v in example.items() if k in ("id", "filepath")} for example in dataset_with_several_columns
]
assert new_dataset.features is None
# select the columns if ds.features was not None
new_dataset = dataset_with_several_columns._resolve_features().select_columns(["id", "filepath"])
assert new_dataset.features is not None
assert new_dataset.column_names is not None
assert all(c in new_dataset.features for c in ["id", "filepath"])
assert all(c in new_dataset.column_names for c in ["id", "filepath"])
def test_iterable_dataset_cast_column():
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10})
features = Features({"id": Value("int64"), "label": Value("int64")})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
casted_dataset = dataset.cast_column("label", Value("bool"))
casted_features = features.copy()
casted_features["label"] = Value("bool")
assert list(casted_dataset) == [casted_features.encode_example(ex) for _, ex in ex_iterable]
def test_iterable_dataset_cast():
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10})
features = Features({"id": Value("int64"), "label": Value("int64")})
dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
new_features = Features({"id": Value("int64"), "label": Value("bool")})
casted_dataset = dataset.cast(new_features)
assert list(casted_dataset) == [new_features.encode_example(ex) for _, ex in ex_iterable]
def test_iterable_dataset_resolve_features():
ex_iterable = ExamplesIterable(generate_examples_fn, {})
dataset = IterableDataset(ex_iterable)
assert dataset.features is None
assert dataset.column_names is None
dataset = dataset._resolve_features()
assert dataset.features == Features(
{
"id": Value("int64"),
}
)
assert dataset.column_names == ["id"]
def test_iterable_dataset_resolve_features_keep_order():
def gen():
yield from zip(range(3), [{"a": 1}, {"c": 1}, {"b": 1}])
ex_iterable = ExamplesIterable(gen, {})
dataset = IterableDataset(ex_iterable)._resolve_features()
# columns appear in order of appearance in the dataset
assert list(dataset.features) == ["a", "c", "b"]
assert dataset.column_names == ["a", "c", "b"]
def test_iterable_dataset_with_features_fill_with_none():
def gen():
yield from zip(range(2), [{"a": 1}, {"b": 1}])
ex_iterable = ExamplesIterable(gen, {})
info = DatasetInfo(features=Features({"a": Value("int32"), "b": Value("int32")}))
dataset = IterableDataset(ex_iterable, info=info)
assert list(dataset) == [{"a": 1, "b": None}, {"b": 1, "a": None}]
def test_concatenate_datasets():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
dataset2 = IterableDataset(ex_iterable2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert list(concatenated_dataset) == list(dataset1) + list(dataset2)
def test_concatenate_datasets_resolves_features():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
dataset2 = IterableDataset(ex_iterable2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert concatenated_dataset.features is not None
assert sorted(concatenated_dataset.features) == ["id", "label"]
def test_concatenate_datasets_with_different_columns():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {})
dataset2 = IterableDataset(ex_iterable2)
# missing column "label" -> it should be replaced with nulls
extended_dataset2_list = [{"label": None, **x} for x in dataset2]
concatenated_dataset = concatenate_datasets([dataset1, dataset2])
assert list(concatenated_dataset) == list(dataset1) + extended_dataset2_list
# change order
concatenated_dataset = concatenate_datasets([dataset2, dataset1])
assert list(concatenated_dataset) == extended_dataset2_list + list(dataset1)
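
# concatenate_datasets(..., axis=1) zips the datasets example by example: duplicated column
# names raise a ValueError, and when the datasets have different lengths the missing rows
# are filled with None, as the tests below show.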
def test_concatenate_datasets_axis_1():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
dataset2 = IterableDataset(ex_iterable2)
with pytest.raises(ValueError): # column "id" is duplicated -> raise an error
concatenate_datasets([dataset1, dataset2], axis=1)
concatenated_dataset = concatenate_datasets([dataset1, dataset2.remove_columns("id")], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, dataset2)]
def test_concatenate_datasets_axis_1_resolves_features():
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
dataset1 = IterableDataset(ex_iterable1)
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
dataset2 = IterableDataset(ex_iterable2).remove_columns("id")
concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1)
assert concatenated_dataset.features is not None
assert sorted(concatenated_dataset.features) == ["id", "label1", "label2"]
def test_concatenate_datasets_axis_1_with_different_lengths():
n1 = 10
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10, "n": n1})
dataset1 = IterableDataset(ex_iterable1)
n2 = 5
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5, "n": n2})
dataset2 = IterableDataset(ex_iterable2).remove_columns("id")
# missing rows -> they should be replaced with nulls
extended_dataset2_list = list(dataset2) + [{"label2": None}] * (n1 - n2)
concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, extended_dataset2_list)]
# change order
concatenated_dataset = concatenate_datasets([dataset2, dataset1], axis=1)
assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(extended_dataset2_list, dataset1)]
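
# interleave_datasets alternates between the source datasets: round-robin when no
# probabilities are given, otherwise picking a source at random for each example using the
# seeded generator. stopping_strategy="first_exhausted" stops once one source runs out of
# examples, while "all_exhausted" keeps cycling (oversampling the smaller sources) until
# every source has been exhausted.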
@pytest.mark.parametrize(
"probas, seed, expected_length, stopping_strategy",
[
(None, None, 3 * (DEFAULT_N_EXAMPLES - 1) + 1, "first_exhausted"),
([1, 0, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"),
([0, 1, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"),
([0.2, 0.5, 0.3], 42, None, "first_exhausted"),
([0.1, 0.1, 0.8], 1337, None, "first_exhausted"),
([0.5, 0.2, 0.3], 101010, None, "first_exhausted"),
(None, None, 3 * DEFAULT_N_EXAMPLES, "all_exhausted"),
([0.2, 0.5, 0.3], 42, None, "all_exhausted"),
([0.1, 0.1, 0.8], 1337, None, "all_exhausted"),
([0.5, 0.2, 0.3], 101010, None, "all_exhausted"),
],
)
def test_interleave_datasets(dataset: IterableDataset, probas, seed, expected_length, stopping_strategy):
d1 = dataset
d2 = dataset.map(lambda x: {"id+1": x["id"] + 1, **x})
d3 = dataset.with_format("python")
datasets = [d1, d2, d3]
merged_dataset = interleave_datasets(
datasets, probabilities=probas, seed=seed, stopping_strategy=stopping_strategy
)
def fill_default(example):
return {"id": None, "id+1": None, **example}
# Check the examples iterable
assert isinstance(
merged_dataset._ex_iterable, (CyclingMultiSourcesExamplesIterable, RandomlyCyclingMultiSourcesExamplesIterable)
)
# Check that it is deterministic
if seed is not None:
merged_dataset2 = interleave_datasets(
[d1, d2, d3], probabilities=probas, seed=seed, stopping_strategy=stopping_strategy
)
assert list(merged_dataset) == list(merged_dataset2)
# Check features
assert merged_dataset.features == Features({"id": Value("int64"), "id+1": Value("int64")})
# Check first example
if seed is not None:
rng = np.random.default_rng(seed)
i = next(iter(RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(rng, len(datasets), p=probas)))
assert next(iter(merged_dataset)) == fill_default(next(iter(datasets[i])))
else:
assert any(next(iter(merged_dataset)) == fill_default(next(iter(dataset))) for dataset in datasets)
    # Compute length in case it's random
if expected_length is None:
expected_length = 0
counts = np.array([len(list(d)) for d in datasets])
bool_strategy_func = np.all if stopping_strategy == "all_exhausted" else np.any
rng = np.random.default_rng(seed)
for i in RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(rng, len(datasets), p=probas):
counts[i] -= 1
expected_length += 1
if bool_strategy_func(counts <= 0):
break
# Check length
assert len(list(merged_dataset)) == expected_length
def test_interleave_datasets_with_features(
dataset: IterableDataset,
):
features = Features(
{
"id": Value("int64"),
"label": ClassLabel(names=["negative", "positive"]),
}
)
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0})
dataset_with_features = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
merged_dataset = interleave_datasets([dataset, dataset_with_features])
assert merged_dataset.features == features
def test_interleave_datasets_with_oversampling():
# Test hardcoded results
d1 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [0, 1, 2]])), {}))
d2 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [10, 11, 12, 13]])), {}))
d3 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [20, 21, 22, 23, 24]])), {}))
expected_values = [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
# Check oversampling strategy without probabilities
assert [x["a"] for x in interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")] == expected_values
# Check oversampling strategy with probabilities
expected_values = [20, 0, 21, 10, 1, 22, 23, 24, 2, 0, 1, 20, 11, 21, 2, 0, 12, 1, 22, 13]
values = [
x["a"]
for x in interleave_datasets(
[d1, d2, d3], probabilities=[0.5, 0.2, 0.3], seed=42, stopping_strategy="all_exhausted"
)
]
assert values == expected_values
@require_torch
def test_with_format_torch(dataset_with_several_columns: IterableDataset):
import torch
dset = dataset_with_several_columns.with_format(type="torch")
example = next(iter(dset))
batch = next(iter(dset.iter(batch_size=3)))
assert len(example) == 3
assert isinstance(example["id"], torch.Tensor)
assert list(example["id"].shape) == []
assert example["id"].item() == 0
assert isinstance(batch["id"], torch.Tensor)
assert isinstance(example["filepath"], list)
assert isinstance(example["filepath"][0], str)
assert example["filepath"][0] == "data0.txt"
assert isinstance(batch["filepath"], list)
assert isinstance(example["metadata"], dict)
assert isinstance(example["metadata"]["sources"], list)
assert isinstance(example["metadata"]["sources"][0], str)
assert isinstance(batch["metadata"], list)
@require_tf
def test_with_format_tf(dataset_with_several_columns: IterableDataset):
import tensorflow as tf
dset = dataset_with_several_columns.with_format(type="tensorflow")
example = next(iter(dset))
batch = next(iter(dset.iter(batch_size=3)))
assert isinstance(example["id"], tf.Tensor)
assert list(example["id"].shape) == []
assert example["id"].numpy().item() == 0
assert isinstance(batch["id"], tf.Tensor)
assert isinstance(example["filepath"], tf.Tensor)
assert example["filepath"][0] == b"data0.txt"
assert isinstance(batch["filepath"], tf.Tensor)
assert isinstance(example["metadata"], dict)
assert isinstance(example["metadata"]["sources"], tf.Tensor)
assert isinstance(batch["metadata"], list)
def test_map_array_are_not_converted_back_to_lists(dataset: IterableDataset):
def func(example):
return {"array": np.array([1, 2, 3])}
dset_test = dataset.map(func)
example = next(iter(dset_test))
# not aligned with Dataset.map because we don't convert back to lists after map()
assert isinstance(example["array"], np.ndarray)
def test_formatted_map(dataset: IterableDataset):
dataset = dataset.with_format("np")
assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray)
dataset = dataset.with_format(None)
assert isinstance(next(dataset.iter(batch_size=3))["id"], list)
def add_one_numpy(example):
assert isinstance(example["id"], np.ndarray)
return {"id": example["id"] + 1}
dataset = dataset.with_format("np")
dataset = dataset.map(add_one_numpy, batched=True)
assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray)
dataset = dataset.with_format(None)
assert isinstance(next(dataset.iter(batch_size=3))["id"], list)
@pytest.mark.parametrize("n_shards1, n_shards2, num_workers", [(2, 1, 1), (2, 2, 2), (1, 3, 1), (4, 3, 3)])
def test_interleave_dataset_with_sharding(n_shards1, n_shards2, num_workers):
from torch.utils.data import DataLoader
ex_iterable1 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-1.txt" for i in range(n_shards1)]})
dataset1 = IterableDataset(ex_iterable1).with_format("torch")
ex_iterable2 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-2.txt" for i in range(n_shards2)]})
dataset2 = IterableDataset(ex_iterable2).with_format("torch")
dataset_merged = interleave_datasets([dataset1, dataset2], stopping_strategy="first_exhausted")
assert dataset_merged.n_shards == min(n_shards1, n_shards2)
dataloader = DataLoader(dataset_merged, batch_size=None, num_workers=num_workers)
result = list(dataloader)
expected_length = 2 * min(
len([example for _, example in ex_iterable1]), len([example for _, example in ex_iterable2])
)
# some samples may be missing because the stopping strategy is applied per process
assert expected_length - num_workers <= len(result) <= expected_length
assert len(result) == len({str(x) for x in result})
def filter_func(batch):
return batch["id"] == 4
def map_func(batch):
batch["id"] *= 2
return batch
def test_pickle_after_many_transforms(dataset_with_several_columns):
dataset = dataset_with_several_columns
dataset = dataset.remove_columns(["filepath"])
dataset = dataset.take(5)
dataset = dataset.map(map_func)
dataset = dataset.shuffle()
dataset = dataset.skip(1)
dataset = dataset.filter(filter_func)
dataset = dataset.add_column("additional_col", ["something"])
dataset = dataset.rename_column("metadata", "metadata1")
dataset = dataset.rename_columns({"id": "id1", "metadata1": "metadata2"})
dataset = dataset.select_columns(["id1", "additional_col"])
unpickled_dataset = pickle.loads(pickle.dumps(dataset))
assert list(unpickled_dataset) == list(dataset)
| datasets/tests/test_iterable_dataset.py/0 | {
"file_path": "datasets/tests/test_iterable_dataset.py",
"repo_id": "datasets",
"token_count": 36328
} | 71 |
import unittest
from unittest.mock import patch
import pytest
from pytest import CaptureFixture
from datasets.utils import (
are_progress_bars_disabled,
disable_progress_bars,
enable_progress_bars,
tqdm,
)
class TestTqdmUtils(unittest.TestCase):
@pytest.fixture(autouse=True)
def capsys(self, capsys: CaptureFixture) -> None:
"""Workaround to make capsys work in unittest framework.
Capsys is a convenient pytest fixture to capture stdout.
See https://waylonwalker.com/pytest-capsys/.
Taken from https://github.com/pytest-dev/pytest/issues/2504#issuecomment-309475790.
"""
self.capsys = capsys
def setUp(self) -> None:
"""Get verbosity to set it back after the tests."""
self._previous_are_progress_bars_disabled = are_progress_bars_disabled()
return super().setUp()
def tearDown(self) -> None:
"""Set back progress bars verbosity as before testing."""
if self._previous_are_progress_bars_disabled:
disable_progress_bars()
else:
enable_progress_bars()
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_helpers(self) -> None:
"""Test helpers to enable/disable progress bars."""
disable_progress_bars()
self.assertTrue(are_progress_bars_disabled())
enable_progress_bars()
self.assertFalse(are_progress_bars_disabled())
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", True)
def test_cannot_enable_tqdm_when_env_variable_is_set(self) -> None:
"""
Test helpers cannot enable/disable progress bars when
`HF_DATASETS_DISABLE_PROGRESS_BARS` is set.
"""
disable_progress_bars()
self.assertTrue(are_progress_bars_disabled())
with self.assertWarns(UserWarning):
enable_progress_bars()
self.assertTrue(are_progress_bars_disabled()) # Still disabled !
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", False)
def test_cannot_disable_tqdm_when_env_variable_is_set(self) -> None:
"""
Test helpers cannot enable/disable progress bars when
`HF_DATASETS_DISABLE_PROGRESS_BARS` is set.
"""
enable_progress_bars()
self.assertFalse(are_progress_bars_disabled())
with self.assertWarns(UserWarning):
disable_progress_bars()
self.assertFalse(are_progress_bars_disabled()) # Still enabled !
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_disabled(self) -> None:
"""Test TQDM not outputting anything when globally disabled."""
disable_progress_bars()
for _ in tqdm(range(10)):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertEqual(captured.err, "")
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_disabled_cannot_be_forced(self) -> None:
"""Test TQDM cannot be forced when globally disabled."""
disable_progress_bars()
for _ in tqdm(range(10), disable=False):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertEqual(captured.err, "")
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_can_be_disabled_when_globally_enabled(self) -> None:
"""Test TQDM can still be locally disabled even when globally enabled."""
enable_progress_bars()
for _ in tqdm(range(10), disable=True):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertEqual(captured.err, "")
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_enabled(self) -> None:
"""Test TQDM work normally when globally enabled."""
enable_progress_bars()
for _ in tqdm(range(10)):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertIn("10/10", captured.err) # tqdm log
| datasets/tests/test_tqdm.py/0 | {
"file_path": "datasets/tests/test_tqdm.py",
"repo_id": "datasets",
"token_count": 1804
} | 72 |
# The “Deep” in Reinforcement Learning [[deep-rl]]
<Tip>
What we've talked about so far is Reinforcement Learning. But where does the "Deep" come into play?
</Tip>
Deep Reinforcement Learning introduces **deep neural networks to solve Reinforcement Learning problems** — hence the name “deep”.
For instance, in the next unit, we’ll learn about two value-based algorithms: Q-Learning (classic Reinforcement Learning) and then Deep Q-Learning.
You’ll see the difference is that, in the first approach, **we use a traditional algorithm** to create a Q table that helps us find what action to take for each state.
In the second approach, **we will use a Neural Network** (to approximate the Q value).
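To make the difference concrete, here is a tiny, hypothetical sketch (not the course's code): a Q table is just an array indexed by state and action, while Deep Q-Learning replaces that lookup with a neural network that takes the state and outputs one Q value per action.
```python
# A minimal, illustrative sketch (not the course's implementation).
import numpy as np
import torch
import torch.nn as nn

n_states, n_actions = 16, 4

# Classic Q-Learning: the Q values live in a table indexed by (state, action).
q_table = np.zeros((n_states, n_actions))
best_action_for_state_3 = int(q_table[3].argmax())

# Deep Q-Learning: a neural network approximates the Q values instead.
q_network = nn.Sequential(
    nn.Linear(n_states, 64),
    nn.ReLU(),
    nn.Linear(64, n_actions),  # one Q value per action
)
state = torch.nn.functional.one_hot(torch.tensor(3), n_states).float()
best_action = int(q_network(state).argmax())
```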
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/deep.jpg" alt="Value based RL"/>
<figcaption>Schema inspired by the Q learning notebook by Udacity
</figcaption>
</figure>
If you are not familiar with Deep Learning you should definitely watch [the FastAI Practical Deep Learning for Coders](https://course.fast.ai) (Free).
| deep-rl-class/units/en/unit1/deep-rl.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit1/deep-rl.mdx",
"repo_id": "deep-rl-class",
"token_count": 310
} | 73 |
# Introduction to Q-Learning [[introduction-q-learning]]
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/thumbnail.jpg" alt="Unit 2 thumbnail" width="100%">
In the first unit of this class, we learned about Reinforcement Learning (RL), the RL process, and the different methods to solve an RL problem. We also **trained our first agents and uploaded them to the Hugging Face Hub.**
In this unit, we're going to **dive deeper into one of the Reinforcement Learning methods: value-based methods** and study our first RL algorithm: **Q-Learning.**
We'll also **implement our first RL agent from scratch**, a Q-Learning agent, and will train it in two environments:
1. Frozen-Lake-v1 (non-slippery version): where our agent will need to **go from the starting state (S) to the goal state (G)** by walking only on frozen tiles (F) and avoiding holes (H).
2. An autonomous taxi: where our agent will need **to learn to navigate** a city to **transport its passengers from point A to point B.**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/envs.gif" alt="Environments"/>
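If you want to peek ahead, both environments come from [Gymnasium](https://gymnasium.farama.org/). Here is a minimal sketch of how they are typically instantiated, assuming a recent `gymnasium` install; the hands-on will walk you through the exact setup.
```python
import gymnasium as gym

# Frozen Lake, non-slippery version: reach the goal (G) without falling into holes (H).
frozen_lake = gym.make("FrozenLake-v1", map_name="4x4", is_slippery=False)

# Taxi: pick up and drop off passengers at the right locations.
taxi = gym.make("Taxi-v3")

observation, info = frozen_lake.reset(seed=0)
print(frozen_lake.observation_space, frozen_lake.action_space)
```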
Concretely, we will:
- Learn about **value-based methods**.
- Learn about the **differences between Monte Carlo and Temporal Difference Learning**.
- Study and implement **our first RL algorithm**: Q-Learning.
This unit is **fundamental if you want to be able to work on Deep Q-Learning**: the first Deep RL algorithm that played Atari games and beat the human level on some of them (Breakout, Space Invaders, etc.).
So let's get started! 🚀
| deep-rl-class/units/en/unit2/introduction.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit2/introduction.mdx",
"repo_id": "deep-rl-class",
"token_count": 466
} | 74 |
# Hands-on [[hands-on]]
<CourseFloatingBanner classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit3/unit3.ipynb"}
]}
askForHelpUrl="http://hf.co/join/discord" />
Now that you've studied the theory behind Deep Q-Learning, **you’re ready to train your Deep Q-Learning agent to play Atari Games**. We'll start with Space Invaders, but you'll be able to use any Atari game you want 🔥
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Environments"/>
We're using the [RL-Baselines-3 Zoo integration](https://github.com/DLR-RM/rl-baselines3-zoo), a vanilla version of Deep Q-Learning with no extensions such as Double-DQN, Dueling-DQN, or Prioritized Experience Replay.
Also, **if you want to learn to implement Deep Q-Learning by yourself after this hands-on**, you definitely should look at the CleanRL implementation: https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/dqn_atari.py
To validate this hands-on for the certification process, you need to push your trained model to the Hub and **get a result of >= 200**.
To find your result, go to the leaderboard and find your model, **the result = mean_reward - std of reward**
**If you don't find your model, go to the bottom of the page and click on the refresh button.**
For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process
And you can check your progress here 👉 https://huggingface.co/spaces/ThomasSimonini/Check-my-progress-Deep-RL-Course
**To start the hands-on click on Open In Colab button** 👇 :
[](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit3/unit3.ipynb)
# Unit 3: Deep Q-Learning with Atari Games 👾 using RL Baselines3 Zoo
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/thumbnail.jpg" alt="Unit 3 Thumbnail">
In this hands-on, **you'll train a Deep Q-Learning agent** playing Space Invaders using [RL Baselines3 Zoo](https://github.com/DLR-RM/rl-baselines3-zoo), a training framework based on [Stable-Baselines3](https://stable-baselines3.readthedocs.io/en/master/) that provides scripts for training, evaluating agents, tuning hyperparameters, plotting results and recording videos.
We're using the [RL-Baselines-3 Zoo integration, a vanilla version of Deep Q-Learning](https://stable-baselines3.readthedocs.io/en/master/modules/dqn.html) with no extensions such as Double-DQN, Dueling-DQN, and Prioritized Experience Replay.
### 🎮 Environments:
- [SpacesInvadersNoFrameskip-v4](https://gymnasium.farama.org/environments/atari/space_invaders/)
You can see the difference between Space Invaders versions here 👉 https://gymnasium.farama.org/environments/atari/space_invaders/#variants
### 📚 RL-Library:
- [RL-Baselines3-Zoo](https://github.com/DLR-RM/rl-baselines3-zoo)
## Objectives of this hands-on 🏆
At the end of the hands-on, you will:
- Be able to understand deeper **how RL Baselines3 Zoo works**.
- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥.
## Prerequisites 🏗️
Before diving into the hands-on, you need to:
🔲 📚 **[Study Deep Q-Learning by reading Unit 3](https://huggingface.co/deep-rl-course/unit3/introduction)** 🤗
We're constantly trying to improve our tutorials, so **if you find some issues in this hands-on**, please [open an issue on the Github Repo](https://github.com/huggingface/deep-rl-class/issues).
# Let's train a Deep Q-Learning agent playing Atari's Space Invaders 👾 and upload it to the Hub.
We strongly recommend students **to use Google Colab for the hands-on exercises instead of running them on their personal computers**.
By using Google Colab, **you can focus on learning and experimenting without worrying about the technical aspects of setting up your environments**.
To validate this hands-on for the certification process, you need to push your trained model to the Hub and **get a result of >= 200**.
To find your result, go to the leaderboard and find your model, **the result = mean_reward - std of reward**
For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process
## Set the GPU 💪
- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type`
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1">
- `Hardware Accelerator > GPU`
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2">
# Install RL-Baselines3 Zoo and its dependencies 📚
If you see `ERROR: pip's dependency resolver does not currently take into account all the packages that are installed.` **this is normal and not a critical error**: there is a version conflict, but the packages we need are installed.
```python
# For now we install this update of RL-Baselines3 Zoo
pip install git+https://github.com/DLR-RM/rl-baselines3-zoo@update/hf
```
IF AND ONLY IF THE VERSION ABOVE DOES NOT EXIST ANYMORE. UNCOMMENT AND INSTALL THE ONE BELOW
```python
#pip install rl_zoo3==2.0.0a9
```
```bash
apt-get install swig cmake ffmpeg
```
To be able to use Atari games in Gymnasium, we need to install the atari package, and accept-rom-license to download the ROM files (game files).
```python
!pip install gymnasium[atari]
!pip install gymnasium[accept-rom-license]
```
## Create a virtual display 🔽
During the hands-on, we'll need to generate a replay video. To do so, when training on a headless machine, **we need a virtual screen to be able to render the environment** (and thus record the frames).
Hence, the following cell will install the libraries and create and run a virtual screen 🖥
```bash
apt install python-opengl
apt install ffmpeg
apt install xvfb
pip3 install pyvirtualdisplay
```
```python
# Virtual display
from pyvirtualdisplay import Display
virtual_display = Display(visible=0, size=(1400, 900))
virtual_display.start()
```
## Train our Deep Q-Learning Agent to Play Space Invaders 👾
To train an agent with RL-Baselines3-Zoo, we just need to do two things:
1. Create a hyperparameter config file called `dqn.yml` that will contain our training hyperparameters.
This is a template example:
```yaml
SpaceInvadersNoFrameskip-v4:
env_wrapper:
- stable_baselines3.common.atari_wrappers.AtariWrapper
frame_stack: 4
policy: 'CnnPolicy'
n_timesteps: !!float 1e7
buffer_size: 100000
learning_rate: !!float 1e-4
batch_size: 32
learning_starts: 100000
target_update_interval: 1000
train_freq: 4
gradient_steps: 1
exploration_fraction: 0.1
exploration_final_eps: 0.01
# If True, you need to deactivate handle_timeout_termination
# in the replay_buffer_kwargs
optimize_memory_usage: False
```
Here we see that:
- We use the `Atari Wrapper`, which preprocesses the input (frame reduction, grayscale, stacking 4 frames)
- We use `CnnPolicy`, since we use Convolutional layers to process the frames
- We train it for 10 million `n_timesteps`
- Memory (Experience Replay) size is 100000, i.e. the number of experience steps stored so the agent can be trained on them again.
💡 My advice is to **reduce the training timesteps to 1M,** which will take about 90 minutes on a P100. `!nvidia-smi` will tell you what GPU you're using. At 10 million steps, this will take about 9 hours. I recommend running this on your local computer (or somewhere else). Just click on: `File>Download`.
In terms of hyperparameters optimization, my advice is to focus on these 3 hyperparameters:
- `learning_rate`
- `buffer_size (Experience Memory size)`
- `batch_size`
As a good practice, you need to **check the documentation to understand what each hyperparameter does**: https://stable-baselines3.readthedocs.io/en/master/modules/dqn.html#parameters
2. We start the training and save the models on `logs` folder 📁
- Define the algorithm after `--algo`, where to save the model after `-f`, and where the hyperparameter config is after `-c`.
```bash
python -m rl_zoo3.train --algo ________ --env SpaceInvadersNoFrameskip-v4 -f _________ -c _________
```
#### Solution
```bash
python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -c dqn.yml
```
## Let's evaluate our agent 👀
- RL-Baselines3-Zoo provides `enjoy.py`, a python script to evaluate our agent. In most RL libraries, we call the evaluation script `enjoy.py`.
- Let's evaluate it for 5000 timesteps 🔥
```bash
python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 --no-render --n-timesteps _________ --folder logs/
```
#### Solution
```bash
python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 --no-render --n-timesteps 5000 --folder logs/
```
## Publish our trained model on the Hub 🚀
Now that we saw we got good results after the training, we can publish our trained model on the hub 🤗 with one line of code.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit3/space-invaders-model.gif" alt="Space Invaders model">
By using `rl_zoo3.push_to_hub` **you evaluate, record a replay, generate a model card of your agent and push it to the hub**.
This way:
- You can **showcase your work** 🔥
- You can **visualize your agent playing** 👀
- You can **share with the community an agent that others can use** 💾
- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard
To be able to share your model with the community there are three more steps to follow:
1️⃣ (If it's not already done) create an account on HF ➡ https://huggingface.co/join
2️⃣ Sign in, and then store your authentication token from the Hugging Face website.
- Create a new token (https://huggingface.co/settings/tokens) **with write role**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token">
- Copy the token
- Run the cell below and paste the token
```python
from huggingface_hub import notebook_login # To log to our Hugging Face account to be able to upload models to the Hub.
notebook_login()
!git config --global credential.helper store
```
If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login`
3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥
Let's run the `push_to_hub.py` file to upload our trained agent to the Hub.
`--repo-name`: The name of the repo
`-orga`: Your Hugging Face username
`-f`: Where the trained model folder is (in our case `logs`)
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit3/select-id.png" alt="Select Id">
```bash
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 --repo-name _____________________ -orga _____________________ -f logs/
```
#### Solution
```bash
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 --repo-name dqn-SpaceInvadersNoFrameskip-v4 -orga ThomasSimonini -f logs/
```
Congrats 🥳 you've just trained and uploaded your first Deep Q-Learning agent using RL-Baselines-3 Zoo. The script above should have displayed a link to a model repository such as https://huggingface.co/ThomasSimonini/dqn-SpaceInvadersNoFrameskip-v4. When you go to this link, you can:
- See a **video preview of your agent** at the right.
- Click "Files and versions" to see all the files in the repository.
- Click "Use in stable-baselines3" to get a code snippet that shows how to load the model.
- Read the model card (`README.md` file), which gives a description of the model and the hyperparameters you used.
Under the hood, the Hub uses git-based repositories (don't worry if you don't know what git is), which means you can update the model with new versions as you experiment and improve your agent.
**Compare the results of your agents with your classmates** using the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) 🏆
## Load a powerful trained model 🔥
- The Stable-Baselines3 team uploaded **more than 150 trained Deep Reinforcement Learning agents on the Hub**.
You can find them here: 👉 https://huggingface.co/sb3
Some examples:
- Asteroids: https://huggingface.co/sb3/dqn-AsteroidsNoFrameskip-v4
- Beam Rider: https://huggingface.co/sb3/dqn-BeamRiderNoFrameskip-v4
- Breakout: https://huggingface.co/sb3/dqn-BreakoutNoFrameskip-v4
- Road Runner: https://huggingface.co/sb3/dqn-RoadRunnerNoFrameskip-v4
Let's load an agent playing Beam Rider: https://huggingface.co/sb3/dqn-BeamRiderNoFrameskip-v4
1. We download the model using `rl_zoo3.load_from_hub`, and place it in a new folder that we can call `rl_trained`
```bash
# Download model and save it into the logs/ folder
python -m rl_zoo3.load_from_hub --algo dqn --env BeamRiderNoFrameskip-v4 -orga sb3 -f rl_trained/
```
2. Let's evaluate it for 5000 timesteps
```bash
python -m rl_zoo3.enjoy --algo dqn --env BeamRiderNoFrameskip-v4 -n 5000 -f rl_trained/ --no-render
```
Why not try to train your own **Deep Q-Learning agent playing BeamRiderNoFrameskip-v4**? 🏆
If you want to try, check https://huggingface.co/sb3/dqn-BeamRiderNoFrameskip-v4#hyperparameters: **the model card lists the hyperparameters of the trained agent.**
But finding hyperparameters can be a daunting task. Fortunately, we'll see in the next Unit, how we can **use Optuna for optimizing the Hyperparameters 🔥.**
## Some additional challenges 🏆
The best way to learn **is to try things on your own**!
In the [Leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) you will find your agents. Can you get to the top?
Here's a list of environments you can try to train your agent with:
- BeamRiderNoFrameskip-v4
- BreakoutNoFrameskip-v4
- EnduroNoFrameskip-v4
- PongNoFrameskip-v4
Also, **if you want to learn to implement Deep Q-Learning by yourself**, you definitely should look at CleanRL implementation: https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/dqn_atari.py
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Environments"/>
________________________________________________________________________
Congrats on finishing this chapter!
If you still feel confused by all these elements... it's totally normal! **This was the same for me and for everyone who has studied RL.**
Take time to really **grasp the material before continuing and try the additional challenges**. It’s important to master these elements and have solid foundations.
In the next unit, **we’re going to learn about [Optuna](https://optuna.org/)**. One of the most critical tasks in Deep Reinforcement Learning is finding a good set of training hyperparameters, and Optuna is a library that helps you automate the search.
### This is a course built with you 👷🏿♀️
Finally, we want to improve and update the course iteratively with your feedback. If you have some, please fill out this form 👉 https://forms.gle/3HgA7bEHwAmmLfwh9
We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the Github Repo](https://github.com/huggingface/deep-rl-class/issues).
See you on Bonus unit 2! 🔥
### Keep Learning, Stay Awesome 🤗
| deep-rl-class/units/en/unit3/hands-on.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit3/hands-on.mdx",
"repo_id": "deep-rl-class",
"token_count": 5087
} | 75 |
# Hands-on
<CourseFloatingBanner classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit5/unit5.ipynb"}
]}
askForHelpUrl="http://hf.co/join/discord" />
We learned what ML-Agents is and how it works. We also studied the two environments we're going to use. Now we're ready to train our agents!
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/envs.png" alt="Environments" />
To validate this hands-on for the certification process, you **just need to push your trained models to the Hub.**
There are **no minimum results to attain** in order to validate this hands-on. But if you want to get nice results, you can try to reach the following:
- For [Pyramids](https://huggingface.co/spaces/unity/ML-Agents-Pyramids): Mean Reward = 1.75
- For [SnowballTarget](https://huggingface.co/spaces/ThomasSimonini/ML-Agents-SnowballTarget): Mean Reward = 15 or 30 targets shot in an episode.
For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process
**To start the hands-on, click on Open In Colab button** 👇 :
[](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit5/unit5.ipynb)
We strongly **recommend students use Google Colab for the hands-on exercises** instead of running them on their personal computers.
By using Google Colab, **you can focus on learning and experimenting without worrying about the technical aspects** of setting up your environments.
# Unit 5: An Introduction to ML-Agents
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/thumbnail.png" alt="Thumbnail"/>
In this notebook, you'll learn about ML-Agents and train two agents.
- The first one will learn to **shoot snowballs onto spawning targets**.
- The second needs to press a button to spawn a pyramid, then navigate to the pyramid, knock it over, **and move to the gold brick at the top**. To do that, it will need to explore its environment, and we will use a technique called curiosity.
After that, you'll be able **to watch your agents playing directly on your browser**.
For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process
⬇️ Here is an example of what **you will achieve at the end of this unit.** ⬇️
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/pyramids.gif" alt="Pyramids"/>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget.gif" alt="SnowballTarget"/>
### 🎮 Environments:
- [Pyramids](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Learning-Environment-Examples.md#pyramids)
- SnowballTarget
### 📚 RL-Library:
- [ML-Agents](https://github.com/Unity-Technologies/ml-agents)
We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues).
## Objectives of this notebook 🏆
At the end of the notebook, you will:
- Understand how **ML-Agents** works and the environment library.
- Be able to **train agents in Unity Environments**.
## Prerequisites 🏗️
Before diving into the notebook, you need to:
🔲 📚 **Study [what ML-Agents is and how it works by reading Unit 5](https://huggingface.co/deep-rl-course/unit5/introduction)** 🤗
# Let's train our agents 🚀
## Set the GPU 💪
- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type`
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1">
- `Hardware Accelerator > GPU`
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2">
## Clone the repository and install the dependencies 🔽
- We need to clone the repository that **contains the experimental version of the library that allows you to push your trained agent to the Hub.**
```bash
# Clone the repository
git clone --depth 1 https://github.com/Unity-Technologies/ml-agents
```
```bash
# Go inside the repository and install the package
cd ml-agents
pip install -e ./ml-agents-envs
pip install -e ./ml-agents
```
## SnowballTarget ⛄
If you need a refresher on how this environment works check this section 👉
https://huggingface.co/deep-rl-course/unit5/snowball-target
### Download and move the environment zip file in `./training-envs-executables/linux/`
- Our environment executable is in a zip file.
- We need to download it and place it in `./training-envs-executables/linux/`
- We use a Linux executable because we use Colab, and Colab machines' OS is Ubuntu (Linux)
```bash
# Here, we create training-envs-executables and linux
mkdir ./training-envs-executables
mkdir ./training-envs-executables/linux
```
We downloaded the file SnowballTarget.zip from https://github.com/huggingface/Snowball-Target using `wget`
```bash
wget "https://github.com/huggingface/Snowball-Target/raw/main/SnowballTarget.zip" -O ./training-envs-executables/linux/SnowballTarget.zip
```
We unzip the executable.zip file
```bash
unzip -d ./training-envs-executables/linux/ ./training-envs-executables/linux/SnowballTarget.zip
```
Make sure your file is accessible
```bash
chmod -R 755 ./training-envs-executables/linux/SnowballTarget
```
### Define the SnowballTarget config file
- In ML-Agents, you define the **training hyperparameters in config.yaml files.**
There are multiple hyperparameters. To understand them better, you should read the explanation for each one in [the documentation](https://github.com/Unity-Technologies/ml-agents/blob/release_20_docs/docs/Training-Configuration-File.md)
You need to create a `SnowballTarget.yaml` config file in ./content/ml-agents/config/ppo/
We'll give you a preliminary version of this config (to copy and paste into your `SnowballTarget.yaml` file), **but you should modify it**.
```yaml
behaviors:
SnowballTarget:
trainer_type: ppo
summary_freq: 10000
keep_checkpoints: 10
checkpoint_interval: 50000
max_steps: 200000
time_horizon: 64
threaded: true
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: linear
batch_size: 128
buffer_size: 2048
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
```
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballfight_config1.png" alt="Config SnowballTarget"/>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballfight_config2.png" alt="Config SnowballTarget"/>
As an experiment, try to modify some other hyperparameters. Unity provides very [good documentation explaining each of them here](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Training-Configuration-File.md).
Now that you've created the config file and understand what most hyperparameters do, we're ready to train our agent 🔥.
### Train the agent
To train our agent, we need to **launch mlagents-learn and select the executable containing the environment.**
We define four parameters:
1. `mlagents-learn <config>`: the path where the hyperparameter config file is.
2. `--env`: where the environment executable is.
3. `--run_id`: the name you want to give to your training run id.
4. `--no-graphics`: to not launch the visualization during the training.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/mlagentslearn.png" alt="MlAgents learn"/>
Train the model and use the `--resume` flag to continue training in case of interruption.
> The first time you run it with `--resume`, it will fail; try rerunning the block to bypass the error.
The training will take 10 to 35min depending on your config. Go take a ☕️ you deserve it 🤗.
```bash
mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id="SnowballTarget1" --no-graphics
```
### Push the agent to the Hugging Face Hub
- Now that we've trained our agent, we’re **ready to push it to the Hub and visualize it playing on your browser🔥.**
To be able to share your model with the community, there are three more steps to follow:
1️⃣ (If it's not already done) create an account on HF ➡ https://huggingface.co/join
2️⃣ Sign in and store your authentication token from the Hugging Face website.
- Create a new token (https://huggingface.co/settings/tokens) **with write role**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token">
- Copy the token
- Run the cell below and paste the token
```python
from huggingface_hub import notebook_login
notebook_login()
```
If you don't want to use Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login`
Then we need to run `mlagents-push-to-hf`.
And we define four parameters:
1. `--run-id`: the name of the training run id.
2. `--local-dir`: where the agent was saved, it’s results/<run_id name>, so in my case results/First Training.
3. `--repo-id`: the name of the Hugging Face repo you want to create or update. It’s always <your huggingface username>/<the repo name>
If the repo does not exist **it will be created automatically**
4. `--commit-message`: since HF repos are git repositories you need to give a commit message.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/mlagentspushtohub.png" alt="Push to Hub"/>
For instance:
`mlagents-push-to-hf --run-id="SnowballTarget1" --local-dir="./results/SnowballTarget1" --repo-id="ThomasSimonini/ppo-SnowballTarget" --commit-message="First Push"`
```python
mlagents-push-to-hf --run-id= # Add your run id --local-dir= # Your local dir --repo-id= # Your repo id --commit-message= # Your commit message
```
If everything worked you should see this at the end of the process (but with a different url 😆) :
```
Your model is pushed to the hub. You can view your model here: https://huggingface.co/ThomasSimonini/ppo-SnowballTarget
```
It's the link to your model. It contains a model card that explains how to use it, your Tensorboard, and your config file. **What's awesome is that it's a git repository, which means you can have different commits, update your repository with a new push, etc.**
But now comes the best: **being able to visualize your agent online 👀.**
### Watch your agent playing 👀
This step is simple:
1. Remember your repo-id
2. Go here: https://huggingface.co/spaces/ThomasSimonini/ML-Agents-SnowballTarget
3. Launch the game and put it in full screen by clicking on the bottom right button
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballtarget_load.png" alt="Snowballtarget load"/>
1. In step 1, choose your model repository, which is the model id (in my case ThomasSimonini/ppo-SnowballTarget).
2. In step 2, **choose what model you want to replay**:
- I have multiple ones since we saved a model every 500000 timesteps.
- But if I want the most recent one, I choose `SnowballTarget.onnx`
👉 It's nice to **try different model stages to see the improvement of the agent.**
And don't hesitate to share the best score your agent gets on discord in the #rl-i-made-this channel 🔥
Now let's try a more challenging environment called Pyramids.
## Pyramids 🏆
### Download and move the environment zip file in `./training-envs-executables/linux/`
- Our environment executable is in a zip file.
- We need to download it and place it into `./training-envs-executables/linux/`
- We use a linux executable because we're using colab, and the colab machine's OS is Ubuntu (linux)
Download the file Pyramids.zip from https://drive.google.com/uc?export=download&id=1UiFNdKlsH0NTu32xV-giYUEVKV4-vc7H using `wget`. Check out the full solution to download large files from GDrive [here](https://bcrf.biochem.wisc.edu/2021/02/05/download-google-drive-files-using-wget/)
```python
!wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1UiFNdKlsH0NTu32xV-giYUEVKV4-vc7H' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1UiFNdKlsH0NTu32xV-giYUEVKV4-vc7H" -O ./training-envs-executables/linux/Pyramids.zip && rm -rf /tmp/cookies.txt
```
Unzip it
```python
%%capture
!unzip -d ./training-envs-executables/linux/ ./training-envs-executables/linux/Pyramids.zip
```
Make sure your file is accessible
```bash
chmod -R 755 ./training-envs-executables/linux/Pyramids/Pyramids
```
### Modify the PyramidsRND config file
- Contrary to the first environment, which was a custom one, **Pyramids was made by the Unity team**.
- So the PyramidsRND config file already exists and is in ./content/ml-agents/config/ppo/PyramidsRND.yaml
- You might ask why "RND" is in PyramidsRND. RND stands for *random network distillation*; it's a way to generate curiosity rewards. If you want to know more about that, we wrote an article explaining this technique: https://medium.com/data-from-the-trenches/curiosity-driven-learning-through-random-network-distillation-488ffd8e5938
For this training, we’ll modify one thing:
- The total training steps hyperparameter is too high since we can hit the benchmark (mean reward = 1.75) in only 1M training steps.
👉 To do that, we go to config/ppo/PyramidsRND.yaml, **and change max_steps to 1000000.**
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/pyramids-config.png" alt="Pyramids config"/>
As an experiment, you should also try to modify some other hyperparameters. Unity provides very [good documentation explaining each of them here](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Training-Configuration-File.md).
We’re now ready to train our agent 🔥.
### Train the agent
The training will take 30 to 45min depending on your machine, go take a ☕️ you deserve it 🤗.
```python
mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id="Pyramids Training" --no-graphics
```
### Push the agent to the Hugging Face Hub
- Now that we trained our agent, we’re **ready to push it to the Hub to be able to visualize it playing on your browser🔥.**
```python
mlagents-push-to-hf --run-id= # Add your run id --local-dir= # Your local dir --repo-id= # Your repo id --commit-message= # Your commit message
```
### Watch your agent playing 👀
👉 https://huggingface.co/spaces/unity/ML-Agents-Pyramids
### 🎁 Bonus: Why not train on another environment?
Now that you know how to train an agent using MLAgents, **why not try another environment?**
MLAgents provides 17 different environments and we’re building some custom ones. The best way to learn is to try things on your own, have fun.

You can find the full list of the currently available environments on Hugging Face here 👉 https://github.com/huggingface/ml-agents#the-environments
For the demos to visualize your agent 👉 https://huggingface.co/unity
For now we have integrated:
- [Worm](https://huggingface.co/spaces/unity/ML-Agents-Worm) demo where you teach a **worm to crawl**.
- [Walker](https://huggingface.co/spaces/unity/ML-Agents-Walker) demo where you teach an agent **to walk towards a goal**.
That’s all for today. Congrats on finishing this tutorial!
The best way to learn is to practice and try stuff. Why not try another environment? ML-Agents has 18 different environments, but you can also create your own. Check the documentation and have fun!
See you on Unit 6 🔥,
## Keep Learning, Stay awesome 🤗
| deep-rl-class/units/en/unit5/hands-on.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit5/hands-on.mdx",
"repo_id": "deep-rl-class",
"token_count": 5355
} | 76 |
# An introduction to Multi-Agents Reinforcement Learning (MARL)
## From single agent to multiple agents
In the first unit, we learned to train agents in a single-agent system, where our agent was alone in its environment: **it was not cooperating or collaborating with other agents**.
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/patchwork.jpg" alt="Patchwork"/>
<figcaption>
A patchwork of all the environments you've trained your agents on since the beginning of the course
</figcaption>
</figure>
When we do multi-agent reinforcement learning (MARL), we are in a situation where we have multiple agents **that share and interact in a common environment**.
For instance, you can think of a warehouse where **multiple robots need to navigate to load and unload packages**.
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/warehouse.jpg" alt="Warehouse"/>
<figcaption> [Image by upklyak](https://www.freepik.com/free-vector/robots-warehouse-interior-automated-machines_32117680.htm#query=warehouse robot&position=17&from_view=keyword) on Freepik </figcaption>
</figure>
Or a road with **several autonomous vehicles**.
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/selfdrivingcar.jpg" alt="Self driving cars"/>
<figcaption>
[Image by jcomp](https://www.freepik.com/free-vector/autonomous-smart-car-automatic-wireless-sensor-driving-road-around-car-autonomous-smart-car-goes-scans-roads-observe-distance-automatic-braking-system_26413332.htm#query=self driving cars highway&position=34&from_view=search&track=ais) on Freepik
</figcaption>
</figure>
In these examples, we have **multiple agents interacting in the environment and with the other agents**. This implies defining a multi-agent system. But first, let's understand the different types of multi-agent environments.
## Different types of multi-agent environments
Given that, in a multi-agent system, agents interact with other agents, we can have different types of environments:
- *Cooperative environments*: where your agents need **to maximize the common benefits**.
For instance, in a warehouse, **robots must collaborate to load and unload the packages efficiently (as fast as possible)**.
- *Competitive/Adversarial environments*: in this case, your agent **wants to maximize its benefits by minimizing the opponent's**.
For example, in a game of tennis, **each agent wants to beat the other agent**.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/tennis.png" alt="Tennis"/>
- *A mix of adversarial and cooperative*: as in our SoccerTwos environment, two agents are part of a team (blue or purple): they need to cooperate with each other and beat the opponent team.
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/>
<figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents">Unity MLAgents Team</a></figcaption>
</figure>
So now we might wonder: how can we design these multi-agent systems? Said differently, **how can we train agents in a multi-agent setting**?
| deep-rl-class/units/en/unit7/introduction-to-marl.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit7/introduction-to-marl.mdx",
"repo_id": "deep-rl-class",
"token_count": 982
} | 77 |
# How Huggy works [[how-huggy-works]]
Huggy is a Deep Reinforcement Learning environment made by Hugging Face and based on [Puppo the Corgi, a project by the Unity MLAgents team](https://blog.unity.com/technology/puppo-the-corgi-cuteness-overload-with-the-unity-ml-agents-toolkit).
This environment was created using the [Unity game engine](https://unity.com/) and [MLAgents](https://github.com/Unity-Technologies/ml-agents). ML-Agents is a toolkit for the game engine from Unity that allows us to **create environments using Unity or use pre-made environments to train our agents**.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy.jpg" alt="Huggy" width="100%">
In this environment we aim to train Huggy to **fetch the stick we throw. This means he needs to move correctly toward the stick**.
## The State Space, what Huggy perceives [[state-space]]
Huggy doesn't "see" his environment. Instead, we provide him with information about the environment:
- The target (stick) position
- The relative position between himself and the target
- The orientation of his legs.
Given all this information, Huggy can **use his policy to determine which action to take next to fulfill his goal**.
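As a rough mental model (the field names below are made up for illustration; this is not the actual ML-Agents observation layout), you can picture Huggy's observation as a small vector of numbers:
```python
# Purely illustrative: hypothetical field names, not the real ML-Agents observation layout.
import numpy as np

observation = {
    "target_position": np.array([2.0, 0.0, 5.0]),           # where the stick landed
    "relative_position": np.array([1.5, 0.0, 4.0]),         # stick position relative to Huggy
    "leg_joint_orientations": np.random.uniform(-1, 1, 8),  # one value per joint motor
}
# Roughly speaking, a flat vector like this is what the policy consumes.
flat_observation = np.concatenate([v.ravel() for v in observation.values()])
```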
## The Action Space, what moves Huggy can perform [[action-space]]
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy-action.jpg" alt="Huggy action" width="100%">
**Joint motors drive Huggy's legs**. This means that to get the target, Huggy needs to **learn to rotate the joint motors of each of his legs correctly so he can move**.
## The Reward Function [[reward-function]]
The reward function is designed so that **Huggy will fulfill his goal**: fetch the stick.
Remember that one of the foundations of Reinforcement Learning is the *reward hypothesis*: a goal can be described as the **maximization of the expected cumulative reward**.
Here, our goal is that Huggy **goes towards the stick but without spinning too much**. Hence, our reward function must translate this goal.
Our reward function:
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/reward.jpg" alt="Huggy reward function" width="100%">
- *Orientation bonus*: we **reward him for getting close to the target**.
- *Time penalty*: a fixed-time penalty given at every action to **force him to get to the stick as fast as possible**.
- *Rotation penalty*: we penalize Huggy if **he spins too much and turns too quickly**.
- *Getting to the target reward*: we reward Huggy for **reaching the target**.
If you want to see what this reward function looks like mathematically, check [Puppo the Corgi presentation](https://blog.unity.com/technology/puppo-the-corgi-cuteness-overload-with-the-unity-ml-agents-toolkit).
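Here is a rough sketch of how such a composite reward could be combined. The coefficients and function signature are made up for illustration; the real formula is in the Puppo presentation linked above.
```python
# Illustrative only: made-up coefficients, not Huggy's actual reward formula.
def huggy_reward(velocity_towards_target, angular_speed, reached_target):
    orientation_bonus = 0.01 * velocity_towards_target  # reward moving towards the stick
    time_penalty = -0.001                               # fixed penalty at every step
    rotation_penalty = -0.05 * abs(angular_speed)       # discourage spinning too much
    target_reward = 1.0 if reached_target else 0.0      # big reward for fetching the stick
    return orientation_bonus + time_penalty + rotation_penalty + target_reward
```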
## Train Huggy
Huggy aims **to learn to run correctly and as fast as possible toward the goal**. To do that, at every step and given the environment observation, he needs to decide how to rotate each joint motor of his legs to move correctly (not spinning too much) and towards the goal.
The training loop looks like this:
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/huggy-loop.jpg" alt="Huggy loop" width="100%">
The training environment looks like this:
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/unit-bonus1/training-env.jpg" alt="Huggy training env" width="100%">
It's a place where a **stick is spawned randomly**. When Huggy reaches it, the stick gets spawned somewhere else.
We built **multiple copies of the environment for the training**. This helps speed up the training by providing more diverse experiences.
Now that you have the big picture of the environment, you're ready to train Huggy to fetch the stick.
To do that, we're going to use [MLAgents](https://github.com/Unity-Technologies/ml-agents). Don't worry if you have never used it before. In this unit we'll use Google Colab to train Huggy, and then you'll be able to load your trained Huggy and play with him directly in the browser.
In a future unit, we will study MLAgents more in-depth and see how it works. But for now, we keep things simple by just using the provided implementation.
| deep-rl-class/units/en/unitbonus1/how-huggy-works.mdx/0 | {
"file_path": "deep-rl-class/units/en/unitbonus1/how-huggy-works.mdx",
"repo_id": "deep-rl-class",
"token_count": 1245
} | 78 |
# Offline vs. Online Reinforcement Learning
Deep Reinforcement Learning (RL) is a framework **to build decision-making agents**. These agents aim to learn optimal behavior (policy) by interacting with the environment through **trial and error and receiving rewards as unique feedback**.
The agent’s goal **is to maximize its cumulative reward**, called the return. This is because RL is based on the *reward hypothesis*: all goals can be described as the **maximization of the expected cumulative reward**.
Deep Reinforcement Learning agents **learn with batches of experience**. The question is: how do they collect it?
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/offlinevsonlinerl.gif" alt="Unit bonus 3 thumbnail">
<figcaption>A comparison between Reinforcement Learning in an Online and Offline setting, figure taken from <a href="https://offline-rl.github.io/">this post</a></figcaption>
</figure>
- In *online reinforcement learning*, which is what we've learned during this course, the agent **gathers data directly**: it collects a batch of experience by **interacting with the environment**. Then, it uses this experience immediately (or via some replay buffer) to learn from it (update its policy).
But this implies that either you **train your agent directly in the real world or have a simulator**. If you don’t have one, you need to build it, which can be very complex (how to reflect the complex reality of the real world in an environment?), expensive, and insecure (if the simulator has flaws that may provide a competitive advantage, the agent will exploit them).
- On the other hand, in *offline reinforcement learning*, the agent only **uses data collected from other agents or human demonstrations**. It does **not interact with the environment**.
The process is as follows:
- **Create a dataset** using one or more policies and/or human interactions.
- Run **offline RL on this dataset** to learn a policy
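As a toy illustration of the second step, here is a minimal behavior-cloning-style sketch that learns a policy from a fixed dataset of (state, action) pairs without ever touching the environment. This is a deliberate simplification: real offline RL algorithms (CQL, BCQ, etc.) are more involved.
```python
# Toy sketch: learn a policy offline from a fixed dataset, with no environment interaction.
import torch
import torch.nn as nn

state_dim, n_actions, n_samples = 4, 2, 1024

# 1. A "dataset" collected beforehand by other policies or human demonstrations.
states = torch.randn(n_samples, state_dim)
actions = torch.randint(0, n_actions, (n_samples,))

# 2. Learn a policy offline from that dataset only (behavior cloning).
policy = nn.Sequential(nn.Linear(state_dim, 64), nn.ReLU(), nn.Linear(64, n_actions))
optimizer = torch.optim.Adam(policy.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()

for _ in range(100):
    loss = loss_fn(policy(states), actions)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```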
This method has one drawback: the *counterfactual queries problem*. What do we do if our agent **decides to do something for which we don’t have the data?** For instance, turning right at an intersection when we don’t have that trajectory in the dataset.
Some solutions exist for this problem, but if you want to know more about offline reinforcement learning, you can [watch this video](https://www.youtube.com/watch?v=k08N5a0gG0A)
## Further reading
For more information, we recommend you check out the following resources:
- [Offline Reinforcement Learning, Talk by Sergey Levine](https://www.youtube.com/watch?v=qgZPZREor5I)
- [Offline Reinforcement Learning: Tutorial, Review, and Perspectives on Open Problems](https://arxiv.org/abs/2005.01643)
## Author
This section was written by <a href="https://twitter.com/ThomasSimonini"> Thomas Simonini</a>
| deep-rl-class/units/en/unitbonus3/offline-online.mdx/0 | {
"file_path": "deep-rl-class/units/en/unitbonus3/offline-online.mdx",
"repo_id": "deep-rl-class",
"token_count": 708
} | 79 |
# Files for typos
# Instruction: https://github.com/marketplace/actions/typos-action#getting-started
[default.extend-identifiers]
[default.extend-words]
NIN="NIN" # NIN is used in scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py
nd="np" # nd may be np (numpy)
parms="parms" # parms is used in scripts/convert_original_stable_diffusion_to_diffusers.py
[files]
extend-exclude = ["_typos.toml"]
| diffusers/_typos.toml/0 | {
"file_path": "diffusers/_typos.toml",
"repo_id": "diffusers",
"token_count": 151
} | 80 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Textual Inversion
Textual Inversion is a training method for personalizing models by learning new text embeddings from a few example images. The file produced from training is extremely small (a few KBs) and the new embeddings can be loaded into the text encoder.
[`TextualInversionLoaderMixin`] provides a function for loading Textual Inversion embeddings from Diffusers and Automatic1111 into the text encoder and loading a special token to activate the embeddings.
<Tip>
To learn more about how to load Textual Inversion embeddings, see the [Textual Inversion](../../using-diffusers/loading_adapters#textual-inversion) loading guide.
</Tip>
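As a quick illustration, loading an embedding into a pipeline typically looks like the snippet below. The base checkpoint and concept repository are examples; the special token (here `<cat-toy>`) activates the learned embedding in the prompt.
```py
import torch
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# load a Textual Inversion embedding and use its special token in the prompt
pipeline.load_textual_inversion("sd-concepts-library/cat-toy")
image = pipeline("A <cat-toy> sitting on a park bench", num_inference_steps=50).images[0]
image.save("cat-toy.png")
```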
## TextualInversionLoaderMixin
[[autodoc]] loaders.textual_inversion.TextualInversionLoaderMixin | diffusers/docs/source/en/api/loaders/textual_inversion.md/0 | {
"file_path": "diffusers/docs/source/en/api/loaders/textual_inversion.md",
"repo_id": "diffusers",
"token_count": 341
} | 81 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# DDIM
[Denoising Diffusion Implicit Models](https://huggingface.co/papers/2010.02502) (DDIM) by Jiaming Song, Chenlin Meng and Stefano Ermon.
The abstract from the paper is:
*Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from. We empirically demonstrate that DDIMs can produce high quality samples 10× to 50× faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.*
The original codebase can be found at [ermongroup/ddim](https://github.com/ermongroup/ddim).
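A minimal usage sketch (the checkpoint below is just an example of a small unconditional model):
```py
from diffusers import DDIMPipeline

# load an unconditional checkpoint with the DDIM pipeline
pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")

# sample random noise and denoise it in far fewer steps than DDPM would need
image = pipeline(num_inference_steps=50).images[0]
image.save("ddim_sample.png")
```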
## DDIMPipeline
[[autodoc]] DDIMPipeline
- all
- __call__
## ImagePipelineOutput
[[autodoc]] pipelines.ImagePipelineOutput
| diffusers/docs/source/en/api/pipelines/ddim.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/ddim.md",
"repo_id": "diffusers",
"token_count": 478
} | 82 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# InstructPix2Pix
[InstructPix2Pix: Learning to Follow Image Editing Instructions](https://huggingface.co/papers/2211.09800) is by Tim Brooks, Aleksander Holynski and Alexei A. Efros.
The abstract from the paper is:
*We propose a method for editing images from human instructions: given an input image and a written instruction that tells the model what to do, our model follows these instructions to edit the image. To obtain training data for this problem, we combine the knowledge of two large pretrained models -- a language model (GPT-3) and a text-to-image model (Stable Diffusion) -- to generate a large dataset of image editing examples. Our conditional diffusion model, InstructPix2Pix, is trained on our generated data, and generalizes to real images and user-written instructions at inference time. Since it performs edits in the forward pass and does not require per example fine-tuning or inversion, our model edits images quickly, in a matter of seconds. We show compelling editing results for a diverse collection of input images and written instructions.*
You can find additional information about InstructPix2Pix on the [project page](https://www.timothybrooks.com/instruct-pix2pix), [original codebase](https://github.com/timothybrooks/instruct-pix2pix), and try it out in a [demo](https://huggingface.co/spaces/timbrooks/instruct-pix2pix).
<Tip>
Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
</Tip>
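A short usage sketch (the checkpoint and image URL are illustrative; `image_guidance_scale` controls how closely the edit sticks to the original image):
```py
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

image = load_image(
    "https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png"
)
# the edit instruction replaces the usual descriptive prompt
edited = pipeline("make the mountains snowy", image=image, image_guidance_scale=1.5).images[0]
edited.save("snowy_mountains.png")
```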
## StableDiffusionInstructPix2PixPipeline
[[autodoc]] StableDiffusionInstructPix2PixPipeline
- __call__
- all
- load_textual_inversion
- load_lora_weights
- save_lora_weights
## StableDiffusionXLInstructPix2PixPipeline
[[autodoc]] StableDiffusionXLInstructPix2PixPipeline
- __call__
- all
| diffusers/docs/source/en/api/pipelines/pix2pix.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/pix2pix.md",
"repo_id": "diffusers",
"token_count": 708
} | 83 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Stable Diffusion 2
Stable Diffusion 2 is a text-to-image _latent diffusion_ model built upon the work of the original [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release), and it was led by Robin Rombach and Katherine Crowson from [Stability AI](https://stability.ai/) and [LAION](https://laion.ai/).
*The Stable Diffusion 2.0 release includes robust text-to-image models trained using a brand new text encoder (OpenCLIP), developed by LAION with support from Stability AI, which greatly improves the quality of the generated images compared to earlier V1 releases. The text-to-image models in this release can generate images with default resolutions of both 512x512 pixels and 768x768 pixels.
These models are trained on an aesthetic subset of the [LAION-5B dataset](https://laion.ai/blog/laion-5b/) created by the DeepFloyd team at Stability AI, which is then further filtered to remove adult content using [LAION’s NSFW filter](https://openreview.net/forum?id=M3Y74vmsMcY).*
For more details about how Stable Diffusion 2 works and how it differs from the original Stable Diffusion, please refer to the official [announcement post](https://stability.ai/blog/stable-diffusion-v2-release).
The architecture of Stable Diffusion 2 is more or less identical to the original [Stable Diffusion model](./text2img), so check out its API documentation for how to use Stable Diffusion 2. We recommend using the [`DPMSolverMultistepScheduler`] as it gives a reasonable speed/quality trade-off and can be run with as little as 20 steps.
Stable Diffusion 2 is available for tasks like text-to-image, inpainting, super-resolution, and depth-to-image:
| Task | Repository |
|-------------------------|---------------------------------------------------------------------------------------------------------------|
| text-to-image (512x512) | [stabilityai/stable-diffusion-2-base](https://huggingface.co/stabilityai/stable-diffusion-2-base) |
| text-to-image (768x768) | [stabilityai/stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) |
| inpainting | [stabilityai/stable-diffusion-2-inpainting](https://huggingface.co/stabilityai/stable-diffusion-2-inpainting) |
| super-resolution        | [stabilityai/stable-diffusion-x4-upscaler](https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler)                 |
| depth-to-image | [stabilityai/stable-diffusion-2-depth](https://huggingface.co/stabilityai/stable-diffusion-2-depth) |
Here are some examples for how to use Stable Diffusion 2 for each task:
<Tip>
Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!
If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations!
</Tip>
## Text-to-image
```py
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
import torch
repo_id = "stabilityai/stable-diffusion-2-base"
pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16, revision="fp16")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
prompt = "High quality photo of an astronaut riding a horse in space"
image = pipe(prompt, num_inference_steps=25).images[0]
image
```
## Inpainting
```py
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import load_image, make_image_grid
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = load_image(img_url).resize((512, 512))
mask_image = load_image(mask_url).resize((512, 512))
repo_id = "stabilityai/stable-diffusion-2-inpainting"
pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16, revision="fp16")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=25).images[0]
make_image_grid([init_image, mask_image, image], rows=1, cols=3)
```
## Super-resolution
```py
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image, make_image_grid
import torch
# load model and scheduler
model_id = "stabilityai/stable-diffusion-x4-upscaler"
pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")
# let's download an image
url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
low_res_img = load_image(url)
low_res_img = low_res_img.resize((128, 128))
prompt = "a white cat"
upscaled_image = pipeline(prompt=prompt, image=low_res_img).images[0]
make_image_grid([low_res_img.resize((512, 512)), upscaled_image.resize((512, 512))], rows=1, cols=2)
```
## Depth-to-image
```py
import torch
from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers.utils import load_image, make_image_grid
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth",
torch_dtype=torch.float16,
).to("cuda")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
init_image = load_image(url)
prompt = "two tigers"
negative_prompt = "bad, deformed, ugly, bad anotomy"
image = pipe(prompt=prompt, image=init_image, negative_prompt=negative_prompt, strength=0.7).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```
| diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_2.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_2.md",
"repo_id": "diffusers",
"token_count": 2284
} | 84 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# DDPMScheduler
[Denoising Diffusion Probabilistic Models](https://huggingface.co/papers/2006.11239) (DDPM) by Jonathan Ho, Ajay Jain and Pieter Abbeel proposes a diffusion based model of the same name. In the context of the 🤗 Diffusers library, DDPM refers to the discrete denoising scheduler from the paper as well as the pipeline.
The abstract from the paper is:
*We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at [this https URL](https://github.com/hojonathanho/diffusion).*
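As a quick reference, the scheduler is typically used through the [`DDPMPipeline`]; the checkpoint below is only an example:
```py
from diffusers import DDPMPipeline, DDPMScheduler

# load an unconditional pipeline; the checkpoint already ships a DDPMScheduler,
# rebuilding it from its config is shown here only to illustrate the API
pipeline = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
pipeline.scheduler = DDPMScheduler.from_config(pipeline.scheduler.config)

# DDPM denoises over the full schedule, so sampling uses many steps
image = pipeline(num_inference_steps=1000).images[0]
image.save("ddpm_sample.png")
```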
## DDPMScheduler
[[autodoc]] DDPMScheduler
## DDPMSchedulerOutput
[[autodoc]] schedulers.scheduling_ddpm.DDPMSchedulerOutput
| diffusers/docs/source/en/api/schedulers/ddpm.md/0 | {
"file_path": "diffusers/docs/source/en/api/schedulers/ddpm.md",
"repo_id": "diffusers",
"token_count": 471
} | 85 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# DeepCache
[DeepCache](https://huggingface.co/papers/2312.00858) accelerates [`StableDiffusionPipeline`] and [`StableDiffusionXLPipeline`] by strategically caching and reusing high-level features while efficiently updating low-level features by taking advantage of the U-Net architecture.
Start by installing [DeepCache](https://github.com/horseee/DeepCache):
```bash
pip install DeepCache
```
Then load and enable the [`DeepCacheSDHelper`](https://github.com/horseee/DeepCache#usage):
```diff
import torch
from diffusers import StableDiffusionPipeline
pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', torch_dtype=torch.float16).to("cuda")
+ from DeepCache import DeepCacheSDHelper
+ helper = DeepCacheSDHelper(pipe=pipe)
+ helper.set_params(
+ cache_interval=3,
+ cache_branch_id=0,
+ )
+ helper.enable()
image = pipe("a photo of an astronaut on a moon").images[0]
```
The `set_params` method accepts two arguments: `cache_interval` and `cache_branch_id`. `cache_interval` means the frequency of feature caching, specified as the number of steps between each cache operation. `cache_branch_id` identifies which branch of the network (ordered from the shallowest to the deepest layer) is responsible for executing the caching processes.
Opting for a lower `cache_branch_id` or a larger `cache_interval` can lead to faster inference speed at the expense of reduced image quality (ablation experiments of these two hyperparameters can be found in the [paper](https://arxiv.org/abs/2312.00858)). Once those arguments are set, use the `enable` or `disable` methods to activate or deactivate the `DeepCacheSDHelper`.
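For example, to compare cached and uncached generations with the same pipeline, you can toggle the helper around each call (a small sketch building on the snippet above):
```py
# with caching enabled
helper.enable()
image_fast = pipe("a photo of an astronaut on a moon").images[0]

# restore the original, uncached behavior
helper.disable()
image_baseline = pipe("a photo of an astronaut on a moon").images[0]
```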
<div class="flex justify-center">
<img src="https://github.com/horseee/Diffusion_DeepCache/raw/master/static/images/example.png">
</div>
You can find more generated samples (original pipeline vs DeepCache) and the corresponding inference latency in the [WandB report](https://wandb.ai/horseee/DeepCache/runs/jwlsqqgt?workspace=user-horseee). The prompts are randomly selected from the [MS-COCO 2017](https://cocodataset.org/#home) dataset.
## Benchmark
We tested how much faster DeepCache accelerates [Stable Diffusion v2.1](https://huggingface.co/stabilityai/stable-diffusion-2-1) with 50 inference steps on an NVIDIA RTX A5000, using different configurations for resolution, batch size, cache interval (I), and cache branch (B).
| **Resolution** | **Batch size** | **Original** | **DeepCache(I=3, B=0)** | **DeepCache(I=5, B=0)** | **DeepCache(I=5, B=1)** |
|----------------|----------------|--------------|-------------------------|-------------------------|-------------------------|
| 512| 8| 15.96| 6.88(2.32x)| 5.03(3.18x)| 7.27(2.20x)|
| | 4| 8.39| 3.60(2.33x)| 2.62(3.21x)| 3.75(2.24x)|
| | 1| 2.61| 1.12(2.33x)| 0.81(3.24x)| 1.11(2.35x)|
| 768| 8| 43.58| 18.99(2.29x)| 13.96(3.12x)| 21.27(2.05x)|
| | 4| 22.24| 9.67(2.30x)| 7.10(3.13x)| 10.74(2.07x)|
| | 1| 6.33| 2.72(2.33x)| 1.97(3.21x)| 2.98(2.12x)|
| 1024| 8| 101.95| 45.57(2.24x)| 33.72(3.02x)| 53.00(1.92x)|
| | 4| 49.25| 21.86(2.25x)| 16.19(3.04x)| 25.78(1.91x)|
| | 1| 13.83| 6.07(2.28x)| 4.43(3.12x)| 7.15(1.93x)|
| diffusers/docs/source/en/optimization/deepcache.md/0 | {
"file_path": "diffusers/docs/source/en/optimization/deepcache.md",
"repo_id": "diffusers",
"token_count": 1913
} | 86 |
<!--Copyright 2023 Custom Diffusion authors The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Custom Diffusion
[Custom Diffusion](https://huggingface.co/papers/2212.04488) is a training technique for personalizing image generation models. Like Textual Inversion, DreamBooth, and LoRA, Custom Diffusion only requires a few (~4-5) example images. This technique works by only training weights in the cross-attention layers, and it uses a special word to represent the newly learned concept. Custom Diffusion is unique because it can also learn multiple concepts at the same time.
If you're training on a GPU with limited vRAM, you should try enabling xFormers with `--enable_xformers_memory_efficient_attention` for faster training with lower vRAM requirements (16GB). To save even more memory, add the `--set_grads_to_none` argument to the training command to set the gradients to `None` instead of zero (this option can cause some issues, so if you experience any, try removing this parameter).
This guide will explore the [train_custom_diffusion.py](https://github.com/huggingface/diffusers/blob/main/examples/custom_diffusion/train_custom_diffusion.py) script to help you become more familiar with it, and how you can adapt it for your own use-case.
Before running the script, make sure you install the library from source:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```
Navigate to the example folder with the training script and install the required dependencies:
```bash
cd examples/custom_diffusion
pip install -r requirements.txt
pip install clip-retrieval
```
<Tip>
🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.
</Tip>
Initialize an 🤗 Accelerate environment:
```bash
accelerate config
```
To set up a default 🤗 Accelerate environment without choosing any configurations:
```bash
accelerate config default
```
Or if your environment doesn't support an interactive shell, like a notebook, you can use:
```py
from accelerate.utils import write_basic_config
write_basic_config()
```
Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.
<Tip>
The following sections highlight parts of the training script that are important for understanding how to modify it, but they don't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/custom_diffusion/train_custom_diffusion.py) and let us know if you have any questions or concerns.
</Tip>
## Script parameters
The training script contains all the parameters to help you customize your training run. These are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L319) function. The function comes with default values, but you can also set your own values in the training command if you'd like.
For example, to change the resolution of the input image:
```bash
accelerate launch train_custom_diffusion.py \
--resolution=256
```
Many of the basic parameters are described in the [DreamBooth](dreambooth#script-parameters) training guide, so this guide focuses on the parameters unique to Custom Diffusion:
- `--freeze_model`: freezes the key and value parameters in the cross-attention layer; the default is `crossattn_kv`, but you can set it to `crossattn` to train all the parameters in the cross-attention layer
- `--concepts_list`: to learn multiple concepts, provide a path to a JSON file containing the concepts
- `--modifier_token`: a special word used to represent the learned concept
- `--initializer_token`: a special word used to initialize the embeddings of the `modifier_token`
### Prior preservation loss
Prior preservation loss is a method that uses a model's own generated samples to help it learn how to generate more diverse images. Because these generated sample images belong to the same class as the images you provided, they help the model retain what it has learned about the class and how it can use what it already knows about the class to make new compositions.
Many of the parameters for prior preservation loss are described in the [DreamBooth](dreambooth#prior-preservation-loss) training guide.
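Conceptually, the overall training loss becomes a weighted sum of the loss on your target (instance) images and the loss on the generated class images, with `--prior_loss_weight` controlling the balance. The following is a rough, self-contained sketch of that combination using dummy tensors; the variable names are illustrative, not the exact script code:
```py
import torch
import torch.nn.functional as F

# dummy predictions/targets standing in for the UNet output and the noise target;
# in the script, each batch stacks instance examples and class (prior) examples together
model_pred = torch.randn(4, 4, 64, 64)
target = torch.randn(4, 4, 64, 64)
prior_loss_weight = 1.0  # corresponds to --prior_loss_weight

# split into instance and prior halves, compute both losses, then combine them
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
target, target_prior = torch.chunk(target, 2, dim=0)

instance_loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
loss = instance_loss + prior_loss_weight * prior_loss
```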
### Regularization
Custom Diffusion trains on the target images together with a small set of real images to prevent overfitting. As you can imagine, overfitting can easily happen when you're only training on a few images! Download 200 real images with `clip_retrieval`. The `class_prompt` should be the same category as the target images. These images are stored in `class_data_dir`.
```bash
python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --num_class_images 200
```
To enable regularization, add the following parameters:
- `--with_prior_preservation`: whether to use prior preservation loss
- `--prior_loss_weight`: controls the influence of the prior preservation loss on the model
- `--real_prior`: whether to use a small set of real images to prevent overfitting
```bash
accelerate launch train_custom_diffusion.py \
--with_prior_preservation \
--prior_loss_weight=1.0 \
--class_data_dir="./real_reg/samples_cat" \
--class_prompt="cat" \
--real_prior=True \
```
## Training script
<Tip>
A lot of the code in the Custom Diffusion training script is similar to the [DreamBooth](dreambooth#training-script) script. This guide instead focuses on the code that is relevant to Custom Diffusion.
</Tip>
The Custom Diffusion training script has two dataset classes:
- [`CustomDiffusionDataset`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L165): preprocesses the images, class images, and prompts for training
- [`PromptDataset`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L148): prepares the prompts for generating class images
Next, the `modifier_token` is [added to the tokenizer](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L811), converted to token ids, and the token embeddings are resized to account for the new `modifier_token`. Then the `modifier_token` embeddings are initialized with the embeddings of the `initializer_token`. All parameters in the text encoder are frozen, except for the token embeddings since this is what the model is trying to learn to associate with the concepts.
```py
params_to_freeze = itertools.chain(
text_encoder.text_model.encoder.parameters(),
text_encoder.text_model.final_layer_norm.parameters(),
text_encoder.text_model.embeddings.position_embedding.parameters(),
)
freeze_params(params_to_freeze)
```
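For context, the token addition and embedding initialization described above follow a pattern roughly like the sketch below; the model id and initializer word are examples, and the real script handles multiple tokens and reads these values from the command-line arguments:
```py
from transformers import CLIPTextModel, CLIPTokenizer

model_id = "CompVis/stable-diffusion-v1-4"
tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder")

# add the modifier token and resize the embedding matrix to make room for it
tokenizer.add_tokens(["<new1>"])
modifier_token_id = tokenizer.convert_tokens_to_ids("<new1>")
text_encoder.resize_token_embeddings(len(tokenizer))

# initialize the new embedding with the initializer token's embedding ("ktn" is an example)
initializer_token_id = tokenizer.encode("ktn", add_special_tokens=False)[0]
token_embeds = text_encoder.get_input_embeddings().weight.data
token_embeds[modifier_token_id] = token_embeds[initializer_token_id]
```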
Now you'll need to add the [Custom Diffusion weights](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L911C3-L911C3) to the attention layers. This is a really important step for getting the shape and size of the attention weights correct, and for setting the appropriate number of attention processors in each UNet block.
```py
st = unet.state_dict()
for name, _ in unet.attn_processors.items():
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
if name.startswith("mid_block"):
hidden_size = unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = unet.config.block_out_channels[block_id]
layer_name = name.split(".processor")[0]
weights = {
"to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"],
"to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"],
}
if train_q_out:
weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"]
weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"]
weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"]
if cross_attention_dim is not None:
custom_diffusion_attn_procs[name] = attention_class(
train_kv=train_kv,
train_q_out=train_q_out,
hidden_size=hidden_size,
cross_attention_dim=cross_attention_dim,
).to(unet.device)
custom_diffusion_attn_procs[name].load_state_dict(weights)
else:
custom_diffusion_attn_procs[name] = attention_class(
train_kv=False,
train_q_out=False,
hidden_size=hidden_size,
cross_attention_dim=cross_attention_dim,
)
del st
unet.set_attn_processor(custom_diffusion_attn_procs)
custom_diffusion_layers = AttnProcsLayers(unet.attn_processors)
```
The [optimizer](https://github.com/huggingface/diffusers/blob/84cd9e8d01adb47f046b1ee449fc76a0c32dc4e2/examples/custom_diffusion/train_custom_diffusion.py#L982) is initialized to update the cross-attention layer parameters:
```py
optimizer = optimizer_class(
itertools.chain(text_encoder.get_input_embeddings().parameters(), custom_diffusion_layers.parameters())
if args.modifier_token is not None
else custom_diffusion_layers.parameters(),
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
```
In the [training loop](https://github.com/huggingface/diffusers/blob/84cd9e8d01adb47f046b1ee449fc76a0c32dc4e2/examples/custom_diffusion/train_custom_diffusion.py#L1048), it is important to only update the embeddings for the concept you're trying to learn. This means setting the gradients of all the other token embeddings to zero:
```py
if args.modifier_token is not None:
if accelerator.num_processes > 1:
grads_text_encoder = text_encoder.module.get_input_embeddings().weight.grad
else:
grads_text_encoder = text_encoder.get_input_embeddings().weight.grad
index_grads_to_zero = torch.arange(len(tokenizer)) != modifier_token_id[0]
for i in range(len(modifier_token_id[1:])):
index_grads_to_zero = index_grads_to_zero & (
torch.arange(len(tokenizer)) != modifier_token_id[i]
)
grads_text_encoder.data[index_grads_to_zero, :] = grads_text_encoder.data[
index_grads_to_zero, :
].fill_(0)
```
## Launch the script
Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! 🚀
In this guide, you'll download and use these example [cat images](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip). You can also create and use your own dataset if you want (see the [Create a dataset for training](create_dataset) guide).
Set the environment variable `MODEL_NAME` to a model id on the Hub or a path to a local model, `INSTANCE_DIR` to the path where you just downloaded the cat images to, and `OUTPUT_DIR` to where you want to save the model. You'll use `<new1>` as the special word to tie the newly learned embeddings to. The script creates and saves model checkpoints and a pytorch_custom_diffusion_weights.bin file to your repository.
To monitor training progress with Weights and Biases, add the `--report_to=wandb` parameter to the training command and specify a validation prompt with `--validation_prompt`. This is useful for debugging and saving intermediate results.
<Tip>
If you're training on human faces, the Custom Diffusion team has found the following parameters to work well:
- `--learning_rate=5e-6`
- `--max_train_steps` can be anywhere between 1000 and 2000
- `--freeze_model=crossattn`
- use at least 15-20 images to train with
</Tip>
<hfoptions id="training-inference">
<hfoption id="single concept">
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export OUTPUT_DIR="path-to-save-model"
export INSTANCE_DIR="./data/cat"
accelerate launch train_custom_diffusion.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--output_dir=$OUTPUT_DIR \
--class_data_dir=./real_reg/samples_cat/ \
--with_prior_preservation \
--real_prior \
--prior_loss_weight=1.0 \
--class_prompt="cat" \
--num_class_images=200 \
--instance_prompt="photo of a <new1> cat" \
--resolution=512 \
--train_batch_size=2 \
--learning_rate=1e-5 \
--lr_warmup_steps=0 \
--max_train_steps=250 \
--scale_lr \
--hflip \
--modifier_token "<new1>" \
--validation_prompt="<new1> cat sitting in a bucket" \
--report_to="wandb" \
--push_to_hub
```
</hfoption>
<hfoption id="multiple concepts">
Custom Diffusion can also learn multiple concepts if you provide a [JSON](https://github.com/adobe-research/custom-diffusion/blob/main/assets/concept_list.json) file with some details about each concept it should learn.
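If you prefer, you can also write the file programmatically. The sketch below is a hypothetical example following the structure of the linked file; adapt the prompts and directories to your own data:
```py
import json

concepts_list = [
    {
        "instance_prompt": "photo of a <new1> cat",
        "class_prompt": "cat",
        "instance_data_dir": "./data/cat",
        "class_data_dir": "./real_reg/samples_cat",
    },
    {
        "instance_prompt": "photo of a <new2> wooden pot",
        "class_prompt": "wooden pot",
        "instance_data_dir": "./data/wooden_pot",
        "class_data_dir": "./real_reg/samples_wooden_pot",
    },
]

with open("concept_list.json", "w") as f:
    json.dump(concepts_list, f, indent=4)
```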
Run clip-retrieval to collect some real images to use for regularization:
```bash
pip install clip-retrieval
python retrieve.py --class_prompt {} --class_data_dir {} --num_class_images 200
```
Then you can launch the script:
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export OUTPUT_DIR="path-to-save-model"
accelerate launch train_custom_diffusion.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--output_dir=$OUTPUT_DIR \
--concepts_list=./concept_list.json \
--with_prior_preservation \
--real_prior \
--prior_loss_weight=1.0 \
--resolution=512 \
--train_batch_size=2 \
--learning_rate=1e-5 \
--lr_warmup_steps=0 \
--max_train_steps=500 \
--num_class_images=200 \
--scale_lr \
--hflip \
--modifier_token "<new1>+<new2>" \
--push_to_hub
```
</hfoption>
</hfoptions>
Once training is finished, you can use your new Custom Diffusion model for inference.
<hfoptions id="training-inference">
<hfoption id="single concept">
```py
import torch
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16,
).to("cuda")
pipeline.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin")
pipeline.load_textual_inversion("path-to-save-model", weight_name="<new1>.bin")
image = pipeline(
"<new1> cat sitting in a bucket",
num_inference_steps=100,
guidance_scale=6.0,
eta=1.0,
).images[0]
image.save("cat.png")
```
</hfoption>
<hfoption id="multiple concepts">
```py
import torch
from diffusers import DiffusionPipeline
model_id = "sayakpaul/custom-diffusion-cat-wooden-pot"
pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
pipeline.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
pipeline.load_textual_inversion(model_id, weight_name="<new1>.bin")
pipeline.load_textual_inversion(model_id, weight_name="<new2>.bin")
image = pipeline(
"the <new1> cat sculpture in the style of a <new2> wooden pot",
num_inference_steps=100,
guidance_scale=6.0,
eta=1.0,
).images[0]
image.save("multi-subject.png")
```
</hfoption>
</hfoptions>
## Next steps
Congratulations on training a model with Custom Diffusion! 🎉 To learn more:
- Read the [Multi-Concept Customization of Text-to-Image Diffusion](https://www.cs.cmu.edu/~custom-diffusion/) blog post to learn more details about the experimental results from the Custom Diffusion team. | diffusers/docs/source/en/training/custom_diffusion.md/0 | {
"file_path": "diffusers/docs/source/en/training/custom_diffusion.md",
"repo_id": "diffusers",
"token_count": 5471
} | 87 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
[[open-in-colab]]
# Train a diffusion model
Unconditional image generation is a popular application of diffusion models that generates images that look like those in the dataset used for training. Typically, the best results are obtained from finetuning a pretrained model on a specific dataset. You can find many of these checkpoints on the [Hub](https://huggingface.co/search/full-text?q=unconditional-image-generation&type=model), but if you can't find one you like, you can always train your own!
This tutorial will teach you how to train a [`UNet2DModel`] from scratch on a subset of the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset to generate your own 🦋 butterflies 🦋.
<Tip>
💡 This training tutorial is based on the [Training with 🧨 Diffusers](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) notebook. For additional details and context about diffusion models like how they work, check out the notebook!
</Tip>
Before you begin, make sure you have 🤗 Datasets installed to load and preprocess image datasets, and 🤗 Accelerate, to simplify training on any number of GPUs. The following command will also install [TensorBoard](https://www.tensorflow.org/tensorboard) to visualize training metrics (you can also use [Weights & Biases](https://docs.wandb.ai/) to track your training).
```py
# uncomment to install the necessary libraries in Colab
#!pip install diffusers[training]
```
We encourage you to share your model with the community, and in order to do that, you'll need to log in to your Hugging Face account (create one [here](https://hf.co/join) if you don't already have one!). You can log in from a notebook and enter your token when prompted. Make sure your token has the write role.
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
Or log in from the terminal:
```bash
huggingface-cli login
```
Since the model checkpoints are quite large, install [Git-LFS](https://git-lfs.com/) to version these large files:
```bash
!sudo apt -qq install git-lfs
!git config --global credential.helper store
```
## Training configuration
For convenience, create a `TrainingConfig` class containing the training hyperparameters (feel free to adjust them):
```py
>>> from dataclasses import dataclass
>>> @dataclass
... class TrainingConfig:
... image_size = 128 # the generated image resolution
... train_batch_size = 16
... eval_batch_size = 16 # how many images to sample during evaluation
... num_epochs = 50
... gradient_accumulation_steps = 1
... learning_rate = 1e-4
... lr_warmup_steps = 500
... save_image_epochs = 10
... save_model_epochs = 30
... mixed_precision = "fp16" # `no` for float32, `fp16` for automatic mixed precision
... output_dir = "ddpm-butterflies-128" # the model name locally and on the HF Hub
... push_to_hub = True # whether to upload the saved model to the HF Hub
... hub_model_id = "<your-username>/<my-awesome-model>" # the name of the repository to create on the HF Hub
... hub_private_repo = False
... overwrite_output_dir = True # overwrite the old model when re-running the notebook
... seed = 0
>>> config = TrainingConfig()
```
## Load the dataset
You can easily load the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset with the 🤗 Datasets library:
```py
>>> from datasets import load_dataset
>>> config.dataset_name = "huggan/smithsonian_butterflies_subset"
>>> dataset = load_dataset(config.dataset_name, split="train")
```
<Tip>
💡 You can find additional datasets from the [HugGan Community Event](https://huggingface.co/huggan) or you can use your own dataset by creating a local [`ImageFolder`](https://huggingface.co/docs/datasets/image_dataset#imagefolder). Set `config.dataset_name` to the repository id of the dataset if it is from the HugGan Community Event, or `imagefolder` if you're using your own images.
</Tip>
🤗 Datasets uses the [`~datasets.Image`] feature to automatically decode the image data and load it as a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html) which we can visualize:
```py
>>> import matplotlib.pyplot as plt
>>> fig, axs = plt.subplots(1, 4, figsize=(16, 4))
>>> for i, image in enumerate(dataset[:4]["image"]):
... axs[i].imshow(image)
... axs[i].set_axis_off()
>>> fig.show()
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_ds.png"/>
</div>
The images are all different sizes though, so you'll need to preprocess them first:
* `Resize` changes the image size to the one defined in `config.image_size`.
* `RandomHorizontalFlip` augments the dataset by randomly mirroring the images.
* `Normalize` is important to rescale the pixel values into a [-1, 1] range, which is what the model expects.
```py
>>> from torchvision import transforms
>>> preprocess = transforms.Compose(
... [
... transforms.Resize((config.image_size, config.image_size)),
... transforms.RandomHorizontalFlip(),
... transforms.ToTensor(),
... transforms.Normalize([0.5], [0.5]),
... ]
... )
```
Use 🤗 Datasets' [`~datasets.Dataset.set_transform`] method to apply the `preprocess` function on the fly during training:
```py
>>> def transform(examples):
... images = [preprocess(image.convert("RGB")) for image in examples["image"]]
... return {"images": images}
>>> dataset.set_transform(transform)
```
Feel free to visualize the images again to confirm that they've been resized. Now you're ready to wrap the dataset in a [DataLoader](https://pytorch.org/docs/stable/data#torch.utils.data.DataLoader) for training!
```py
>>> import torch
>>> train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True)
```
## Create a UNet2DModel
Pretrained models in 🧨 Diffusers are easily created from their model class with the parameters you want. For example, to create a [`UNet2DModel`]:
```py
>>> from diffusers import UNet2DModel
>>> model = UNet2DModel(
... sample_size=config.image_size, # the target image resolution
... in_channels=3, # the number of input channels, 3 for RGB images
... out_channels=3, # the number of output channels
... layers_per_block=2, # how many ResNet layers to use per UNet block
... block_out_channels=(128, 128, 256, 256, 512, 512), # the number of output channels for each UNet block
... down_block_types=(
... "DownBlock2D", # a regular ResNet downsampling block
... "DownBlock2D",
... "DownBlock2D",
... "DownBlock2D",
... "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention
... "DownBlock2D",
... ),
... up_block_types=(
... "UpBlock2D", # a regular ResNet upsampling block
... "AttnUpBlock2D", # a ResNet upsampling block with spatial self-attention
... "UpBlock2D",
... "UpBlock2D",
... "UpBlock2D",
... "UpBlock2D",
... ),
... )
```
It is often a good idea to quickly check the sample image shape matches the model output shape:
```py
>>> sample_image = dataset[0]["images"].unsqueeze(0)
>>> print("Input shape:", sample_image.shape)
Input shape: torch.Size([1, 3, 128, 128])
>>> print("Output shape:", model(sample_image, timestep=0).sample.shape)
Output shape: torch.Size([1, 3, 128, 128])
```
Great! Next, you'll need a scheduler to add some noise to the image.
## Create a scheduler
The scheduler behaves differently depending on whether you're using the model for training or inference. During inference, the scheduler generates an image from noise. During training, the scheduler takes a model output - or a sample - from a specific point in the diffusion process and applies noise to the image according to a *noise schedule* and an *update rule*.
Let's take a look at the [`DDPMScheduler`] and use the `add_noise` method to add some random noise to the `sample_image` from before:
```py
>>> import torch
>>> from PIL import Image
>>> from diffusers import DDPMScheduler
>>> noise_scheduler = DDPMScheduler(num_train_timesteps=1000)
>>> noise = torch.randn(sample_image.shape)
>>> timesteps = torch.LongTensor([50])
>>> noisy_image = noise_scheduler.add_noise(sample_image, noise, timesteps)
>>> Image.fromarray(((noisy_image.permute(0, 2, 3, 1) + 1.0) * 127.5).type(torch.uint8).numpy()[0])
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/noisy_butterfly.png"/>
</div>
The training objective of the model is to predict the noise added to the image. The loss at this step can be calculated by:
```py
>>> import torch.nn.functional as F
>>> noise_pred = model(noisy_image, timesteps).sample
>>> loss = F.mse_loss(noise_pred, noise)
```
## Train the model
By now, you have most of the pieces to start training the model and all that's left is putting everything together.
First, you'll need an optimizer and a learning rate scheduler:
```py
>>> from diffusers.optimization import get_cosine_schedule_with_warmup
>>> optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)
>>> lr_scheduler = get_cosine_schedule_with_warmup(
... optimizer=optimizer,
... num_warmup_steps=config.lr_warmup_steps,
... num_training_steps=(len(train_dataloader) * config.num_epochs),
... )
```
Then, you'll need a way to evaluate the model. For evaluation, you can use the [`DDPMPipeline`] to generate a batch of sample images and save it as a grid:
```py
>>> from diffusers import DDPMPipeline
>>> from diffusers.utils import make_image_grid
>>> import os
>>> def evaluate(config, epoch, pipeline):
... # Sample some images from random noise (this is the backward diffusion process).
... # The default pipeline output type is `List[PIL.Image]`
... images = pipeline(
... batch_size=config.eval_batch_size,
... generator=torch.manual_seed(config.seed),
... ).images
... # Make a grid out of the images
... image_grid = make_image_grid(images, rows=4, cols=4)
... # Save the images
... test_dir = os.path.join(config.output_dir, "samples")
... os.makedirs(test_dir, exist_ok=True)
... image_grid.save(f"{test_dir}/{epoch:04d}.png")
```
Now you can wrap all these components together in a training loop with 🤗 Accelerate for easy TensorBoard logging, gradient accumulation, and mixed precision training. To upload the model to the Hub, write a function to get your repository name and information and then push it to the Hub.
<Tip>
💡 The training loop below may look intimidating and long, but it'll be worth it later when you launch your training in just one line of code! If you can't wait and want to start generating images, feel free to copy and run the code below. You can always come back and examine the training loop more closely later, like when you're waiting for your model to finish training. 🤗
</Tip>
```py
>>> from accelerate import Accelerator
>>> from huggingface_hub import create_repo, upload_folder
>>> from tqdm.auto import tqdm
>>> from pathlib import Path
>>> import os
>>> def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler):
... # Initialize accelerator and tensorboard logging
... accelerator = Accelerator(
... mixed_precision=config.mixed_precision,
... gradient_accumulation_steps=config.gradient_accumulation_steps,
... log_with="tensorboard",
... project_dir=os.path.join(config.output_dir, "logs"),
... )
... if accelerator.is_main_process:
... if config.output_dir is not None:
... os.makedirs(config.output_dir, exist_ok=True)
... if config.push_to_hub:
... repo_id = create_repo(
... repo_id=config.hub_model_id or Path(config.output_dir).name, exist_ok=True
... ).repo_id
... accelerator.init_trackers("train_example")
... # Prepare everything
... # There is no specific order to remember, you just need to unpack the
... # objects in the same order you gave them to the prepare method.
... model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
... model, optimizer, train_dataloader, lr_scheduler
... )
... global_step = 0
... # Now you train the model
... for epoch in range(config.num_epochs):
... progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process)
... progress_bar.set_description(f"Epoch {epoch}")
... for step, batch in enumerate(train_dataloader):
... clean_images = batch["images"]
... # Sample noise to add to the images
... noise = torch.randn(clean_images.shape, device=clean_images.device)
... bs = clean_images.shape[0]
... # Sample a random timestep for each image
... timesteps = torch.randint(
... 0, noise_scheduler.config.num_train_timesteps, (bs,), device=clean_images.device,
... dtype=torch.int64
... )
... # Add noise to the clean images according to the noise magnitude at each timestep
... # (this is the forward diffusion process)
... noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps)
... with accelerator.accumulate(model):
... # Predict the noise residual
... noise_pred = model(noisy_images, timesteps, return_dict=False)[0]
... loss = F.mse_loss(noise_pred, noise)
... accelerator.backward(loss)
... accelerator.clip_grad_norm_(model.parameters(), 1.0)
... optimizer.step()
... lr_scheduler.step()
... optimizer.zero_grad()
... progress_bar.update(1)
... logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
... progress_bar.set_postfix(**logs)
... accelerator.log(logs, step=global_step)
... global_step += 1
... # After each epoch you optionally sample some demo images with evaluate() and save the model
... if accelerator.is_main_process:
... pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler)
... if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1:
... evaluate(config, epoch, pipeline)
... if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1:
... if config.push_to_hub:
... upload_folder(
... repo_id=repo_id,
... folder_path=config.output_dir,
... commit_message=f"Epoch {epoch}",
... ignore_patterns=["step_*", "epoch_*"],
... )
... else:
... pipeline.save_pretrained(config.output_dir)
```
Phew, that was quite a bit of code! But you're finally ready to launch the training with 🤗 Accelerate's [`~accelerate.notebook_launcher`] function. Pass the function the training loop, all the training arguments, and the number of processes (you can change this value to the number of GPUs available to you) to use for training:
```py
>>> from accelerate import notebook_launcher
>>> args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler)
>>> notebook_launcher(train_loop, args, num_processes=1)
```
Once training is complete, take a look at the final 🦋 images 🦋 generated by your diffusion model!
```py
>>> import glob
>>> sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png"))
>>> Image.open(sample_images[-1])
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_final.png"/>
</div>
## Next steps
Unconditional image generation is one example of a task that can be trained. You can explore other tasks and training techniques by visiting the [🧨 Diffusers Training Examples](../training/overview) page. Here are some examples of what you can learn:
* [Textual Inversion](../training/text_inversion), an algorithm that teaches a model a specific visual concept and integrates it into the generated image.
* [DreamBooth](../training/dreambooth), a technique for generating personalized images of a subject given several input images of the subject.
* [Guide](../training/text2image) to finetuning a Stable Diffusion model on your own dataset.
* [Guide](../training/lora) to using LoRA, a memory-efficient technique for finetuning really large models faster.
| diffusers/docs/source/en/tutorials/basic_training.md/0 | {
"file_path": "diffusers/docs/source/en/tutorials/basic_training.md",
"repo_id": "diffusers",
"token_count": 6187
} | 88 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Image-to-image
[[open-in-colab]]
Image-to-image is similar to [text-to-image](conditional_image_generation), but in addition to a prompt, you can also pass an initial image as a starting point for the diffusion process. The initial image is encoded to latent space and noise is added to it. Then the latent diffusion model takes a prompt and the noisy latent image, predicts the added noise, and removes the predicted noise from the initial latent image to get the new latent image. Lastly, a decoder decodes the new latent image back into an image.
With 🤗 Diffusers, this is as easy as 1-2-3:
1. Load a checkpoint into the [`AutoPipelineForImage2Image`] class; this pipeline automatically handles loading the correct pipeline class based on the checkpoint:
```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image, make_image_grid
pipeline = AutoPipelineForImage2Image.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
```
<Tip>
You'll notice that throughout the guide, we use [`~DiffusionPipeline.enable_model_cpu_offload`] and [`~DiffusionPipeline.enable_xformers_memory_efficient_attention`] to save memory and increase inference speed. If you're using PyTorch 2.0, then you don't need to call [`~DiffusionPipeline.enable_xformers_memory_efficient_attention`] on your pipeline because it'll already be using PyTorch 2.0's native [scaled-dot product attention](../optimization/torch2.0#scaled-dot-product-attention).
</Tip>
2. Load an image to pass to the pipeline:
```py
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
```
3. Pass a prompt and image to the pipeline to generate an image:
```py
prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k"
image = pipeline(prompt, image=init_image).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```
<div class="flex gap-4">
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption>
</div>
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
</div>
</div>
## Popular models
The most popular image-to-image models are [Stable Diffusion v1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5), [Stable Diffusion XL (SDXL)](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), and [Kandinsky 2.2](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder). The results from the Stable Diffusion and Kandinsky models vary due to their architecture differences and training process; you can generally expect SDXL to produce higher quality images than Stable Diffusion v1.5. Let's take a quick look at how to use each of these models and compare their results.
### Stable Diffusion v1.5
Stable Diffusion v1.5 is a latent diffusion model initialized from an earlier checkpoint, and further finetuned for 595K steps on 512x512 images. To use this pipeline for image-to-image, you'll need to prepare an initial image to pass to the pipeline. Then you can pass a prompt and the image to the pipeline to generate a new image:
```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import make_image_grid, load_image
pipeline = AutoPipelineForImage2Image.from_pretrained(
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
# prepare image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
init_image = load_image(url)
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
# pass prompt and image to pipeline
image = pipeline(prompt, image=init_image).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```
<div class="flex gap-4">
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption>
</div>
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdv1.5.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
</div>
</div>
### Stable Diffusion XL (SDXL)
SDXL is a more powerful version of the Stable Diffusion model. It uses a larger base model, and an additional refiner model to increase the quality of the base model's output. Read the [SDXL](sdxl) guide for a more detailed walkthrough of how to use this model, and other techniques it uses to produce high quality images.
```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import make_image_grid, load_image
pipeline = AutoPipelineForImage2Image.from_pretrained(
"stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
# prepare image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdxl-init.png"
init_image = load_image(url)
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
# pass prompt and image to pipeline
image = pipeline(prompt, image=init_image, strength=0.5).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```
<div class="flex gap-4">
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdxl-init.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption>
</div>
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdxl.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
</div>
</div>
### Kandinsky 2.2
The Kandinsky model is different from the Stable Diffusion models because it uses an image prior model to create image embeddings. The embeddings help create a better alignment between text and images, allowing the latent diffusion model to generate better images.
The simplest way to use Kandinsky 2.2 is:
```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import make_image_grid, load_image
pipeline = AutoPipelineForImage2Image.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
# prepare image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
init_image = load_image(url)
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
# pass prompt and image to pipeline
image = pipeline(prompt, image=init_image).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```
<div class="flex gap-4">
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption>
</div>
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-kandinsky.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
</div>
</div>
## Configure pipeline parameters
There are several important parameters you can configure in the pipeline that'll affect the image generation process and image quality. Let's take a closer look at what these parameters do and how changing them affects the output.
### Strength
`strength` is one of the most important parameters to consider and it'll have a huge impact on your generated image. It determines how much the generated image resembles the initial image. In other words:
- 📈 a higher `strength` value gives the model more "creativity" to generate an image that's different from the initial image; a `strength` value of 1.0 means the initial image is more or less ignored
- 📉 a lower `strength` value means the generated image is more similar to the initial image
The `strength` and `num_inference_steps` parameters are related because `strength` determines the number of noise steps to add. For example, if the `num_inference_steps` is 50 and `strength` is 0.8, then this means adding 40 (50 * 0.8) steps of noise to the initial image and then denoising for 40 steps to get the newly generated image.
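To make that arithmetic concrete, here is a tiny illustrative check (not part of the original example):
```py
num_inference_steps = 50
strength = 0.8
denoising_steps = int(num_inference_steps * strength)
print(denoising_steps)  # 40
```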
```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import make_image_grid, load_image
pipeline = AutoPipelineForImage2Image.from_pretrained(
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
# prepare image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
init_image = load_image(url)
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
# pass prompt and image to pipeline
image = pipeline(prompt, image=init_image, strength=0.8).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-strength-0.4.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">strength = 0.4</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-strength-0.6.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">strength = 0.6</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-strength-1.0.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">strength = 1.0</figcaption>
</div>
</div>
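If you'd like to reproduce a comparison like the one above, a sketch along these lines works, reusing the `pipeline`, `prompt`, and `init_image` defined earlier and fixing the seed so only `strength` changes (the seed value is an arbitrary choice):
```py
import torch
from diffusers.utils import make_image_grid

images = []
for strength in [0.4, 0.6, 1.0]:
    generator = torch.Generator("cuda").manual_seed(0)  # same seed for every run
    images.append(pipeline(prompt, image=init_image, strength=strength, generator=generator).images[0])
make_image_grid(images, rows=1, cols=3)
```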
### Guidance scale
The `guidance_scale` parameter is used to control how closely aligned the generated image and text prompt are. A higher `guidance_scale` value means your generated image is more aligned with the prompt, while a lower `guidance_scale` value means your generated image has more space to deviate from the prompt.
You can combine `guidance_scale` with `strength` for even more precise control over how expressive the model is. For example, combine a high `strength` and `guidance_scale` for maximum creativity, or use a combination of low `strength` and low `guidance_scale` to generate an image that resembles the initial image but is not as strictly bound to the prompt.
```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import make_image_grid, load_image
pipeline = AutoPipelineForImage2Image.from_pretrained(
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
# prepare image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
init_image = load_image(url)
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
# pass prompt and image to pipeline
image = pipeline(prompt, image=init_image, guidance_scale=8.0).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-guidance-0.1.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 0.1</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-guidance-3.0.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 5.0</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-guidance-7.5.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 10.0</figcaption>
</div>
</div>
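A similar sweep, again reusing the objects defined above with an arbitrary fixed seed, compares `guidance_scale` values side by side:
```py
import torch
from diffusers.utils import make_image_grid

images = []
for guidance_scale in [0.1, 5.0, 10.0]:
    generator = torch.Generator("cuda").manual_seed(0)
    images.append(pipeline(prompt, image=init_image, guidance_scale=guidance_scale, generator=generator).images[0])
make_image_grid(images, rows=1, cols=3)
```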
### Negative prompt
A negative prompt conditions the model to *not* include things in an image, and it can be used to improve image quality or modify an image. For example, you can improve image quality by including negative prompts like "poor details" or "blurry" to encourage the model to generate a higher quality image. Or you can modify an image by specifying things to exclude from an image.
```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import make_image_grid, load_image
pipeline = AutoPipelineForImage2Image.from_pretrained(
"stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
# prepare image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
init_image = load_image(url)
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy"
# pass prompt and image to pipeline
image = pipeline(prompt, negative_prompt=negative_prompt, image=init_image).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-negative-1.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy"</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-negative-2.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">negative_prompt = "jungle"</figcaption>
</div>
</div>
## Chained image-to-image pipelines
There are some other interesting ways you can use an image-to-image pipeline aside from just generating an image (although that is pretty cool too). You can take it a step further and chain it with other pipelines.
### Text-to-image-to-image
Chaining a text-to-image and image-to-image pipeline allows you to generate an image from text and use the generated image as the initial image for the image-to-image pipeline. This is useful if you want to generate an image entirely from scratch. For example, let's chain a Stable Diffusion and a Kandinsky model.
Start by generating an image with the text-to-image pipeline:
```py
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
import torch
from diffusers.utils import make_image_grid
pipeline = AutoPipelineForText2Image.from_pretrained(
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
text2image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k").images[0]
text2image
```
Now you can pass this generated image to the image-to-image pipeline:
```py
pipeline = AutoPipelineForImage2Image.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
image2image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=text2image).images[0]
make_image_grid([text2image, image2image], rows=1, cols=2)
```
### Image-to-image-to-image
You can also chain multiple image-to-image pipelines together to create more interesting images. This can be useful for iteratively performing style transfer on an image, generating short GIFs, restoring color to an image, or restoring missing areas of an image.
Start by generating an image:
```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import make_image_grid, load_image
pipeline = AutoPipelineForImage2Image.from_pretrained(
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
# prepare image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
init_image = load_image(url)
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
# pass prompt and image to pipeline
image = pipeline(prompt, image=init_image, output_type="latent").images[0]
```
<Tip>
It is important to specify `output_type="latent"` in the pipeline to keep all the outputs in latent space to avoid an unnecessary decode-encode step. This only works if the chained pipelines are using the same VAE.
</Tip>
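If the two checkpoints don't ship the same VAE, one way to keep them consistent is to pass the first pipeline's VAE when loading the second one. The snippet below is a sketch of that idea (the `pipeline_comic` name is just illustrative):
```py
import torch
from diffusers import AutoPipelineForImage2Image

pipeline_comic = AutoPipelineForImage2Image.from_pretrained(
    "ogkalu/Comic-Diffusion", vae=pipeline.vae, torch_dtype=torch.float16
)
```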
Pass the latent output from this pipeline to the next pipeline to generate an image in a [comic book art style](https://huggingface.co/ogkalu/Comic-Diffusion):
```py
pipeline = AutoPipelineForImage2Image.from_pretrained(
"ogkalu/Comic-Diffusion", torch_dtype=torch.float16
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
# need to include the token "charliebo artstyle" in the prompt to use this checkpoint
image = pipeline("Astronaut in a jungle, charliebo artstyle", image=image, output_type="latent").images[0]
```
Repeat one more time to generate the final image in a [pixel art style](https://huggingface.co/kohbanye/pixel-art-style):
```py
pipeline = AutoPipelineForImage2Image.from_pretrained(
"kohbanye/pixel-art-style", torch_dtype=torch.float16
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
# need to include the token "pixelartstyle" in the prompt to use this checkpoint
image = pipeline("Astronaut in a jungle, pixelartstyle", image=image).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```
### Image-to-upscaler-to-super-resolution
Another way you can chain your image-to-image pipeline is with an upscaler and super-resolution pipeline to really increase the level of detail in an image.
Start with an image-to-image pipeline:
```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import make_image_grid, load_image
pipeline = AutoPipelineForImage2Image.from_pretrained(
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
# prepare image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
init_image = load_image(url)
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
# pass prompt and image to pipeline
image_1 = pipeline(prompt, image=init_image, output_type="latent").images[0]
```
<Tip>
It is important to specify `output_type="latent"` in the pipeline to keep all the outputs in *latent* space to avoid an unnecessary decode-encode step. This only works if the chained pipelines are using the same VAE.
</Tip>
Chain it to an upscaler pipeline to increase the image resolution:
```py
from diffusers import StableDiffusionLatentUpscalePipeline
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
upscaler.enable_model_cpu_offload()
upscaler.enable_xformers_memory_efficient_attention()
image_2 = upscaler(prompt, image=image_1, output_type="latent").images[0]
```
Finally, chain it to a super-resolution pipeline to further enhance the resolution:
```py
from diffusers import StableDiffusionUpscalePipeline
super_res = StableDiffusionUpscalePipeline.from_pretrained(
"stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
super_res.enable_model_cpu_offload()
super_res.enable_xformers_memory_efficient_attention()
image_3 = super_res(prompt, image=image_2).images[0]
make_image_grid([init_image, image_3.resize((512, 512))], rows=1, cols=2)
```
## Control image generation
Trying to generate an image that looks exactly the way you want can be difficult, which is why controlled generation techniques and models are so useful. While you can use the `negative_prompt` to partially control image generation, there are more robust methods like prompt weighting and ControlNets.
### Prompt weighting
Prompt weighting allows you to scale the representation of each concept in a prompt. For example, in a prompt like "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", you can choose to increase or decrease the embeddings of "astronaut" and "jungle". The [Compel](https://github.com/damian0815/compel) library provides a simple syntax for adjusting prompt weights and generating the embeddings. You can learn how to create the embeddings in the [Prompt weighting](weighted_prompts) guide.
[`AutoPipelineForImage2Image`] has a `prompt_embeds` (and `negative_prompt_embeds` if you're using a negative prompt) parameter where you can pass the embeddings, which replace the `prompt` parameter.
```py
from diffusers import AutoPipelineForImage2Image
import torch
pipeline = AutoPipelineForImage2Image.from_pretrained(
"runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
image = pipeline(prompt_embeds=prompt_embeds, # generated from Compel
negative_prompt_embeds=negative_prompt_embeds, # generated from Compel
image=init_image,
).images[0]
```
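The embeddings themselves aren't created in the snippet above. One way they could be produced is with the Compel library; the sketch below follows Compel's documented usage (the `++` syntax up-weights a term), but treat it as an assumption rather than part of this pipeline's API:
```py
from compel import Compel

compel_proc = Compel(tokenizer=pipeline.tokenizer, text_encoder=pipeline.text_encoder)
prompt_embeds = compel_proc("Astronaut in a jungle++, cold color palette, muted colors, detailed, 8k")
negative_prompt_embeds = compel_proc("ugly, deformed, disfigured, poor details, bad anatomy")
```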
### ControlNet
ControlNets provide a more flexible and accurate way to control image generation because you can use an additional conditioning image. The conditioning image can be a canny image, depth map, image segmentation, and even scribbles! Whatever type of conditioning image you choose, the ControlNet generates an image that preserves the information in it.
For example, let's condition an image with a depth map to keep the spatial information in the image.
```py
from diffusers.utils import load_image, make_image_grid
# prepare image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
init_image = load_image(url)
init_image = init_image.resize((958, 960)) # resize to depth image dimensions
depth_image = load_image("https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png")
make_image_grid([init_image, depth_image], rows=1, cols=2)
```
Load a ControlNet model conditioned on depth maps and the [`AutoPipelineForImage2Image`]:
```py
from diffusers import ControlNetModel, AutoPipelineForImage2Image
import torch
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11f1p_sd15_depth", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
pipeline = AutoPipelineForImage2Image.from_pretrained(
"runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
```
Now generate a new image conditioned on the depth map, initial image, and prompt:
```py
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image_control_net = pipeline(prompt, image=init_image, control_image=depth_image).images[0]
make_image_grid([init_image, depth_image, image_control_net], rows=1, cols=3)
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">depth image</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-controlnet.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">ControlNet image</figcaption>
</div>
</div>
Let's apply a new [style](https://huggingface.co/nitrosocke/elden-ring-diffusion) to the image generated from the ControlNet by chaining it with an image-to-image pipeline:
```py
pipeline = AutoPipelineForImage2Image.from_pretrained(
"nitrosocke/elden-ring-diffusion", torch_dtype=torch.float16,
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
prompt = "elden ring style astronaut in a jungle" # include the token "elden ring style" in the prompt
negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy"
image_elden_ring = pipeline(prompt, negative_prompt=negative_prompt, image=image_control_net, strength=0.45, guidance_scale=10.5).images[0]
make_image_grid([init_image, depth_image, image_control_net, image_elden_ring], rows=2, cols=2)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-elden-ring.png">
</div>
## Optimize
Running diffusion models is computationally expensive and intensive, but with a few optimization tricks, it is entirely possible to run them on consumer and free-tier GPUs. For example, you can use a more memory-efficient form of attention such as PyTorch 2.0's [scaled-dot product attention](../optimization/torch2.0#scaled-dot-product-attention) or [xFormers](../optimization/xformers) (you can use one or the other, but there's no need to use both). You can also offload the model to the GPU while the other pipeline components wait on the CPU.
```diff
+ pipeline.enable_model_cpu_offload()
+ pipeline.enable_xformers_memory_efficient_attention()
```
With [`torch.compile`](../optimization/torch2.0#torchcompile), you can boost your inference speed even more by wrapping your UNet with it:
```py
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
```
To learn more, take a look at the [Reduce memory usage](../optimization/memory) and [Torch 2.0](../optimization/torch2.0) guides.
| diffusers/docs/source/en/using-diffusers/img2img.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/img2img.md",
"repo_id": "diffusers",
"token_count": 9650
} | 89 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Stable Diffusion XL Turbo
[[open-in-colab]]
SDXL Turbo is an adversarial time-distilled [Stable Diffusion XL](https://huggingface.co/papers/2307.01952) (SDXL) model capable
of running inference in as little as 1 step.
This guide will show you how to use SDXL-Turbo for text-to-image and image-to-image.
Before you begin, make sure you have the following libraries installed:
```py
# uncomment to install the necessary libraries in Colab
#!pip install -q diffusers transformers accelerate
```
## Load model checkpoints
Model weights may be stored in separate subfolders on the Hub or locally, in which case, you should use the [`~StableDiffusionXLPipeline.from_pretrained`] method:
```py
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipeline = pipeline.to("cuda")
```
You can also use the [`~StableDiffusionXLPipeline.from_single_file`] method to load a model checkpoint stored in a single file format (`.ckpt` or `.safetensors`) from the Hub or locally:
```py
from diffusers import StableDiffusionXLPipeline
import torch
pipeline = StableDiffusionXLPipeline.from_single_file(
"https://huggingface.co/stabilityai/sdxl-turbo/blob/main/sd_xl_turbo_1.0_fp16.safetensors", torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")
```
## Text-to-image
For text-to-image, pass a text prompt. By default, SDXL Turbo generates a 512x512 image, and that resolution gives the best results. You can try setting the `height` and `width` parameters to 768x768 or 1024x1024, but you should expect quality degradations when doing so.
Make sure to set `guidance_scale` to 0.0 to disable it, as the model was trained without guidance. A single inference step is enough to generate high quality images.
Increasing the number of steps to 2, 3 or 4 should improve image quality.
```py
from diffusers import AutoPipelineForText2Image
import torch
pipeline_text2image = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipeline_text2image = pipeline_text2image.to("cuda")
prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe."
image = pipeline_text2image(prompt=prompt, guidance_scale=0.0, num_inference_steps=1).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/sdxl-turbo-text2img.png" alt="generated image of a racoon in a robe"/>
</div>
## Image-to-image
For image-to-image generation, make sure that `num_inference_steps * strength` is larger than or equal to 1.
The image-to-image pipeline will run for `int(num_inference_steps * strength)` steps, e.g. `int(2 * 0.5) = 1` step in
the example below.
```py
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image, make_image_grid
# use from_pipe to avoid consuming additional memory when loading a checkpoint
pipeline = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda")
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
init_image = init_image.resize((512, 512))
prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k"
image = pipeline(prompt, image=init_image, strength=0.5, guidance_scale=0.0, num_inference_steps=2).images[0]
make_image_grid([init_image, image], rows=1, cols=2)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/sdxl-turbo-img2img.png" alt="Image-to-image generation sample using SDXL Turbo"/>
</div>
## Speed-up SDXL Turbo even more
- Compile the UNet if you are using PyTorch 2.0 or later. The first inference run will be very slow, but subsequent ones will be much faster.
```py
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
```
- When using the default VAE, keep it in `float32` to avoid costly `dtype` conversions before and after each generation. You only need to do this once before your first generation:
```py
pipeline.upcast_vae()
```
As an alternative, you can also use a [16-bit VAE](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix) created by community member [`@madebyollin`](https://huggingface.co/madebyollin) that does not need to be upcast to `float32`.
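For example, swapping in that VAE might look like the following sketch (assuming your SDXL Turbo pipeline object is named `pipeline`):
```py
import torch
from diffusers import AutoencoderKL

pipeline.vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
).to("cuda")
```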
| diffusers/docs/source/en/using-diffusers/sdxl_turbo.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/sdxl_turbo.md",
"repo_id": "diffusers",
"token_count": 1614
} | 90 |
- sections:
  - local: index
    title: "🧨 Diffusers"
  - local: quicktour
    title: "Quicktour"
  - local: stable_diffusion
    title: Stable Diffusion
  - local: installation
    title: "Installation"
  title: "Get started"
- sections:
  - local: tutorials/tutorial_overview
    title: Overview
  - local: using-diffusers/write_own_pipeline
    title: Understanding models and schedulers
  - local: in_translation
    title: AutoPipeline
  - local: tutorials/basic_training
    title: Train a diffusion model
  title: Tutorials
- sections:
  - sections:
    - local: using-diffusers/loading_overview
      title: Overview
    - local: using-diffusers/loading
      title: Load pipelines, models, and schedulers
    - local: using-diffusers/schedulers
      title: Load and compare different schedulers
    - local: using-diffusers/custom_pipeline_overview
      title: Load community pipelines
    - local: using-diffusers/using_safetensors
      title: Load safetensors
    - local: using-diffusers/other-formats
      title: Load different Stable Diffusion formats
    - local: in_translation
      title: Push files to the Hub
    title: Loading & Hub
  - sections:
    - local: using-diffusers/pipeline_overview
      title: Overview
    - local: using-diffusers/unconditional_image_generation
      title: Unconditional image generation
    - local: using-diffusers/conditional_image_generation
      title: Text-to-image generation
    - local: using-diffusers/img2img
      title: Text-guided image-to-image
    - local: using-diffusers/inpaint
      title: Text-guided image inpainting
    - local: using-diffusers/depth2img
      title: Text-guided depth-to-image
    - local: using-diffusers/textual_inversion_inference
      title: Textual inversion
    - local: training/distributed_inference
      title: Distributed inference with multiple GPUs
    - local: in_translation
      title: Distilled Stable Diffusion inference
    - local: using-diffusers/reusing_seeds
      title: Improve image quality with deterministic generation
    - local: using-diffusers/control_brightness
      title: Control image brightness
    - local: using-diffusers/reproducibility
      title: Create reproducible pipelines
    - local: using-diffusers/custom_pipeline_examples
      title: Community pipelines
    - local: using-diffusers/contribute_pipeline
      title: How to contribute a community pipeline
    - local: using-diffusers/stable_diffusion_jax_how_to
      title: Stable Diffusion in JAX/Flax
    - local: using-diffusers/weighted_prompts
      title: Weighting Prompts
    title: Pipelines for inference
  - sections:
    - local: training/overview
      title: Overview
    - local: training/create_dataset
      title: Create a dataset for training
    - local: training/adapt_a_model
      title: Adapt a model to a new task
    - local: training/unconditional_training
      title: Unconditional image generation
    - local: training/text_inversion
      title: Textual Inversion
    - local: training/dreambooth
      title: DreamBooth
    - local: training/text2image
      title: Text-to-image
    - local: training/lora
      title: Low-Rank Adaptation of Large Language Models (LoRA)
    - local: training/controlnet
      title: ControlNet
    - local: training/instructpix2pix
      title: InstructPix2Pix training
    - local: training/custom_diffusion
      title: Custom Diffusion
    title: Training
  title: Using Diffusers
- sections:
  - local: optimization/opt_overview
    title: Overview
  - local: optimization/fp16
    title: Memory and speed
  - local: optimization/torch2.0
    title: Torch 2.0 support
  - local: optimization/xformers
    title: xFormers
  - local: optimization/onnx
    title: ONNX
  - local: optimization/open_vino
    title: OpenVINO
  - local: optimization/coreml
    title: Core ML
  - local: optimization/mps
    title: MPS
  - local: optimization/habana
    title: Habana Gaudi
  - local: optimization/tome
    title: Token Merging
  title: Optimization/Special hardware
- sections:
  - local: using-diffusers/controlling_generation
    title: Controlled generation
  - local: in_translation
    title: Evaluating Diffusion Models
  title: Conceptual guides
- sections:
  - sections:
    - sections:
      - local: api/pipelines/stable_diffusion/stable_diffusion_xl
        title: Stable Diffusion XL
      title: Stable Diffusion
    title: Pipelines
  title: API | diffusers/docs/source/ko/_toctree.yml/0 | {
"file_path": "diffusers/docs/source/ko/_toctree.yml",
"repo_id": "diffusers",
"token_count": 2235
} | 91 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Effective and efficient diffusion
[[open-in-colab]]
Getting the [`DiffusionPipeline`] to generate images in a certain style or include what you want can be tricky. Often, you have to run the [`DiffusionPipeline`] several times before you end up with an image you're happy with. But generating something out of nothing is a computationally intensive process, especially if you're running inference over and over again.
This is why it's important to get the most *computational* (speed) and *memory* (GPU RAM) efficiency from the pipeline, so the time between inference cycles is shorter and you can iterate faster.
This tutorial walks you through how to generate faster and better with the [`DiffusionPipeline`].
Begin by loading the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model:
```python
from diffusers import DiffusionPipeline
model_id = "runwayml/stable-diffusion-v1-5"
pipeline = DiffusionPipeline.from_pretrained(model_id)
```
The example prompt you'll use is a portrait of an old warrior chief, but feel free to use your own:
```python
prompt = "portrait photo of a old warrior chief"
```
## Speed
<Tip>
💡 If you don't have access to a GPU, you can use one for free from a GPU provider like [Colab](https://colab.research.google.com/)!
</Tip>
One of the simplest ways to speed up inference is to place the pipeline on a GPU the same way you would with any PyTorch module:
```python
pipeline = pipeline.to("cuda")
```
To make sure you can use the same image and improve on it, use a [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed for [reproducibility](./using-diffusers/reproducibility):
```python
import torch
generator = torch.Generator("cuda").manual_seed(0)
```
Now you can generate an image:
```python
image = pipeline(prompt, generator=generator).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_1.png">
</div>
This process took ~30 seconds on a T4 GPU (it might be faster if your allocated GPU is better than a T4). By default, the [`DiffusionPipeline`] runs inference with full `float32` precision for 50 inference steps. You can speed this up by switching to a lower precision like `float16` or by running fewer inference steps.
Let's load the model in `float16` and generate an image:
```python
import torch
pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_2.png">
</div>
This time, it only took ~11 seconds to generate the image, which is almost 3x faster than before!
<Tip>
💡 We strongly suggest always running your pipelines in `float16`; so far, we have rarely seen any degradation in output quality.
</Tip>
Another option is to reduce the number of inference steps. Choosing a more efficient scheduler can help decrease the number of steps without sacrificing output quality. You can find which schedulers are compatible with the current model in the [`DiffusionPipeline`] by calling the `compatibles` method:
```python
pipeline.scheduler.compatibles
[
diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler,
diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler,
diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler,
diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler,
diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler,
diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler,
diffusers.schedulers.scheduling_ddpm.DDPMScheduler,
diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler,
diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler,
diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler,
diffusers.schedulers.scheduling_pndm.PNDMScheduler,
diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler,
diffusers.schedulers.scheduling_ddim.DDIMScheduler,
]
```
The Stable Diffusion model uses the [`PNDMScheduler`] by default, which usually requires ~50 inference steps, but more performant schedulers like [`DPMSolverMultistepScheduler`] only need ~20 or 25 inference steps. Use the [`ConfigMixin.from_config`] method to load a new scheduler:
```python
from diffusers import DPMSolverMultistepScheduler
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
```
Now set the `num_inference_steps` to 20:
```python
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_3.png">
</div>
Great, you've managed to cut the inference time to just 4 seconds! ⚡️
## Memory
The other key to improving pipeline performance is consuming less memory, which indirectly implies more speed, since you're often trying to maximize the number of images generated per second. The easiest way to see how many images you can generate at once is to try out different batch sizes until you get an `OutOfMemoryError` (OOM).
Create a function that'll generate a batch of images from a list of prompts and `Generators`. Make sure to assign each `Generator` a seed so you can reuse it if it produces a good result.
```python
def get_inputs(batch_size=1):
generator = [torch.Generator("cuda").manual_seed(i) for i in range(batch_size)]
prompts = batch_size * [prompt]
num_inference_steps = 20
return {"prompt": prompts, "generator": generator, "num_inference_steps": num_inference_steps}
```
You'll also need a function that displays each batch of images:
```python
from PIL import Image
def image_grid(imgs, rows=2, cols=2):
w, h = imgs[0].size
grid = Image.new("RGB", size=(cols * w, rows * h))
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
```
Start with `batch_size=4` and see how much memory you've consumed:
```python
images = pipeline(**get_inputs(batch_size=4)).images
image_grid(images)
```
Unless you have a GPU with more RAM, the code above probably returned an `OOM` error! Most of the memory is taken up by the cross-attention layers. Instead of running this operation in a batch, you can run it sequentially to save a significant amount of memory. All you have to do is configure the pipeline to use the [`~DiffusionPipeline.enable_attention_slicing`] function:
```python
pipeline.enable_attention_slicing()
```
Now try increasing the `batch_size` to 8!
```python
images = pipeline(**get_inputs(batch_size=8)).images
image_grid(images, rows=2, cols=4)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_5.png">
</div>
Whereas before you couldn't even generate a batch of 4 images, now you can generate a batch of 8 images at ~3.5 seconds per image! This is probably the fastest you can go on a T4 GPU without sacrificing quality.
## Quality
In the last two sections, you learned how to optimize the speed of your pipeline by using `fp16`, reduce the number of inference steps with a more performant scheduler, and enable attention slicing to reduce memory consumption. Now you're going to focus on how to improve the quality of the generated images.
### Better checkpoints
The most obvious step is to use a better checkpoint. The Stable Diffusion model is a good starting point, and several improved versions have been released since its official launch. However, using a newer version doesn't automatically mean you'll get better results. You'll still have to experiment with different checkpoints yourself, and do a little research (such as using [negative prompts](https://minimaxir.com/2022/11/stable-diffusion-negative-prompt/)) to get the best results.
As the field grows, there are more and more high-quality checkpoints finetuned to produce certain styles. Try exploring the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) and [Diffusers Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) to find one you're interested in!
### Better pipeline components
You can also try replacing the current pipeline components with a newer version. Let's load the latest [autoencoder](https://huggingface.co/stabilityai/stable-diffusion-2-1/tree/main/vae) from Stability AI into the pipeline and generate some images:
```python
from diffusers import AutoencoderKL
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda")
pipeline.vae = vae
images = pipeline(**get_inputs(batch_size=8)).images
image_grid(images, rows=2, cols=4)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_6.png">
</div>
### Better prompt engineering
The text prompt you use to generate an image is so important that it has its own name: *prompt engineering*. Some considerations to keep in mind during prompt engineering are:
- How is the image, or images similar to the one I want to generate, stored on the internet?
- What additional detail can I give that steers the model towards the style I want?
With this in mind, let's improve the prompt to include color and higher quality details:
```python
prompt += ", tribal panther make up, blue on red, side profile, looking away, serious eyes"
prompt += " 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta"
```
Generate a batch of images with the new prompt:
```python
images = pipeline(**get_inputs(batch_size=8)).images
image_grid(images, rows=2, cols=4)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_7.png">
</div>
Pretty impressive! Let's tweak the second image, corresponding to the `Generator` with a seed of `1`, a bit more by adding some text about the age of the subject:
```python
prompts = [
"portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
"portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
"portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
"portrait photo of a young warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
]
generator = [torch.Generator("cuda").manual_seed(1) for _ in range(len(prompts))]
images = pipeline(prompt=prompts, generator=generator, num_inference_steps=25).images
image_grid(images)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_8.png">
</div>
## Next steps
In this tutorial, you learned how to optimize a [`DiffusionPipeline`] for computational and memory efficiency as well as how to improve the quality of the generated outputs. If you're interested in making your pipeline even faster, take a look at the following resources:
- Learn how [PyTorch 2.0](./optimization/torch2.0) and [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) can yield 5 - 300% faster inference speed; on an A100 GPU, inference can be up to 50% faster!
- If you can't use PyTorch 2, we recommend installing [xFormers](./optimization/xformers). Its memory-efficient attention mechanism works great with PyTorch 1.13.1 for faster speed and reduced memory consumption.
- Other optimization techniques, such as model offloading, are covered in [this guide](./optimization/fp16). | diffusers/docs/source/ko/stable_diffusion.md/0 | {
"file_path": "diffusers/docs/source/ko/stable_diffusion.md",
"repo_id": "diffusers",
"token_count": 8950
} | 92 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# How to contribute a community pipeline
<Tip>
💡 Take a look at GitHub Issue [#841](https://github.com/huggingface/diffusers/issues/841) for more context about why we're adding community pipelines, so everyone can easily share their work without being slowed down.
</Tip>
Community pipelines allow you to add any additional features you'd like on top of the [`DiffusionPipeline`]. The main benefit of building on top of the `DiffusionPipeline` is anyone can load and use your pipeline by only adding one more argument, making it super easy for the community to access.
This guide will show you how to create a community pipeline and explain how it works.
To keep things simple, you'll create a "one-step" pipeline where the `UNet` does a single forward pass and calls the scheduler once.
## Initialize the pipeline
Start by creating a `one_step_unet.py` file for your community pipeline. In this file, create a pipeline class that inherits from [`DiffusionPipeline`] so it can load model weights and the scheduler configuration from the Hub. The one-step pipeline needs a `UNet` and a scheduler, so you'll need to add these as arguments to the `__init__` function:
```python
from diffusers import DiffusionPipeline
import torch
class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
def __init__(self, unet, scheduler):
super().__init__()
```
To ensure your pipeline and its components (`unet` and `scheduler`) can be saved with [`~DiffusionPipeline.save_pretrained`], add them to the `register_modules` function:
```diff
from diffusers import DiffusionPipeline
import torch
class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
def __init__(self, unet, scheduler):
super().__init__()
+ self.register_modules(unet=unet, scheduler=scheduler)
```
Now that the initialization step is done, you can move on to the forward pass! 🔥
## Define the forward pass
In the forward pass, which we recommend defining as `__call__`, you have complete creative freedom to add whatever feature you'd like. For our amazing one-step pipeline, create a random image and call the `unet` and `scheduler` only once by setting `timestep=1`:
```diff
from diffusers import DiffusionPipeline
import torch
class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
def __init__(self, unet, scheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
+ def __call__(self):
+ image = torch.randn(
+ (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
+ )
+ timestep = 1
+ model_output = self.unet(image, timestep).sample
+ scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
+ return scheduler_output
```
That's it! 🚀 You can now run this pipeline by passing a `unet` and `scheduler` to it:
```python
from diffusers import DDPMScheduler, UNet2DModel
scheduler = DDPMScheduler()
unet = UNet2DModel()
pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
output = pipeline()
```
But what's even better is that you can load pre-existing weights into the pipeline if the pipeline structure is identical. For example, you can load the [`google/ddpm-cifar10-32`](https://huggingface.co/google/ddpm-cifar10-32) weights into the one-step pipeline:
```python
pipeline = UnetSchedulerOneForwardPipeline.from_pretrained("google/ddpm-cifar10-32")
output = pipeline()
```
## Share your pipeline
Open a Pull Request on the 🧨 Diffusers [repository](https://github.com/huggingface/diffusers) to add your awesome pipeline in `one_step_unet.py` to the [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) subfolder.
Once it is merged, anyone with `diffusers >= 0.4.0` installed can use this pipeline magically 🪄 by specifying it in the `custom_pipeline` argument:
```python
from diffusers import DiffusionPipeline
pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet")
pipe()
```
Another way to share your community pipeline is to upload the `one_step_unet.py` file directly to your preferred [model repository](https://huggingface.co/docs/hub/models-uploading) on the Hub. Instead of specifying the `one_step_unet.py` file, pass the model repository id to the `custom_pipeline` argument:
```python
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="stevhliu/one_step_unet")
```
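If you go the Hub route, one possible way to upload the file is with `huggingface_hub`; this is a hedged sketch and the repository id below is a placeholder you'd replace with your own:
```python
from huggingface_hub import upload_file

upload_file(
    path_or_fileobj="one_step_unet.py",
    path_in_repo="one_step_unet.py",
    repo_id="your-username/one_step_unet",  # hypothetical repository id
)
```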
Take a look at the following table to compare the two sharing workflows and help you decide which option is best for you:
|                | GitHub community pipeline                                                                                         | HF Hub community pipeline                                                                   |
|----------------|--------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|
| usage          | same                                                                                                                 | same                                                                                          |
| review process | open a Pull Request on GitHub and undergo a review process from the Diffusers team before merging; may be slower    | upload directly to a Hub repository without any review; this is the fastest workflow         |
| visibility     | included in the official Diffusers repository and documentation                                                     | included on your HF Hub profile and relies on your own usage/promotion to gain visibility    |
<Tip>
💡 You can use whatever package you want in your community pipeline file - as long as the user has it installed, everything will work fine. Make sure you have one and only one pipeline class that inherits from `DiffusionPipeline` because this is automatically detected.
</Tip>
## How do community pipelines work?
A community pipeline is a class that inherits from [`DiffusionPipeline`], which means:
- It can be loaded with the [`custom_pipeline`] argument.
- The model weights and scheduler configuration are loaded from [`pretrained_model_name_or_path`].
- The code that implements a feature in the community pipeline is defined in a `pipeline.py` file.
Sometimes you can't load all the pipeline components' weights from an official repository. In this case, the other components should be passed directly to the pipeline:
```python
import torch
from diffusers import DiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPModel
model_id = "CompVis/stable-diffusion-v1-4"
clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id)
clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)
pipeline = DiffusionPipeline.from_pretrained(
model_id,
custom_pipeline="clip_guided_stable_diffusion",
clip_model=clip_model,
feature_extractor=feature_extractor,
    scheduler=scheduler,  # assumes a scheduler instance was created beforehand
torch_dtype=torch.float16,
)
```
The magic behind community pipelines is contained in the following code. It allows the community pipeline to be loaded from GitHub or the Hub, and it'll be available to all 🧨 Diffusers packages.
```python
# 2. Load the pipeline class, if using custom module then load it from the Hub
# if we load from explicit class, let's use it
if custom_pipeline is not None:
pipeline_class = get_class_from_dynamic_module(
custom_pipeline, module_file=CUSTOM_PIPELINE_FILE_NAME, cache_dir=custom_pipeline
)
elif cls != DiffusionPipeline:
pipeline_class = cls
else:
diffusers_module = importlib.import_module(cls.__module__.split(".")[0])
pipeline_class = getattr(diffusers_module, config_dict["_class_name"])
```
| diffusers/docs/source/ko/using-diffusers/contribute_pipeline.md/0 | {
"file_path": "diffusers/docs/source/ko/using-diffusers/contribute_pipeline.md",
"repo_id": "diffusers",
"token_count": 5977
} | 93 |
# Textual inversion
[[open-in-colab]]
The [`StableDiffusionPipeline`] supports textual inversion, a technique that enables a model like Stable Diffusion to learn a new concept from just a few sample images. This gives you more control over the generated images and allows you to tailor the model towards specific concepts. You can get started quickly with a collection of community-created concepts in the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer).
This guide will show you how to run inference with textual inversion using a pre-learned concept from the Stable Diffusion Conceptualizer. If you're interested in teaching a model new concepts with textual inversion, take a look at the [Textual Inversion](./training/text_inversion) training guide.
Login to your Hugging Face account:
```py
from huggingface_hub import notebook_login
notebook_login()
```
Import the necessary libraries, and create a helper function `image_grid` to visualize the generated images:
```py
import os
import torch
import PIL
from PIL import Image
from diffusers import StableDiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
def image_grid(imgs, rows, cols):
assert len(imgs) == rows * cols
w, h = imgs[0].size
grid = Image.new("RGB", size=(cols * w, rows * h))
grid_w, grid_h = grid.size
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
```
Pick a Stable Diffusion checkpoint and a pre-learned concept from the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer):
```py
pretrained_model_name_or_path = "runwayml/stable-diffusion-v1-5"
repo_id_embeds = "sd-concepts-library/cat-toy"
```
Now you can load a pipeline, and pass the pre-learned concept to it:
```py
pipeline = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=torch.float16).to("cuda")
pipeline.load_textual_inversion(repo_id_embeds)
```
Create a prompt with the pre-learned concept by using the special placeholder token `<cat-toy>`, and choose the number of samples and rows of images you'd like to generate:
```py
prompt = "a grafitti in a favela wall with a <cat-toy> on it"
num_samples = 2
num_rows = 2
```
Then run the pipeline (feel free to adjust the parameters like `num_inference_steps` and `guidance_scale` to see how they affect image quality), save the generated images, and visualize them with the helper function `image_grid` you created at the beginning:
```py
all_images = []
for _ in range(num_rows):
    images = pipeline(prompt, num_images_per_prompt=num_samples, num_inference_steps=50, guidance_scale=7.5).images
all_images.extend(images)
grid = image_grid(all_images, num_samples, num_rows)
grid
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/textual_inversion_inference.png">
</div>
| diffusers/docs/source/ko/using-diffusers/textual_inversion_inference.md/0 | {
"file_path": "diffusers/docs/source/ko/using-diffusers/textual_inversion_inference.md",
"repo_id": "diffusers",
"token_count": 2018
} | 94 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
MBart50TokenizerFast,
MBartForConditionalGeneration,
pipeline,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def detect_language(pipe, prompt, batch_size):
"""helper function to detect language(s) of prompt"""
if batch_size == 1:
preds = pipe(prompt, top_k=1, truncation=True, max_length=128)
return preds[0]["label"]
else:
detected_languages = []
for p in prompt:
preds = pipe(p, top_k=1, truncation=True, max_length=128)
detected_languages.append(preds[0]["label"])
return detected_languages
def translate_prompt(prompt, translation_tokenizer, translation_model, device):
"""helper function to translate prompt to English"""
encoded_prompt = translation_tokenizer(prompt, return_tensors="pt").to(device)
generated_tokens = translation_model.generate(**encoded_prompt, max_new_tokens=1000)
en_trans = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
return en_trans[0]
class MultilingualStableDiffusion(DiffusionPipeline):
r"""
Pipeline for text-to-image generation using Stable Diffusion in different languages.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
detection_pipeline ([`pipeline`]):
Transformers pipeline to detect prompt's language.
translation_model ([`MBartForConditionalGeneration`]):
Model to translate prompt to English, if necessary. Please refer to the
[model card](https://huggingface.co/docs/transformers/model_doc/mbart) for details.
translation_tokenizer ([`MBart50TokenizerFast`]):
Tokenizer of the translation model.
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
def __init__(
self,
detection_pipeline: pipeline,
translation_model: MBartForConditionalGeneration,
translation_tokenizer: MBart50TokenizerFast,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
):
super().__init__()
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
self.register_modules(
detection_pipeline=detection_pipeline,
translation_model=translation_model,
translation_tokenizer=translation_tokenizer,
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
`attention_head_dim` must be a multiple of `slice_size`.
"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size)
def disable_attention_slicing(self):
r"""
Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
back to computing attention in one step.
"""
# set slice_size = `None` to disable `attention slicing`
self.enable_attention_slicing(None)
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]],
height: int = 512,
width: int = 512,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[torch.Generator] = None,
latents: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
**kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation. Can be in different languages.
height (`int`, *optional*, defaults to 512):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 512):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
if isinstance(prompt, str):
batch_size = 1
elif isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
# detect language and translate if necessary
prompt_language = detect_language(self.detection_pipeline, prompt, batch_size)
if batch_size == 1 and prompt_language != "en":
prompt = translate_prompt(prompt, self.translation_tokenizer, self.translation_model, self.device)
if isinstance(prompt, list):
for index in range(batch_size):
if prompt_language[index] != "en":
p = translate_prompt(
prompt[index], self.translation_tokenizer, self.translation_model, self.device
)
prompt[index] = p
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
bs_embed, seq_len, _ = text_embeddings.shape
text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
# detect language and translate it if necessary
negative_prompt_language = detect_language(self.detection_pipeline, negative_prompt, batch_size)
if negative_prompt_language != "en":
negative_prompt = translate_prompt(
negative_prompt, self.translation_tokenizer, self.translation_model, self.device
)
if isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
# detect language and translate it if necessary
if isinstance(negative_prompt, list):
negative_prompt_languages = detect_language(self.detection_pipeline, negative_prompt, batch_size)
for index in range(batch_size):
if negative_prompt_languages[index] != "en":
p = translate_prompt(
negative_prompt[index], self.translation_tokenizer, self.translation_model, self.device
)
negative_prompt[index] = p
uncond_tokens = negative_prompt
max_length = text_input_ids.shape[-1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = uncond_embeddings.shape[1]
uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
self.device
)
else:
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
latents = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(num_inference_steps)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
timesteps_tensor = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
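        # undo the Stable Diffusion VAE scaling factor (0.18215) before decoding the latents to an image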
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
if self.safety_checker is not None:
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
self.device
)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
)
else:
has_nsfw_concept = None
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| diffusers/examples/community/multilingual_stable_diffusion.py/0 | {
"file_path": "diffusers/examples/community/multilingual_stable_diffusion.py",
"repo_id": "diffusers",
"token_count": 9667
} | 95 |
import argparse
import inspect
import os
import time
import warnings
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
import PIL.Image
import torch
from PIL import Image
from transformers import CLIPTokenizer
from diffusers import OnnxRuntimeModel, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
deprecate,
logging,
replace_example_docstring,
)
from diffusers.utils.torch_utils import randn_tensor
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> # !pip install opencv-python transformers accelerate
>>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler
>>> from diffusers.utils import load_image
>>> import numpy as np
>>> import torch
>>> import cv2
>>> from PIL import Image
>>> # download an image
>>> image = load_image(
... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
... )
>>> np_image = np.array(image)
>>> # get canny image
>>> np_image = cv2.Canny(np_image, 100, 200)
>>> np_image = np_image[:, :, None]
>>> np_image = np.concatenate([np_image, np_image, np_image], axis=2)
>>> canny_image = Image.fromarray(np_image)
>>> # load control net and stable diffusion v1-5
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
>>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
... )
>>> # speed up diffusion process with faster scheduler and memory optimization
>>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
>>> pipe.enable_model_cpu_offload()
>>> # generate image
>>> generator = torch.manual_seed(0)
>>> image = pipe(
... "futuristic-looking woman",
... num_inference_steps=20,
... generator=generator,
... image=image,
... control_image=canny_image,
... ).images[0]
```
"""
def prepare_image(image):
if isinstance(image, torch.Tensor):
# Batch single image
if image.ndim == 3:
image = image.unsqueeze(0)
image = image.to(dtype=torch.float32)
else:
# preprocess image
if isinstance(image, (PIL.Image.Image, np.ndarray)):
image = [image]
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
image = [np.array(i.convert("RGB"))[None, :] for i in image]
image = np.concatenate(image, axis=0)
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
image = np.concatenate([i[None, :] for i in image], axis=0)
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
return image
class OnnxStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
vae_encoder: OnnxRuntimeModel
vae_decoder: OnnxRuntimeModel
text_encoder: OnnxRuntimeModel
tokenizer: CLIPTokenizer
unet: OnnxRuntimeModel
scheduler: KarrasDiffusionSchedulers
def __init__(
self,
vae_encoder: OnnxRuntimeModel,
vae_decoder: OnnxRuntimeModel,
text_encoder: OnnxRuntimeModel,
tokenizer: CLIPTokenizer,
unet: OnnxRuntimeModel,
scheduler: KarrasDiffusionSchedulers,
):
super().__init__()
self.register_modules(
vae_encoder=vae_encoder,
vae_decoder=vae_decoder,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
)
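        # OnnxRuntimeModel exposes no VAE config, so the 8x spatial downsampling factor of the SD VAE is hard-coded here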
self.vae_scale_factor = 2 ** (4 - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
self.control_image_processor = VaeImageProcessor(
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
)
def _encode_prompt(
self,
prompt: Union[str, List[str]],
num_images_per_prompt: Optional[int],
do_classifier_free_guidance: bool,
negative_prompt: Optional[str],
prompt_embeds: Optional[np.ndarray] = None,
negative_prompt_embeds: Optional[np.ndarray] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`):
prompt to be encoded
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
prompt_embeds (`np.ndarray`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`np.ndarray`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
"""
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="np",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
if not np.array_equal(text_input_ids, untruncated_ids):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt] * batch_size
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="np",
)
negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
if do_classifier_free_guidance:
negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
def decode_latents(self, latents):
warnings.warn(
"The decode_latents method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor instead",
FutureWarning,
)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
num_controlnet,
prompt,
image,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
controlnet_conditioning_scale=1.0,
control_guidance_start=0.0,
control_guidance_end=1.0,
):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
# Check `image`
if num_controlnet == 1:
self.check_image(image, prompt, prompt_embeds)
elif num_controlnet > 1:
if not isinstance(image, list):
raise TypeError("For multiple controlnets: `image` must be type `list`")
# When `image` is a nested list:
# (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
elif any(isinstance(i, list) for i in image):
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
elif len(image) != num_controlnet:
raise ValueError(
f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {num_controlnet} ControlNets."
)
for image_ in image:
self.check_image(image_, prompt, prompt_embeds)
else:
assert False
# Check `controlnet_conditioning_scale`
if num_controlnet == 1:
if not isinstance(controlnet_conditioning_scale, float):
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
elif num_controlnet > 1:
if isinstance(controlnet_conditioning_scale, list):
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
elif (
isinstance(controlnet_conditioning_scale, list)
and len(controlnet_conditioning_scale) != num_controlnet
):
raise ValueError(
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
" the same length as the number of controlnets"
)
else:
assert False
if len(control_guidance_start) != len(control_guidance_end):
raise ValueError(
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
)
if num_controlnet > 1:
if len(control_guidance_start) != num_controlnet:
raise ValueError(
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {num_controlnet} controlnets available. Make sure to provide {num_controlnet}."
)
for start, end in zip(control_guidance_start, control_guidance_end):
if start >= end:
raise ValueError(
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
)
if start < 0.0:
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
if end > 1.0:
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
def check_image(self, image, prompt, prompt_embeds):
image_is_pil = isinstance(image, PIL.Image.Image)
image_is_tensor = isinstance(image, torch.Tensor)
image_is_np = isinstance(image, np.ndarray)
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
if (
not image_is_pil
and not image_is_tensor
and not image_is_np
and not image_is_pil_list
and not image_is_tensor_list
and not image_is_np_list
):
raise TypeError(
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
)
if image_is_pil:
image_batch_size = 1
else:
image_batch_size = len(image)
if prompt is not None and isinstance(prompt, str):
prompt_batch_size = 1
elif prompt is not None and isinstance(prompt, list):
prompt_batch_size = len(prompt)
elif prompt_embeds is not None:
prompt_batch_size = prompt_embeds.shape[0]
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
raise ValueError(
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
)
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
def prepare_control_image(
self,
image,
width,
height,
batch_size,
num_images_per_prompt,
device,
dtype,
do_classifier_free_guidance=False,
guess_mode=False,
):
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
image_batch_size = image.shape[0]
if image_batch_size == 1:
repeat_by = batch_size
else:
# image batch size is the same as prompt batch size
repeat_by = num_images_per_prompt
image = image.repeat_interleave(repeat_by, dim=0)
image = image.to(device=device, dtype=dtype)
if do_classifier_free_guidance and not guess_mode:
image = torch.cat([image] * 2)
return image
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
return timesteps, num_inference_steps - t_start
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
)
image = image.to(device=device, dtype=dtype)
batch_size = batch_size * num_images_per_prompt
if image.shape[1] == 4:
init_latents = image
else:
_image = image.cpu().detach().numpy()
init_latents = self.vae_encoder(sample=_image)[0]
init_latents = torch.from_numpy(init_latents).to(device=device, dtype=dtype)
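            # scale the encoded latents by the Stable Diffusion VAE scaling factor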
init_latents = 0.18215 * init_latents
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
# expand init_latents for batch_size
deprecation_message = (
f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
" images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
" your script to pass as many initial images as text prompts to suppress this warning."
)
deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
additional_image_per_prompt = batch_size // init_latents.shape[0]
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
raise ValueError(
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
)
else:
init_latents = torch.cat([init_latents], dim=0)
shape = init_latents.shape
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
# get latents
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
latents = init_latents
return latents
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
num_controlnet: int,
fp16: bool = True,
prompt: Union[str, List[str]] = None,
image: Union[
torch.FloatTensor,
PIL.Image.Image,
np.ndarray,
List[torch.FloatTensor],
List[PIL.Image.Image],
List[np.ndarray],
] = None,
control_image: Union[
torch.FloatTensor,
PIL.Image.Image,
np.ndarray,
List[torch.FloatTensor],
List[PIL.Image.Image],
List[np.ndarray],
] = None,
height: Optional[int] = None,
width: Optional[int] = None,
strength: float = 0.8,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
guess_mode: bool = False,
control_guidance_start: Union[float, List[float]] = 0.0,
control_guidance_end: Union[float, List[float]] = 1.0,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
`List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
                The initial image will be used as the starting point for the image generation process. Can also accept
                image latents as `image`; if latents are passed directly, they will not be encoded again.
control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
`List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
                the type is specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
specified in init, images must be passed as a list such that each element of the list can be correctly
batched for input to a single controlnet.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
                corresponding scale as a list. Note that this pipeline uses a smaller default conditioning scale (0.8)
                than [`~StableDiffusionControlNetPipeline.__call__`].
guess_mode (`bool`, *optional*, defaults to `False`):
                In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
                you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
The percentage of total steps at which the controlnet starts applying.
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
The percentage of total steps at which the controlnet stops applying.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
if fp16:
torch_dtype = torch.float16
np_dtype = np.float16
else:
torch_dtype = torch.float32
np_dtype = np.float32
# align format for control guidance
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
mult = num_controlnet
control_guidance_start, control_guidance_end = (
mult * [control_guidance_start],
mult * [control_guidance_end],
)
# 1. Check inputs. Raise error if not correct
self.check_inputs(
num_controlnet,
prompt,
control_image,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
controlnet_conditioning_scale,
control_guidance_start,
control_guidance_end,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
if num_controlnet > 1 and isinstance(controlnet_conditioning_scale, float):
controlnet_conditioning_scale = [controlnet_conditioning_scale] * num_controlnet
# 3. Encode input prompt
prompt_embeds = self._encode_prompt(
prompt,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
)
# 4. Prepare image
image = self.image_processor.preprocess(image).to(dtype=torch.float32)
# 5. Prepare controlnet_conditioning_image
if num_controlnet == 1:
control_image = self.prepare_control_image(
image=control_image,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=torch_dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
elif num_controlnet > 1:
control_images = []
for control_image_ in control_image:
control_image_ = self.prepare_control_image(
image=control_image_,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=torch_dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
control_images.append(control_image_)
control_image = control_images
else:
assert False
# 5. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
# 6. Prepare latent variables
latents = self.prepare_latents(
image,
latent_timestep,
batch_size,
num_images_per_prompt,
torch_dtype,
device,
generator,
)
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7.1 Create tensor stating which controlnets to keep
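        # for each denoising step, a ControlNet is kept (weight 1.0) only while the step's progress fraction lies inside [start, end]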
controlnet_keep = []
for i in range(len(timesteps)):
keeps = [
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
for s, e in zip(control_guidance_start, control_guidance_end)
]
controlnet_keep.append(keeps[0] if num_controlnet == 1 else keeps)
# 8. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
if isinstance(controlnet_keep[i], list):
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
else:
controlnet_cond_scale = controlnet_conditioning_scale
if isinstance(controlnet_cond_scale, list):
controlnet_cond_scale = controlnet_cond_scale[0]
cond_scale = controlnet_cond_scale * controlnet_keep[i]
# predict the noise residual
_latent_model_input = latent_model_input.cpu().detach().numpy()
_prompt_embeds = np.array(prompt_embeds, dtype=np_dtype)
_t = np.array([t.cpu().detach().numpy()], dtype=np_dtype)
if num_controlnet == 1:
control_images = np.array([control_image], dtype=np_dtype)
else:
control_images = []
for _control_img in control_image:
_control_img = _control_img.cpu().detach().numpy()
control_images.append(_control_img)
control_images = np.array(control_images, dtype=np_dtype)
control_scales = np.array(cond_scale, dtype=np_dtype)
control_scales = np.resize(control_scales, (num_controlnet, 1))
noise_pred = self.unet(
sample=_latent_model_input,
timestep=_t,
encoder_hidden_states=_prompt_embeds,
controlnet_conds=control_images,
conditioning_scales=control_scales,
)[0]
noise_pred = torch.from_numpy(noise_pred).to(device)
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if not output_type == "latent":
_latents = latents.cpu().detach().numpy() / 0.18215
_latents = np.array(_latents, dtype=np_dtype)
image = self.vae_decoder(latent_sample=_latents)[0]
image = torch.from_numpy(image).to(device, dtype=torch.float32)
has_nsfw_concept = None
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--sd_model",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument(
"--onnx_model_dir",
type=str,
required=True,
help="Path to the ONNX directory",
)
parser.add_argument("--qr_img_path", type=str, required=True, help="Path to the qr code image")
args = parser.parse_args()
qr_image = Image.open(args.qr_img_path)
qr_image = qr_image.resize((512, 512))
# init stable diffusion pipeline
pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(args.sd_model)
pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
provider = ["CUDAExecutionProvider", "CPUExecutionProvider"]
onnx_pipeline = OnnxStableDiffusionControlNetImg2ImgPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(
os.path.join(args.onnx_model_dir, "vae_encoder"), provider=provider
),
vae_decoder=OnnxRuntimeModel.from_pretrained(
os.path.join(args.onnx_model_dir, "vae_decoder"), provider=provider
),
text_encoder=OnnxRuntimeModel.from_pretrained(
os.path.join(args.onnx_model_dir, "text_encoder"), provider=provider
),
tokenizer=pipeline.tokenizer,
unet=OnnxRuntimeModel.from_pretrained(os.path.join(args.onnx_model_dir, "unet"), provider=provider),
scheduler=pipeline.scheduler,
)
onnx_pipeline = onnx_pipeline.to("cuda")
prompt = "a cute cat fly to the moon"
negative_prompt = "paintings, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, glans, nsfw, nipples, necklace, worst quality, low quality, watermark, username, signature, multiple breasts, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, single color, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, disfigured, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, bad body perspect"
for i in range(10):
start_time = time.time()
image = onnx_pipeline(
num_controlnet=2,
prompt=prompt,
negative_prompt=negative_prompt,
image=qr_image,
control_image=[qr_image, qr_image],
width=512,
height=512,
strength=0.75,
num_inference_steps=20,
num_images_per_prompt=1,
controlnet_conditioning_scale=[0.8, 0.8],
control_guidance_start=[0.3, 0.3],
control_guidance_end=[0.9, 0.9],
).images[0]
print(time.time() - start_time)
image.save("output_qr_code.png")
| diffusers/examples/community/run_onnx_controlnet.py/0 | {
"file_path": "diffusers/examples/community/run_onnx_controlnet.py",
"repo_id": "diffusers",
"token_count": 19745
} | 96 |
#
# Copyright 2023 The HuggingFace Inc. team.
# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import os
from collections import OrderedDict
from copy import copy
from typing import List, Optional, Union
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import PIL.Image
import tensorrt as trt
import torch
from huggingface_hub import snapshot_download
from huggingface_hub.utils import validate_hf_hub_args
from onnx import shape_inference
from polygraphy import cuda
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.onnx.loader import fold_constants
from polygraphy.backend.trt import (
CreateConfig,
Profile,
engine_from_bytes,
engine_from_network,
network_from_onnx_path,
save_engine,
)
from polygraphy.backend.trt import util as trt_util
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import (
StableDiffusionImg2ImgPipeline,
StableDiffusionPipelineOutput,
StableDiffusionSafetyChecker,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import retrieve_latents
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import logging
"""
Installation instructions
python3 -m pip install --upgrade transformers diffusers>=0.16.0
python3 -m pip install --upgrade tensorrt>=8.6.1
python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
python3 -m pip install onnxruntime
"""
TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
# Map of numpy dtype -> torch dtype
numpy_to_torch_dtype_dict = {
np.uint8: torch.uint8,
np.int8: torch.int8,
np.int16: torch.int16,
np.int32: torch.int32,
np.int64: torch.int64,
np.float16: torch.float16,
np.float32: torch.float32,
np.float64: torch.float64,
np.complex64: torch.complex64,
np.complex128: torch.complex128,
}
if np.version.full_version >= "1.24.0":
numpy_to_torch_dtype_dict[np.bool_] = torch.bool
else:
numpy_to_torch_dtype_dict[np.bool] = torch.bool
# Map of torch dtype -> numpy dtype
torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}
def device_view(t):
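    # wrap a torch tensor's memory as a Polygraphy DeviceView (no copy) so it can be bound to a TensorRT engine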
return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])
def preprocess_image(image):
"""
    image: PIL.Image.Image
"""
w, h = image.size
w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
image = image.resize((w, h))
image = np.array(image).astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image).contiguous()
return 2.0 * image - 1.0
class Engine:
def __init__(self, engine_path):
self.engine_path = engine_path
self.engine = None
self.context = None
self.buffers = OrderedDict()
self.tensors = OrderedDict()
def __del__(self):
[buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)]
del self.engine
del self.context
del self.buffers
del self.tensors
def build(
self,
onnx_path,
fp16,
input_profile=None,
enable_preview=False,
enable_all_tactics=False,
timing_cache=None,
workspace_size=0,
):
logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
p = Profile()
if input_profile:
for name, dims in input_profile.items():
assert len(dims) == 3
p.add(name, min=dims[0], opt=dims[1], max=dims[2])
config_kwargs = {}
config_kwargs["preview_features"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]
if enable_preview:
# Faster dynamic shapes made optional since it increases engine build time.
config_kwargs["preview_features"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)
if workspace_size > 0:
config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
if not enable_all_tactics:
config_kwargs["tactic_sources"] = []
engine = engine_from_network(
network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),
save_timing_cache=timing_cache,
)
save_engine(engine, path=self.engine_path)
def load(self):
logger.warning(f"Loading TensorRT engine: {self.engine_path}")
self.engine = engine_from_bytes(bytes_from_path(self.engine_path))
def activate(self):
self.context = self.engine.create_execution_context()
def allocate_buffers(self, shape_dict=None, device="cuda"):
for idx in range(trt_util.get_bindings_per_profile(self.engine)):
binding = self.engine[idx]
if shape_dict and binding in shape_dict:
shape = shape_dict[binding]
else:
shape = self.engine.get_binding_shape(binding)
dtype = trt.nptype(self.engine.get_binding_dtype(binding))
if self.engine.binding_is_input(binding):
self.context.set_binding_shape(idx, shape)
tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
self.tensors[binding] = tensor
self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)
def infer(self, feed_dict, stream):
start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
# shallow copy of ordered dict
device_buffers = copy(self.buffers)
for name, buf in feed_dict.items():
assert isinstance(buf, cuda.DeviceView)
device_buffers[name] = buf
bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()]
noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr)
if not noerror:
raise ValueError("ERROR: inference failed.")
return self.tensors
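# Hedged usage sketch, not called anywhere in this file: the intended Engine lifecycle is
# build -> load -> activate -> allocate_buffers -> infer. The ONNX/plan paths and the
# CLIP-like binding names and shapes below are placeholders for illustration only.
def _example_engine_lifecycle(onnx_path="clip.opt.onnx", plan_path="clip.plan"):
    engine = Engine(plan_path)
    engine.build(
        onnx_path,
        fp16=True,
        input_profile={"input_ids": [(1, 77), (1, 77), (16, 77)]},  # (min, opt, max) shapes
    )
    engine.load()
    engine.activate()
    # Provide shapes for every binding (inputs and outputs) so buffers can be pre-allocated.
    engine.allocate_buffers(shape_dict={"input_ids": (1, 77), "text_embeddings": (1, 77, 768)}, device="cuda")
    stream = cuda.Stream()
    input_ids = torch.zeros(1, 77, dtype=torch.int32, device="cuda")
    return engine.infer({"input_ids": device_view(input_ids)}, stream)["text_embeddings"]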
class Optimizer:
def __init__(self, onnx_graph):
self.graph = gs.import_onnx(onnx_graph)
def cleanup(self, return_onnx=False):
self.graph.cleanup().toposort()
if return_onnx:
return gs.export_onnx(self.graph)
def select_outputs(self, keep, names=None):
self.graph.outputs = [self.graph.outputs[o] for o in keep]
if names:
for i, name in enumerate(names):
self.graph.outputs[i].name = name
def fold_constants(self, return_onnx=False):
onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)
self.graph = gs.import_onnx(onnx_graph)
if return_onnx:
return onnx_graph
def infer_shapes(self, return_onnx=False):
onnx_graph = gs.export_onnx(self.graph)
if onnx_graph.ByteSize() > 2147483648:
raise TypeError("ERROR: model size exceeds supported 2GB limit")
else:
onnx_graph = shape_inference.infer_shapes(onnx_graph)
self.graph = gs.import_onnx(onnx_graph)
if return_onnx:
return onnx_graph
class BaseModel:
def __init__(self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77):
self.model = model
self.name = "SD Model"
self.fp16 = fp16
self.device = device
self.min_batch = 1
self.max_batch = max_batch_size
self.min_image_shape = 256 # min image resolution: 256x256
self.max_image_shape = 1024 # max image resolution: 1024x1024
self.min_latent_shape = self.min_image_shape // 8
self.max_latent_shape = self.max_image_shape // 8
self.embedding_dim = embedding_dim
self.text_maxlen = text_maxlen
def get_model(self):
return self.model
def get_input_names(self):
pass
def get_output_names(self):
pass
def get_dynamic_axes(self):
return None
def get_sample_input(self, batch_size, image_height, image_width):
pass
def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
return None
def get_shape_dict(self, batch_size, image_height, image_width):
return None
def optimize(self, onnx_graph):
opt = Optimizer(onnx_graph)
opt.cleanup()
opt.fold_constants()
opt.infer_shapes()
onnx_opt_graph = opt.cleanup(return_onnx=True)
return onnx_opt_graph
def check_dims(self, batch_size, image_height, image_width):
assert batch_size >= self.min_batch and batch_size <= self.max_batch
        assert image_height % 8 == 0 and image_width % 8 == 0
latent_height = image_height // 8
latent_width = image_width // 8
assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape
assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape
return (latent_height, latent_width)
def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):
min_batch = batch_size if static_batch else self.min_batch
max_batch = batch_size if static_batch else self.max_batch
latent_height = image_height // 8
latent_width = image_width // 8
min_image_height = image_height if static_shape else self.min_image_shape
max_image_height = image_height if static_shape else self.max_image_shape
min_image_width = image_width if static_shape else self.min_image_shape
max_image_width = image_width if static_shape else self.max_image_shape
min_latent_height = latent_height if static_shape else self.min_latent_shape
max_latent_height = latent_height if static_shape else self.max_latent_shape
min_latent_width = latent_width if static_shape else self.min_latent_shape
max_latent_width = latent_width if static_shape else self.max_latent_shape
return (
min_batch,
max_batch,
min_image_height,
max_image_height,
min_image_width,
max_image_width,
min_latent_height,
max_latent_height,
min_latent_width,
max_latent_width,
)
def getOnnxPath(model_name, onnx_dir, opt=True):
return os.path.join(onnx_dir, model_name + (".opt" if opt else "") + ".onnx")
def getEnginePath(model_name, engine_dir):
return os.path.join(engine_dir, model_name + ".plan")
def build_engines(
models: dict,
engine_dir,
onnx_dir,
onnx_opset,
opt_image_height,
opt_image_width,
opt_batch_size=1,
force_engine_rebuild=False,
static_batch=False,
static_shape=True,
enable_preview=False,
enable_all_tactics=False,
timing_cache=None,
max_workspace_size=0,
):
built_engines = {}
if not os.path.isdir(onnx_dir):
os.makedirs(onnx_dir)
if not os.path.isdir(engine_dir):
os.makedirs(engine_dir)
# Export models to ONNX
for model_name, model_obj in models.items():
engine_path = getEnginePath(model_name, engine_dir)
if force_engine_rebuild or not os.path.exists(engine_path):
logger.warning("Building Engines...")
logger.warning("Engine build can take a while to complete")
onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
onnx_opt_path = getOnnxPath(model_name, onnx_dir)
if force_engine_rebuild or not os.path.exists(onnx_opt_path):
if force_engine_rebuild or not os.path.exists(onnx_path):
logger.warning(f"Exporting model: {onnx_path}")
model = model_obj.get_model()
with torch.inference_mode(), torch.autocast("cuda"):
inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)
torch.onnx.export(
model,
inputs,
onnx_path,
export_params=True,
opset_version=onnx_opset,
do_constant_folding=True,
input_names=model_obj.get_input_names(),
output_names=model_obj.get_output_names(),
dynamic_axes=model_obj.get_dynamic_axes(),
)
del model
torch.cuda.empty_cache()
gc.collect()
else:
logger.warning(f"Found cached model: {onnx_path}")
# Optimize onnx
if force_engine_rebuild or not os.path.exists(onnx_opt_path):
logger.warning(f"Generating optimizing model: {onnx_opt_path}")
onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path))
onnx.save(onnx_opt_graph, onnx_opt_path)
else:
logger.warning(f"Found cached optimized model: {onnx_opt_path} ")
# Build TensorRT engines
for model_name, model_obj in models.items():
engine_path = getEnginePath(model_name, engine_dir)
engine = Engine(engine_path)
onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
onnx_opt_path = getOnnxPath(model_name, onnx_dir)
if force_engine_rebuild or not os.path.exists(engine.engine_path):
engine.build(
onnx_opt_path,
fp16=True,
input_profile=model_obj.get_input_profile(
opt_batch_size,
opt_image_height,
opt_image_width,
static_batch=static_batch,
static_shape=static_shape,
),
enable_preview=enable_preview,
timing_cache=timing_cache,
workspace_size=max_workspace_size,
)
built_engines[model_name] = engine
# Load and activate TensorRT engines
for model_name, model_obj in models.items():
engine = built_engines[model_name]
engine.load()
engine.activate()
return built_engines
def runEngine(engine, feed_dict, stream):
return engine.infer(feed_dict, stream)
class CLIP(BaseModel):
def __init__(self, model, device, max_batch_size, embedding_dim):
super(CLIP, self).__init__(
model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
)
self.name = "CLIP"
def get_input_names(self):
return ["input_ids"]
def get_output_names(self):
return ["text_embeddings", "pooler_output"]
def get_dynamic_axes(self):
return {"input_ids": {0: "B"}, "text_embeddings": {0: "B"}}
def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
self.check_dims(batch_size, image_height, image_width)
min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(
batch_size, image_height, image_width, static_batch, static_shape
)
return {
"input_ids": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)]
}
def get_shape_dict(self, batch_size, image_height, image_width):
self.check_dims(batch_size, image_height, image_width)
return {
"input_ids": (batch_size, self.text_maxlen),
"text_embeddings": (batch_size, self.text_maxlen, self.embedding_dim),
}
def get_sample_input(self, batch_size, image_height, image_width):
self.check_dims(batch_size, image_height, image_width)
return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)
def optimize(self, onnx_graph):
opt = Optimizer(onnx_graph)
opt.select_outputs([0]) # delete graph output#1
opt.cleanup()
opt.fold_constants()
opt.infer_shapes()
opt.select_outputs([0], names=["text_embeddings"]) # rename network output
opt_onnx_graph = opt.cleanup(return_onnx=True)
return opt_onnx_graph
def make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False):
return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
class UNet(BaseModel):
def __init__(
self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4
):
super(UNet, self).__init__(
model=model,
fp16=fp16,
device=device,
max_batch_size=max_batch_size,
embedding_dim=embedding_dim,
text_maxlen=text_maxlen,
)
self.unet_dim = unet_dim
self.name = "UNet"
def get_input_names(self):
return ["sample", "timestep", "encoder_hidden_states"]
def get_output_names(self):
return ["latent"]
def get_dynamic_axes(self):
return {
"sample": {0: "2B", 2: "H", 3: "W"},
"encoder_hidden_states": {0: "2B"},
"latent": {0: "2B", 2: "H", 3: "W"},
}
def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
(
min_batch,
max_batch,
_,
_,
_,
_,
min_latent_height,
max_latent_height,
min_latent_width,
max_latent_width,
) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
return {
"sample": [
(2 * min_batch, self.unet_dim, min_latent_height, min_latent_width),
(2 * batch_size, self.unet_dim, latent_height, latent_width),
(2 * max_batch, self.unet_dim, max_latent_height, max_latent_width),
],
"encoder_hidden_states": [
(2 * min_batch, self.text_maxlen, self.embedding_dim),
(2 * batch_size, self.text_maxlen, self.embedding_dim),
(2 * max_batch, self.text_maxlen, self.embedding_dim),
],
}
def get_shape_dict(self, batch_size, image_height, image_width):
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
return {
"sample": (2 * batch_size, self.unet_dim, latent_height, latent_width),
"encoder_hidden_states": (2 * batch_size, self.text_maxlen, self.embedding_dim),
"latent": (2 * batch_size, 4, latent_height, latent_width),
}
def get_sample_input(self, batch_size, image_height, image_width):
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
dtype = torch.float16 if self.fp16 else torch.float32
return (
torch.randn(
2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device
),
torch.tensor([1.0], dtype=torch.float32, device=self.device),
torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),
)
def make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False):
return UNet(
model,
fp16=True,
device=device,
max_batch_size=max_batch_size,
embedding_dim=embedding_dim,
unet_dim=(9 if inpaint else 4),
)
class VAE(BaseModel):
def __init__(self, model, device, max_batch_size, embedding_dim):
super(VAE, self).__init__(
model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
)
self.name = "VAE decoder"
def get_input_names(self):
return ["latent"]
def get_output_names(self):
return ["images"]
def get_dynamic_axes(self):
return {"latent": {0: "B", 2: "H", 3: "W"}, "images": {0: "B", 2: "8H", 3: "8W"}}
def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
(
min_batch,
max_batch,
_,
_,
_,
_,
min_latent_height,
max_latent_height,
min_latent_width,
max_latent_width,
) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
return {
"latent": [
(min_batch, 4, min_latent_height, min_latent_width),
(batch_size, 4, latent_height, latent_width),
(max_batch, 4, max_latent_height, max_latent_width),
]
}
def get_shape_dict(self, batch_size, image_height, image_width):
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
return {
"latent": (batch_size, 4, latent_height, latent_width),
"images": (batch_size, 3, image_height, image_width),
}
def get_sample_input(self, batch_size, image_height, image_width):
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device)
def make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False):
return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
class TorchVAEEncoder(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.vae_encoder = model
def forward(self, x):
return retrieve_latents(self.vae_encoder.encode(x))
class VAEEncoder(BaseModel):
def __init__(self, model, device, max_batch_size, embedding_dim):
super(VAEEncoder, self).__init__(
model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
)
self.name = "VAE encoder"
def get_model(self):
vae_encoder = TorchVAEEncoder(self.model)
return vae_encoder
def get_input_names(self):
return ["images"]
def get_output_names(self):
return ["latent"]
def get_dynamic_axes(self):
return {"images": {0: "B", 2: "8H", 3: "8W"}, "latent": {0: "B", 2: "H", 3: "W"}}
def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
assert batch_size >= self.min_batch and batch_size <= self.max_batch
min_batch = batch_size if static_batch else self.min_batch
max_batch = batch_size if static_batch else self.max_batch
self.check_dims(batch_size, image_height, image_width)
(
min_batch,
max_batch,
min_image_height,
max_image_height,
min_image_width,
max_image_width,
_,
_,
_,
_,
) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
return {
"images": [
(min_batch, 3, min_image_height, min_image_width),
(batch_size, 3, image_height, image_width),
(max_batch, 3, max_image_height, max_image_width),
]
}
def get_shape_dict(self, batch_size, image_height, image_width):
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
return {
"images": (batch_size, 3, image_height, image_width),
"latent": (batch_size, 4, latent_height, latent_width),
}
def get_sample_input(self, batch_size, image_height, image_width):
self.check_dims(batch_size, image_height, image_width)
return torch.randn(batch_size, 3, image_height, image_width, dtype=torch.float32, device=self.device)
def make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False):
return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
r"""
Pipeline for image-to-image generation using TensorRT accelerated Stable Diffusion.
This model inherits from [`StableDiffusionImg2ImgPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: DDIMScheduler,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPFeatureExtractor,
image_encoder: CLIPVisionModelWithProjection = None,
requires_safety_checker: bool = True,
stages=["clip", "unet", "vae", "vae_encoder"],
image_height: int = 512,
image_width: int = 512,
max_batch_size: int = 16,
# ONNX export parameters
onnx_opset: int = 17,
onnx_dir: str = "onnx",
# TensorRT engine build parameters
engine_dir: str = "engine",
build_preview_features: bool = True,
force_engine_rebuild: bool = False,
timing_cache: str = "timing_cache",
):
super().__init__(
vae,
text_encoder,
tokenizer,
unet,
scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
requires_safety_checker=requires_safety_checker,
)
self.vae.forward = self.vae.decode
self.stages = stages
self.image_height, self.image_width = image_height, image_width
self.inpaint = False
self.onnx_opset = onnx_opset
self.onnx_dir = onnx_dir
self.engine_dir = engine_dir
self.force_engine_rebuild = force_engine_rebuild
self.timing_cache = timing_cache
self.build_static_batch = False
self.build_dynamic_shape = False
self.build_preview_features = build_preview_features
self.max_batch_size = max_batch_size
        # TODO: Restrict batch size to 4 for larger image dimensions as a workaround for a TensorRT limitation.
if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512:
self.max_batch_size = 4
self.stream = None # loaded in loadResources()
self.models = {} # loaded in __loadModels()
self.engine = {} # loaded in build_engines()
def __loadModels(self):
# Load pipeline models
self.embedding_dim = self.text_encoder.config.hidden_size
models_args = {
"device": self.torch_device,
"max_batch_size": self.max_batch_size,
"embedding_dim": self.embedding_dim,
"inpaint": self.inpaint,
}
if "clip" in self.stages:
self.models["clip"] = make_CLIP(self.text_encoder, **models_args)
if "unet" in self.stages:
self.models["unet"] = make_UNet(self.unet, **models_args)
if "vae" in self.stages:
self.models["vae"] = make_VAE(self.vae, **models_args)
if "vae_encoder" in self.stages:
self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args)
@classmethod
@validate_hf_hub_args
def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
cache_dir = kwargs.pop("cache_dir", None)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
token = kwargs.pop("token", None)
revision = kwargs.pop("revision", None)
cls.cached_folder = (
pretrained_model_name_or_path
if os.path.isdir(pretrained_model_name_or_path)
else snapshot_download(
pretrained_model_name_or_path,
cache_dir=cache_dir,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
token=token,
revision=revision,
)
)
def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False):
super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings)
self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir)
self.engine_dir = os.path.join(self.cached_folder, self.engine_dir)
self.timing_cache = os.path.join(self.cached_folder, self.timing_cache)
# set device
self.torch_device = self._execution_device
logger.warning(f"Running inference on device: {self.torch_device}")
# load models
self.__loadModels()
# build engines
self.engine = build_engines(
self.models,
self.engine_dir,
self.onnx_dir,
self.onnx_opset,
opt_image_height=self.image_height,
opt_image_width=self.image_width,
force_engine_rebuild=self.force_engine_rebuild,
static_batch=self.build_static_batch,
static_shape=not self.build_dynamic_shape,
enable_preview=self.build_preview_features,
timing_cache=self.timing_cache,
)
return self
def __initialize_timesteps(self, timesteps, strength):
self.scheduler.set_timesteps(timesteps)
offset = self.scheduler.steps_offset if hasattr(self.scheduler, "steps_offset") else 0
init_timestep = int(timesteps * strength) + offset
init_timestep = min(init_timestep, timesteps)
t_start = max(timesteps - init_timestep + offset, 0)
timesteps = self.scheduler.timesteps[t_start:].to(self.torch_device)
return timesteps, t_start
def __preprocess_images(self, batch_size, images=()):
init_images = []
for image in images:
image = image.to(self.torch_device).float()
image = image.repeat(batch_size, 1, 1, 1)
init_images.append(image)
return tuple(init_images)
def __encode_image(self, init_image):
init_latents = runEngine(self.engine["vae_encoder"], {"images": device_view(init_image)}, self.stream)[
"latent"
]
init_latents = 0.18215 * init_latents
return init_latents
def __encode_prompt(self, prompt, negative_prompt):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead.
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
"""
# Tokenize prompt
text_input_ids = (
self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
.input_ids.type(torch.int32)
.to(self.torch_device)
)
text_input_ids_inp = device_view(text_input_ids)
# NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt
text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids_inp}, self.stream)[
"text_embeddings"
].clone()
# Tokenize negative prompt
uncond_input_ids = (
self.tokenizer(
negative_prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
.input_ids.type(torch.int32)
.to(self.torch_device)
)
uncond_input_ids_inp = device_view(uncond_input_ids)
uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids_inp}, self.stream)[
"text_embeddings"
]
# Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance
text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16)
return text_embeddings
def __denoise_latent(
self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None
):
if not isinstance(timesteps, torch.Tensor):
timesteps = self.scheduler.timesteps
for step_index, timestep in enumerate(timesteps):
# Expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2)
latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)
if isinstance(mask, torch.Tensor):
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
# Predict the noise residual
timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep
sample_inp = device_view(latent_model_input)
timestep_inp = device_view(timestep_float)
embeddings_inp = device_view(text_embeddings)
noise_pred = runEngine(
self.engine["unet"],
{"sample": sample_inp, "timestep": timestep_inp, "encoder_hidden_states": embeddings_inp},
self.stream,
)["latent"]
# Perform guidance
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample
latents = 1.0 / 0.18215 * latents
return latents
def __decode_latent(self, latents):
images = runEngine(self.engine["vae"], {"latent": device_view(latents)}, self.stream)["images"]
images = (images / 2 + 0.5).clamp(0, 1)
return images.cpu().permute(0, 2, 3, 1).float().numpy()
def __loadResources(self, image_height, image_width, batch_size):
self.stream = cuda.Stream()
# Allocate buffers for TensorRT engine bindings
for model_name, obj in self.models.items():
self.engine[model_name].allocate_buffers(
shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device
)
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
image: Union[torch.FloatTensor, PIL.Image.Image] = None,
strength: float = 0.8,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass
                `prompt_embeds` instead.
image (`PIL.Image.Image`):
                `Image` or tensor representing an image batch to be used as the starting point for the
                image-to-image generation; noise is added to it according to `strength` before denoising.
strength (`float`, *optional*, defaults to 0.8):
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
will be used as a starting point, adding more noise to it the larger the `strength`. The number of
denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
be maximum and the denoising process will run for the full number of iterations specified in
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2 of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead.
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
"""
self.generator = generator
self.denoising_steps = num_inference_steps
self._guidance_scale = guidance_scale
# Pre-compute latent input scales and linear multistep coefficients
self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device)
# Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
prompt = [prompt]
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"Expected prompt to be of type list or str but got {type(prompt)}")
if negative_prompt is None:
negative_prompt = [""] * batch_size
if negative_prompt is not None and isinstance(negative_prompt, str):
negative_prompt = [negative_prompt]
assert len(prompt) == len(negative_prompt)
if batch_size > self.max_batch_size:
raise ValueError(
f"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. If dynamic shape is used, then maximum batch size is 4"
)
# load resources
self.__loadResources(self.image_height, self.image_width, batch_size)
with torch.inference_mode(), torch.autocast("cuda"), trt.Runtime(TRT_LOGGER):
# Initialize timesteps
timesteps, t_start = self.__initialize_timesteps(self.denoising_steps, strength)
latent_timestep = timesteps[:1].repeat(batch_size)
# Pre-process input image
if isinstance(image, PIL.Image.Image):
image = preprocess_image(image)
init_image = self.__preprocess_images(batch_size, (image,))[0]
# VAE encode init image
init_latents = self.__encode_image(init_image)
# Add noise to latents using timesteps
noise = torch.randn(
init_latents.shape, generator=self.generator, device=self.torch_device, dtype=torch.float32
)
latents = self.scheduler.add_noise(init_latents, noise, latent_timestep)
# CLIP text encoder
text_embeddings = self.__encode_prompt(prompt, negative_prompt)
# UNet denoiser
latents = self.__denoise_latent(latents, text_embeddings, timesteps=timesteps, step_offset=t_start)
# VAE decode latent
images = self.__decode_latent(latents)
images = self.numpy_to_pil(images)
return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=None)
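# Hedged end-to-end sketch (illustrative only, never executed on import): one plausible way to drive
# this pipeline. The checkpoint id is taken from the docstring above and the image path is a
# placeholder; the first `.to("cuda")` call exports the ONNX models and builds the TensorRT engines,
# which can take a long time.
def _example_img2img_usage():
    import PIL.Image
    pipe = TensorRTStableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    # Store the ONNX models and TensorRT engines next to the cached checkpoint.
    pipe.set_cached_folder("runwayml/stable-diffusion-v1-5")
    pipe = pipe.to("cuda")
    init_image = PIL.Image.open("sketch.png").convert("RGB").resize((512, 512))
    result = pipe(prompt="a photorealistic landscape", image=init_image, strength=0.75)
    return result.images[0]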
| diffusers/examples/community/stable_diffusion_tensorrt_img2img.py/0 | {
"file_path": "diffusers/examples/community/stable_diffusion_tensorrt_img2img.py",
"repo_id": "diffusers",
"token_count": 19791
} | 97 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The LCM team and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import functools
import gc
import logging
import math
import os
import random
import shutil
from pathlib import Path
import accelerate
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from peft import LoraConfig, get_peft_model_state_dict
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
import diffusers
from diffusers import (
AutoencoderKL,
DDPMScheduler,
LCMScheduler,
StableDiffusionXLPipeline,
UNet2DConditionModel,
)
from diffusers.optimization import get_scheduler
from diffusers.training_utils import cast_training_params, resolve_interpolation_mode
from diffusers.utils import check_min_version, convert_state_dict_to_diffusers, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
if is_wandb_available():
import wandb
# Will raise an error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.26.0.dev0")
logger = get_logger(__name__)
DATASET_NAME_MAPPING = {
"lambdalabs/pokemon-blip-captions": ("image", "text"),
}
class DDIMSolver:
def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50):
# DDIM sampling parameters
step_ratio = timesteps // ddim_timesteps
self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1
self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps]
self.ddim_alpha_cumprods_prev = np.asarray(
[alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist()
)
# convert to torch tensors
self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long()
self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods)
self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev)
def to(self, device):
self.ddim_timesteps = self.ddim_timesteps.to(device)
self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device)
self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device)
return self
def ddim_step(self, pred_x0, pred_noise, timestep_index):
alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape)
dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise
x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt
return x_prev
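# Small self-contained check (illustration only, with a made-up linear alpha_cumprod schedule):
# one DDIM step maps (pred_x0, pred_noise) at the chosen solver indices to the previous latent
# while preserving the latent shape.
def _example_ddim_solver_step():
    alphas_cumprod = np.linspace(0.999, 0.01, 1000)
    solver = DDIMSolver(alphas_cumprod, timesteps=1000, ddim_timesteps=50)
    pred_x0 = torch.randn(2, 4, 8, 8)
    pred_noise = torch.randn(2, 4, 8, 8)
    timestep_index = torch.tensor([0, 25])  # indices into solver.ddim_timesteps
    x_prev = solver.ddim_step(pred_x0, pred_noise, timestep_index)
    assert x_prev.shape == pred_x0.shape
    return x_prev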
def log_validation(vae, args, accelerator, weight_dtype, step, unet=None, is_final_validation=False):
logger.info("Running validation... ")
pipeline = StableDiffusionXLPipeline.from_pretrained(
args.pretrained_teacher_model,
vae=vae,
scheduler=LCMScheduler.from_pretrained(args.pretrained_teacher_model, subfolder="scheduler"),
revision=args.revision,
torch_dtype=weight_dtype,
).to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
to_load = None
if not is_final_validation:
if unet is None:
raise ValueError("Must provide a `unet` when doing intermediate validation.")
unet = accelerator.unwrap_model(unet)
state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet))
to_load = state_dict
else:
to_load = args.output_dir
pipeline.load_lora_weights(to_load)
pipeline.fuse_lora()
if args.enable_xformers_memory_efficient_attention:
pipeline.enable_xformers_memory_efficient_attention()
if args.seed is None:
generator = None
else:
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
validation_prompts = [
"cute sundar pichai character",
"robotic cat with wings",
"a photo of yoda",
"a cute creature with blue eyes",
]
image_logs = []
for _, prompt in enumerate(validation_prompts):
images = []
with torch.autocast("cuda", dtype=weight_dtype):
images = pipeline(
prompt=prompt,
num_inference_steps=4,
num_images_per_prompt=4,
generator=generator,
guidance_scale=0.0,
).images
image_logs.append({"validation_prompt": prompt, "images": images})
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
for log in image_logs:
images = log["images"]
validation_prompt = log["validation_prompt"]
formatted_images = []
for image in images:
formatted_images.append(np.asarray(image))
formatted_images = np.stack(formatted_images)
tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC")
elif tracker.name == "wandb":
formatted_images = []
for log in image_logs:
images = log["images"]
validation_prompt = log["validation_prompt"]
for image in images:
image = wandb.Image(image, caption=validation_prompt)
formatted_images.append(image)
logger_name = "test" if is_final_validation else "validation"
tracker.log({logger_name: formatted_images})
else:
logger.warn(f"image logging not implemented for {tracker.name}")
del pipeline
gc.collect()
torch.cuda.empty_cache()
return image_logs
def append_dims(x, target_dims):
"""Appends dimensions to the end of a tensor until it has target_dims dimensions."""
dims_to_append = target_dims - x.ndim
if dims_to_append < 0:
raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
return x[(...,) + (None,) * dims_to_append]
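# Quick shape illustration for `append_dims` (not used below): append_dims(torch.randn(4), 3).shape == (4, 1, 1).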
# From LCMScheduler.get_scalings_for_boundary_condition_discrete
def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0):
scaled_timestep = timestep_scaling * timestep
c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2)
c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5
return c_skip, c_out
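# Worked check (illustration, not used during training): at timestep 0 the boundary condition holds
# exactly, c_skip == 1 and c_out == 0, so the consistency model must return its input at the boundary;
# for large timesteps c_skip tends to 0 and c_out tends to 1.
def _example_boundary_scalings():
    c_skip_0, c_out_0 = scalings_for_boundary_conditions(0)
    assert c_skip_0 == 1.0 and c_out_0 == 0.0
    c_skip_999, c_out_999 = scalings_for_boundary_conditions(999)
    assert c_skip_999 < 1e-6 and 0.999 < c_out_999 <= 1.0
    return c_skip_0, c_out_0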
# Compare LCMScheduler.step, Step 4
def get_predicted_original_sample(model_output, timesteps, sample, prediction_type, alphas, sigmas):
alphas = extract_into_tensor(alphas, timesteps, sample.shape)
sigmas = extract_into_tensor(sigmas, timesteps, sample.shape)
if prediction_type == "epsilon":
pred_x_0 = (sample - sigmas * model_output) / alphas
elif prediction_type == "sample":
pred_x_0 = model_output
elif prediction_type == "v_prediction":
pred_x_0 = alphas * sample - sigmas * model_output
else:
raise ValueError(
f"Prediction type {prediction_type} is not supported; currently, `epsilon`, `sample`, and `v_prediction`"
f" are supported."
)
return pred_x_0
# Based on step 4 in DDIMScheduler.step
def get_predicted_noise(model_output, timesteps, sample, prediction_type, alphas, sigmas):
alphas = extract_into_tensor(alphas, timesteps, sample.shape)
sigmas = extract_into_tensor(sigmas, timesteps, sample.shape)
if prediction_type == "epsilon":
pred_epsilon = model_output
elif prediction_type == "sample":
pred_epsilon = (sample - alphas * model_output) / sigmas
elif prediction_type == "v_prediction":
pred_epsilon = alphas * model_output + sigmas * sample
else:
raise ValueError(
f"Prediction type {prediction_type} is not supported; currently, `epsilon`, `sample`, and `v_prediction`"
f" are supported."
)
return pred_epsilon
def extract_into_tensor(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
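# Shape sanity sketch (illustrative): gather per-sample schedule coefficients and reshape them so
# they broadcast against a (B, C, H, W) latent batch.
def _example_extract_into_tensor():
    schedule = torch.linspace(1.0, 0.0, steps=1000)
    timesteps = torch.tensor([0, 10, 999])
    coeffs = extract_into_tensor(schedule, timesteps, (3, 4, 64, 64))
    assert coeffs.shape == (3, 1, 1, 1)
    return coeffs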
def import_model_class_from_model_name_or_path(
pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path, subfolder=subfolder, revision=revision
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModel":
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == "CLIPTextModelWithProjection":
from transformers import CLIPTextModelWithProjection
return CLIPTextModelWithProjection
else:
raise ValueError(f"{model_class} is not supported.")
def parse_args():
parser = argparse.ArgumentParser(description="Simple example of a training script.")
# ----------Model Checkpoint Loading Arguments----------
parser.add_argument(
"--pretrained_teacher_model",
type=str,
default=None,
required=True,
help="Path to pretrained LDM teacher model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--pretrained_vae_model_name_or_path",
type=str,
default=None,
help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.",
)
parser.add_argument(
"--teacher_revision",
type=str,
default=None,
required=False,
help="Revision of pretrained LDM teacher model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained LDM model identifier from huggingface.co/models.",
)
# ----------Training Arguments----------
# ----General Training Arguments----
parser.add_argument(
"--output_dir",
type=str,
default="lcm-xl-distilled",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="The directory where the downloaded models and datasets will be stored.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
# ----Logging----
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
# ----Checkpointing----
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=("Max number of checkpoints to store."),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
# ----Image Processing----
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help=(
"The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
" dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
" or to a folder containing files that 🤗 Datasets can understand."
),
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The config of the Dataset, leave as None if there's only one config.",
)
parser.add_argument(
"--train_data_dir",
type=str,
default=None,
help=(
"A folder containing the training data. Folder contents must follow the structure described in"
" https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
" must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
),
)
parser.add_argument(
"--image_column", type=str, default="image", help="The column of the dataset containing an image."
)
parser.add_argument(
"--caption_column",
type=str,
default="text",
help="The column of the dataset containing a caption or a list of captions.",
)
parser.add_argument(
"--resolution",
type=int,
default=1024,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--interpolation_type",
type=str,
default="bilinear",
help=(
"The interpolation function used when resizing images to the desired resolution. Choose between `bilinear`,"
" `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`."
),
)
parser.add_argument(
"--center_crop",
default=False,
action="store_true",
help=(
"Whether to center crop the input images to the resolution. If not set, the images will be randomly"
" cropped. The images will be resized to the resolution first before cropping."
),
)
parser.add_argument(
"--random_flip",
action="store_true",
help="whether to randomly flip images horizontally",
)
parser.add_argument(
"--encode_batch_size",
type=int,
default=8,
help="Batch size to use for VAE encoding of the images for efficient processing.",
)
# ----Dataloader----
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
# ----Batch Size and Training Steps----
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=100)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--max_train_samples",
type=int,
default=None,
help=(
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
),
)
# ----Learning Rate----
parser.add_argument(
"--learning_rate",
type=float,
default=1e-6,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
# ----Optimizer (Adam)----
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
# ----Diffusion Training Arguments----
# ----Latent Consistency Distillation (LCD) Specific Arguments----
parser.add_argument(
"--w_min",
type=float,
default=3.0,
required=False,
help=(
"The minimum guidance scale value for guidance scale sampling. Note that we are using the Imagen CFG"
" formulation rather than the LCM formulation, which means all guidance scales have 1 added to them as"
" compared to the original paper."
),
)
parser.add_argument(
"--w_max",
type=float,
default=15.0,
required=False,
help=(
"The maximum guidance scale value for guidance scale sampling. Note that we are using the Imagen CFG"
" formulation rather than the LCM formulation, which means all guidance scales have 1 added to them as"
" compared to the original paper."
),
)
parser.add_argument(
"--num_ddim_timesteps",
type=int,
default=50,
help="The number of timesteps to use for DDIM sampling.",
)
parser.add_argument(
"--loss_type",
type=str,
default="l2",
choices=["l2", "huber"],
help="The type of loss to use for the LCD loss.",
)
parser.add_argument(
"--huber_c",
type=float,
default=0.001,
help="The huber loss parameter. Only used if `--loss_type=huber`.",
)
parser.add_argument(
"--lora_rank",
type=int,
default=64,
help="The rank of the LoRA projection matrix.",
)
parser.add_argument(
"--lora_alpha",
type=int,
default=64,
help=(
"The value of the LoRA alpha parameter, which controls the scaling factor in front of the LoRA weight"
" update delta_W. No scaling will be performed if this value is equal to `lora_rank`."
),
)
parser.add_argument(
"--lora_dropout",
type=float,
default=0.0,
help="The dropout probability for the dropout layer added before applying the LoRA to each layer input.",
)
parser.add_argument(
"--lora_target_modules",
type=str,
default=None,
help=(
"A comma-separated string of target module keys to add LoRA to. If not set, a default list of modules will"
" be used. By default, LoRA will be applied to all conv and linear layers."
),
)
parser.add_argument(
"--vae_encode_batch_size",
type=int,
default=8,
required=False,
help=(
"The batch size used when encoding (and decoding) images to latents (and vice versa) using the VAE."
" Encoding or decoding the whole batch at once may run into OOM issues."
),
)
parser.add_argument(
"--timestep_scaling_factor",
type=float,
default=10.0,
help=(
"The multiplicative timestep scaling factor used when calculating the boundary scalings for LCM. The"
" higher the scaling is, the lower the approximation error, but the default value of 10.0 should typically"
" suffice."
),
)
# ----Mixed Precision----
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
# ----Training Optimizations----
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
# ----Distributed Training----
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
# ----------Validation Arguments----------
parser.add_argument(
"--validation_steps",
type=int,
default=200,
help="Run validation every X steps.",
)
# ----------Huggingface Hub Arguments-----------
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
# ----------Accelerate Arguments----------
parser.add_argument(
"--tracker_project_name",
type=str,
default="text2image-fine-tune",
help=(
"The `project_name` argument passed to Accelerator.init_trackers for"
" more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
),
)
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
return args
# Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
def encode_prompt(prompt_batch, text_encoders, tokenizers, is_train=True):
prompt_embeds_list = []
captions = []
for caption in prompt_batch:
if isinstance(caption, str):
captions.append(caption)
elif isinstance(caption, (list, np.ndarray)):
# take a random caption if there are multiple
captions.append(random.choice(caption) if is_train else caption[0])
with torch.no_grad():
for tokenizer, text_encoder in zip(tokenizers, text_encoders):
text_inputs = tokenizer(
captions,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
prompt_embeds = text_encoder(
text_input_ids.to(text_encoder.device),
output_hidden_states=True,
)
            # We are only interested in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.hidden_states[-2]
bs_embed, seq_len, _ = prompt_embeds.shape
prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
return prompt_embeds, pooled_prompt_embeds
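# Rough shape note (assuming the standard SDXL text encoders with hidden sizes 768 and 1280):
# `prompt_embeds` concatenates the penultimate hidden states of both encoders along the feature axis,
# giving (batch_size, 77, 768 + 1280) == (batch_size, 77, 2048), while `pooled_prompt_embeds` comes
# only from the second (projection) text encoder, giving (batch_size, 1280). These are the shapes the
# SDXL U-Net expects for its `encoder_hidden_states` and added `text_embeds` conditioning.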
def main(args):
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_config=accelerator_project_config,
        split_batches=True,  # It's important to set this to True when using webdataset to get the right number of steps for lr scheduling. If set to False, the number of steps will be divided by the number of processes, assuming batches are multiplied by the number of processes.
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name,
exist_ok=True,
token=args.hub_token,
private=True,
).repo_id
# 1. Create the noise scheduler and the desired noise schedule.
noise_scheduler = DDPMScheduler.from_pretrained(
args.pretrained_teacher_model, subfolder="scheduler", revision=args.teacher_revision
)
# DDPMScheduler calculates the alpha and sigma noise schedules (based on the alpha bars) for us
alpha_schedule = torch.sqrt(noise_scheduler.alphas_cumprod)
sigma_schedule = torch.sqrt(1 - noise_scheduler.alphas_cumprod)
# Initialize the DDIM ODE solver for distillation.
solver = DDIMSolver(
noise_scheduler.alphas_cumprod.numpy(),
timesteps=noise_scheduler.config.num_train_timesteps,
ddim_timesteps=args.num_ddim_timesteps,
)
# 2. Load tokenizers from SDXL checkpoint.
tokenizer_one = AutoTokenizer.from_pretrained(
args.pretrained_teacher_model, subfolder="tokenizer", revision=args.teacher_revision, use_fast=False
)
tokenizer_two = AutoTokenizer.from_pretrained(
args.pretrained_teacher_model, subfolder="tokenizer_2", revision=args.teacher_revision, use_fast=False
)
# 3. Load text encoders from SDXL checkpoint.
# import correct text encoder classes
text_encoder_cls_one = import_model_class_from_model_name_or_path(
args.pretrained_teacher_model, args.teacher_revision
)
text_encoder_cls_two = import_model_class_from_model_name_or_path(
args.pretrained_teacher_model, args.teacher_revision, subfolder="text_encoder_2"
)
text_encoder_one = text_encoder_cls_one.from_pretrained(
args.pretrained_teacher_model, subfolder="text_encoder", revision=args.teacher_revision
)
text_encoder_two = text_encoder_cls_two.from_pretrained(
args.pretrained_teacher_model, subfolder="text_encoder_2", revision=args.teacher_revision
)
# 4. Load VAE from SDXL checkpoint (or more stable VAE)
vae_path = (
args.pretrained_teacher_model
if args.pretrained_vae_model_name_or_path is None
else args.pretrained_vae_model_name_or_path
)
vae = AutoencoderKL.from_pretrained(
vae_path,
subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
revision=args.teacher_revision,
)
# 6. Freeze teacher vae, text_encoders.
vae.requires_grad_(False)
text_encoder_one.requires_grad_(False)
text_encoder_two.requires_grad_(False)
# 7. Create online student U-Net.
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_teacher_model, subfolder="unet", revision=args.teacher_revision
)
unet.requires_grad_(False)
# Check that all trainable models are in full precision
low_precision_error_string = (
" Please make sure to always have all model weights in full float32 precision when starting training - even if"
" doing mixed precision training, copy of the weights should still be float32."
)
if accelerator.unwrap_model(unet).dtype != torch.float32:
raise ValueError(
f"Controlnet loaded as datatype {accelerator.unwrap_model(unet).dtype}. {low_precision_error_string}"
)
# 8. Handle mixed precision and device placement
    # For mixed precision training we cast all non-trainable weights to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move unet, vae and text_encoder to device and cast to weight_dtype
# The VAE is in float32 to avoid NaN losses.
unet.to(accelerator.device, dtype=weight_dtype)
if args.pretrained_vae_model_name_or_path is None:
vae.to(accelerator.device, dtype=torch.float32)
else:
vae.to(accelerator.device, dtype=weight_dtype)
text_encoder_one.to(accelerator.device, dtype=weight_dtype)
text_encoder_two.to(accelerator.device, dtype=weight_dtype)
# 9. Add LoRA to the student U-Net, only the LoRA projection matrix will be updated by the optimizer.
if args.lora_target_modules is not None:
lora_target_modules = [module_key.strip() for module_key in args.lora_target_modules.split(",")]
else:
lora_target_modules = [
"to_q",
"to_k",
"to_v",
"to_out.0",
"proj_in",
"proj_out",
"ff.net.0.proj",
"ff.net.2",
"conv1",
"conv2",
"conv_shortcut",
"downsamplers.0.conv",
"upsamplers.0.conv",
"time_emb_proj",
]
lora_config = LoraConfig(
r=args.lora_rank,
target_modules=lora_target_modules,
lora_alpha=args.lora_alpha,
lora_dropout=args.lora_dropout,
)
unet.add_adapter(lora_config)
# Make sure the trainable params are in float32.
if args.mixed_precision == "fp16":
# only upcast trainable parameters (LoRA) into fp32
cast_training_params(unet, dtype=torch.float32)
# Also move the alpha and sigma noise schedules to accelerator.device.
alpha_schedule = alpha_schedule.to(accelerator.device)
sigma_schedule = sigma_schedule.to(accelerator.device)
solver = solver.to(accelerator.device)
# 10. Handle saving and loading of checkpoints
# `accelerate` 0.16.0 will have better support for customized saving
if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
# create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
def save_model_hook(models, weights, output_dir):
if accelerator.is_main_process:
unet_ = accelerator.unwrap_model(unet)
# also save the checkpoints in native `diffusers` format so that they can easily
# be loaded independently via `load_lora_weights()`.
state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet_))
StableDiffusionXLPipeline.save_lora_weights(output_dir, unet_lora_layers=state_dict)
for _, model in enumerate(models):
# make sure to pop weight so that corresponding model is not saved again
weights.pop()
def load_model_hook(models, input_dir):
# load the LoRA into the model
unet_ = accelerator.unwrap_model(unet)
lora_state_dict, network_alphas = StableDiffusionXLPipeline.lora_state_dict(input_dir)
StableDiffusionXLPipeline.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_)
for _ in range(len(models)):
# pop models so that they are not loaded again
models.pop()
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
# 11. Enable optimizations
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
logger.warn(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
)
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
# 12. Optimizer creation
params_to_optimize = list(filter(lambda p: p.requires_grad, unet.parameters()))
optimizer = optimizer_class(
params_to_optimize,
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
# 13. Dataset creation and data processing
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
dataset = load_dataset(
args.dataset_name,
args.dataset_config_name,
cache_dir=args.cache_dir,
)
else:
data_files = {}
if args.train_data_dir is not None:
data_files["train"] = os.path.join(args.train_data_dir, "**")
dataset = load_dataset(
"imagefolder",
data_files=data_files,
cache_dir=args.cache_dir,
)
# See more about loading custom images at
# https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder
# Preprocessing the datasets.
column_names = dataset["train"].column_names
# Get the column names for input/target.
dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None)
if args.image_column is None:
image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:
image_column = args.image_column
if image_column not in column_names:
raise ValueError(
f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}"
)
if args.caption_column is None:
caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
else:
caption_column = args.caption_column
if caption_column not in column_names:
raise ValueError(
f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}"
)
# Preprocessing the datasets.
interpolation_mode = resolve_interpolation_mode(args.interpolation_type)
train_resize = transforms.Resize(args.resolution, interpolation=interpolation_mode)
train_crop = transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution)
train_flip = transforms.RandomHorizontalFlip(p=1.0)
train_transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
def preprocess_train(examples):
images = [image.convert("RGB") for image in examples[image_column]]
# image aug
original_sizes = []
all_images = []
crop_top_lefts = []
for image in images:
original_sizes.append((image.height, image.width))
image = train_resize(image)
if args.center_crop:
y1 = max(0, int(round((image.height - args.resolution) / 2.0)))
x1 = max(0, int(round((image.width - args.resolution) / 2.0)))
image = train_crop(image)
else:
y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution))
image = crop(image, y1, x1, h, w)
if args.random_flip and random.random() < 0.5:
# flip
x1 = image.width - x1
image = train_flip(image)
crop_top_left = (y1, x1)
crop_top_lefts.append(crop_top_left)
image = train_transforms(image)
all_images.append(image)
examples["original_sizes"] = original_sizes
examples["crop_top_lefts"] = crop_top_lefts
examples["pixel_values"] = all_images
examples["captions"] = list(examples[caption_column])
return examples
with accelerator.main_process_first():
if args.max_train_samples is not None:
dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
# Set the training transforms
train_dataset = dataset["train"].with_transform(preprocess_train)
def collate_fn(examples):
pixel_values = torch.stack([example["pixel_values"] for example in examples])
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
original_sizes = [example["original_sizes"] for example in examples]
crop_top_lefts = [example["crop_top_lefts"] for example in examples]
captions = [example["captions"] for example in examples]
return {
"pixel_values": pixel_values,
"captions": captions,
"original_sizes": original_sizes,
"crop_top_lefts": crop_top_lefts,
}
# DataLoaders creation:
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
shuffle=True,
collate_fn=collate_fn,
batch_size=args.train_batch_size,
num_workers=args.dataloader_num_workers,
)
# 14. Embeddings for the UNet.
# Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
def compute_embeddings(prompt_batch, original_sizes, crop_coords, text_encoders, tokenizers, is_train=True):
def compute_time_ids(original_size, crops_coords_top_left):
target_size = (args.resolution, args.resolution)
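# SDXL micro-conditioning: the added time ids are the concatenation (original_size, crop_top_left, target_size).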
add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_time_ids = torch.tensor([add_time_ids])
add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype)
return add_time_ids
prompt_embeds, pooled_prompt_embeds = encode_prompt(prompt_batch, text_encoders, tokenizers, is_train)
add_text_embeds = pooled_prompt_embeds
add_time_ids = torch.cat([compute_time_ids(s, c) for s, c in zip(original_sizes, crop_coords)])
prompt_embeds = prompt_embeds.to(accelerator.device)
add_text_embeds = add_text_embeds.to(accelerator.device)
unet_added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
return {"prompt_embeds": prompt_embeds, **unet_added_cond_kwargs}
text_encoders = [text_encoder_one, text_encoder_two]
tokenizers = [tokenizer_one, tokenizer_two]
compute_embeddings_fn = functools.partial(compute_embeddings, text_encoders=text_encoders, tokenizers=tokenizers)
# 15. LR Scheduler creation
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
num_training_steps=args.max_train_steps * accelerator.num_processes,
)
# 16. Prepare for training
# Prepare everything with our `accelerator`.
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, optimizer, train_dataloader, lr_scheduler
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
if accelerator.is_main_process:
tracker_config = dict(vars(args))
accelerator.init_trackers(args.tracker_project_name, config=tracker_config)
# 17. Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != "latest":
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
)
args.resume_from_checkpoint = None
initial_global_step = 0
else:
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
initial_global_step = global_step
first_epoch = global_step // num_update_steps_per_epoch
else:
initial_global_step = 0
progress_bar = tqdm(
range(0, args.max_train_steps),
initial=initial_global_step,
desc="Steps",
# Only show the progress bar once on each machine.
disable=not accelerator.is_local_main_process,
)
unet.train()
for epoch in range(first_epoch, args.num_train_epochs):
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(unet):
# 1. Load and process the image and text conditioning
pixel_values, text, orig_size, crop_coords = (
batch["pixel_values"],
batch["captions"],
batch["original_sizes"],
batch["crop_top_lefts"],
)
encoded_text = compute_embeddings_fn(text, orig_size, crop_coords)
# encode pixel values with batch size of at most args.vae_encode_batch_size
pixel_values = pixel_values.to(dtype=vae.dtype)
latents = []
for i in range(0, pixel_values.shape[0], args.vae_encode_batch_size):
latents.append(vae.encode(pixel_values[i : i + args.vae_encode_batch_size]).latent_dist.sample())
latents = torch.cat(latents, dim=0)
latents = latents * vae.config.scaling_factor
if args.pretrained_vae_model_name_or_path is None:
latents = latents.to(weight_dtype)
# 2. Sample a random timestep for each image t_n from the ODE solver timesteps without bias.
# For the DDIM solver, the timestep schedule is [T - 1, T - k - 1, T - 2 * k - 1, ...]
bsz = latents.shape[0]
topk = noise_scheduler.config.num_train_timesteps // args.num_ddim_timesteps
index = torch.randint(0, args.num_ddim_timesteps, (bsz,), device=latents.device).long()
start_timesteps = solver.ddim_timesteps[index]
timesteps = start_timesteps - topk
timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps)
# 3. Get boundary scalings for start_timesteps and (end) timesteps.
c_skip_start, c_out_start = scalings_for_boundary_conditions(
start_timesteps, timestep_scaling=args.timestep_scaling_factor
)
c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]]
c_skip, c_out = scalings_for_boundary_conditions(
timesteps, timestep_scaling=args.timestep_scaling_factor
)
c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]]
# 4. Sample noise from the prior and add it to the latents according to the noise magnitude at each
# timestep (this is the forward diffusion process) [z_{t_{n + k}} in Algorithm 1]
noise = torch.randn_like(latents)
noisy_model_input = noise_scheduler.add_noise(latents, noise, start_timesteps)
# 5. Sample a random guidance scale w from U[w_min, w_max]
# Note that for LCM-LoRA distillation it is not necessary to use a guidance scale embedding
w = (args.w_max - args.w_min) * torch.rand((bsz,)) + args.w_min
w = w.reshape(bsz, 1, 1, 1)
w = w.to(device=latents.device, dtype=latents.dtype)
# 6. Prepare prompt embeds and unet_added_conditions
prompt_embeds = encoded_text.pop("prompt_embeds")
# 7. Get online LCM prediction on z_{t_{n + k}} (noisy_model_input), w, c, t_{n + k} (start_timesteps)
noise_pred = unet(
noisy_model_input,
start_timesteps,
encoder_hidden_states=prompt_embeds,
added_cond_kwargs=encoded_text,
).sample
pred_x_0 = get_predicted_original_sample(
noise_pred,
start_timesteps,
noisy_model_input,
noise_scheduler.config.prediction_type,
alpha_schedule,
sigma_schedule,
)
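# Consistency-model parameterization: f(x, t) = c_skip(t) * x + c_out(t) * pred_x_0, which enforces the boundary condition f(x, eps) = x.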
model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0
# 8. Compute the conditional and unconditional teacher model predictions to get CFG estimates of the
# predicted noise eps_0 and predicted original sample x_0, then run the ODE solver using these
# estimates to predict the data point in the augmented PF-ODE trajectory corresponding to the next ODE
# solver timestep.
# With the adapters disabled, the `unet` is the regular teacher model.
accelerator.unwrap_model(unet).disable_adapters()
with torch.no_grad():
# 1. Get teacher model prediction on noisy_model_input z_{t_{n + k}} and conditional embedding c
cond_teacher_output = unet(
noisy_model_input,
start_timesteps,
encoder_hidden_states=prompt_embeds,
added_cond_kwargs={k: v.to(weight_dtype) for k, v in encoded_text.items()},
).sample
cond_pred_x0 = get_predicted_original_sample(
cond_teacher_output,
start_timesteps,
noisy_model_input,
noise_scheduler.config.prediction_type,
alpha_schedule,
sigma_schedule,
)
cond_pred_noise = get_predicted_noise(
cond_teacher_output,
start_timesteps,
noisy_model_input,
noise_scheduler.config.prediction_type,
alpha_schedule,
sigma_schedule,
)
# 2. Get teacher model prediction on noisy_model_input z_{t_{n + k}} and unconditional embedding 0
uncond_prompt_embeds = torch.zeros_like(prompt_embeds)
uncond_pooled_prompt_embeds = torch.zeros_like(encoded_text["text_embeds"])
uncond_added_conditions = copy.deepcopy(encoded_text)
uncond_added_conditions["text_embeds"] = uncond_pooled_prompt_embeds
uncond_teacher_output = unet(
noisy_model_input,
start_timesteps,
encoder_hidden_states=uncond_prompt_embeds.to(weight_dtype),
added_cond_kwargs={k: v.to(weight_dtype) for k, v in uncond_added_conditions.items()},
).sample
uncond_pred_x0 = get_predicted_original_sample(
uncond_teacher_output,
start_timesteps,
noisy_model_input,
noise_scheduler.config.prediction_type,
alpha_schedule,
sigma_schedule,
)
uncond_pred_noise = get_predicted_noise(
uncond_teacher_output,
start_timesteps,
noisy_model_input,
noise_scheduler.config.prediction_type,
alpha_schedule,
sigma_schedule,
)
# 3. Calculate the CFG estimate of x_0 (pred_x0) and eps_0 (pred_noise)
# Note that this uses the LCM paper's CFG formulation rather than the Imagen CFG formulation
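# For reference, standard (Imagen-style) CFG would be uncond + w * (cond - uncond); the LCM formulation below instead uses the conditional prediction as the base.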
pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0)
pred_noise = cond_pred_noise + w * (cond_pred_noise - uncond_pred_noise)
# 4. Run one step of the ODE solver to estimate the next point x_prev on the
# augmented PF-ODE trajectory (solving backward in time)
# Note that the DDIM step depends on both the predicted x_0 and source noise eps_0.
x_prev = solver.ddim_step(pred_x0, pred_noise, index).to(unet.dtype)
# re-enable unet adapters to turn the `unet` into a student unet.
accelerator.unwrap_model(unet).enable_adapters()
# 9. Get target LCM prediction on x_prev, w, c, t_n (timesteps)
# Note that we do not use a separate target network for LCM-LoRA distillation.
with torch.no_grad():
target_noise_pred = unet(
x_prev,
timesteps,
encoder_hidden_states=prompt_embeds,
added_cond_kwargs={k: v.to(weight_dtype) for k, v in encoded_text.items()},
).sample
pred_x_0 = get_predicted_original_sample(
target_noise_pred,
timesteps,
x_prev,
noise_scheduler.config.prediction_type,
alpha_schedule,
sigma_schedule,
)
target = c_skip * x_prev + c_out * pred_x_0
# 10. Calculate loss
if args.loss_type == "l2":
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
elif args.loss_type == "huber":
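# Pseudo-Huber loss: sqrt(err^2 + c^2) - c, approximately quadratic for small residuals and linear for large ones.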
loss = torch.mean(
torch.sqrt((model_pred.float() - target.float()) ** 2 + args.huber_c**2) - args.huber_c
)
# 11. Backpropagate on the online student model (`unet`) (only LoRA)
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(params_to_optimize, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad(set_to_none=True)
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if accelerator.is_main_process:
if global_step % args.checkpointing_steps == 0:
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
)
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
if global_step % args.validation_steps == 0:
log_validation(
vae, args, accelerator, weight_dtype, global_step, unet=unet, is_final_validation=False
)
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
# Create the pipeline using the trained modules and save it.
accelerator.wait_for_everyone()
if accelerator.is_main_process:
unet = accelerator.unwrap_model(unet)
unet_lora_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet))
StableDiffusionXLPipeline.save_lora_weights(args.output_dir, unet_lora_layers=unet_lora_state_dict)
if args.push_to_hub:
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
del unet
torch.cuda.empty_cache()
# Final inference.
if args.validation_steps is not None:
log_validation(vae, args, accelerator, weight_dtype, step=global_step, unet=None, is_final_validation=True)
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)
| diffusers/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py/0 | {
"file_path": "diffusers/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py",
"repo_id": "diffusers",
"token_count": 26737
} | 98 |
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class CustomDiffusion(ExamplesTestsAccelerate):
def test_custom_diffusion(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir docs/source/en/imgs
--instance_prompt <new1>
--resolution 64
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 1.0e-05
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--modifier_token <new1>
--no_safe_serialization
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_custom_diffusion_weights.bin")))
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "<new1>.bin")))
def test_custom_diffusion_checkpointing_checkpoints_total_limit(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir=docs/source/en/imgs
--output_dir={tmpdir}
--instance_prompt=<new1>
--resolution=64
--train_batch_size=1
--modifier_token=<new1>
--dataloader_num_workers=0
--max_train_steps=6
--checkpoints_total_limit=2
--checkpointing_steps=2
--no_safe_serialization
""".split()
run_command(self._launch_args + test_args)
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"})
def test_custom_diffusion_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir=docs/source/en/imgs
--output_dir={tmpdir}
--instance_prompt=<new1>
--resolution=64
--train_batch_size=1
--modifier_token=<new1>
--dataloader_num_workers=0
--max_train_steps=4
--checkpointing_steps=2
--no_safe_serialization
""".split()
run_command(self._launch_args + test_args)
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4"},
)
resume_run_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir=docs/source/en/imgs
--output_dir={tmpdir}
--instance_prompt=<new1>
--resolution=64
--train_batch_size=1
--modifier_token=<new1>
--dataloader_num_workers=0
--max_train_steps=8
--checkpointing_steps=2
--resume_from_checkpoint=checkpoint-4
--checkpoints_total_limit=2
--no_safe_serialization
""".split()
run_command(self._launch_args + resume_run_args)
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
| diffusers/examples/custom_diffusion/test_custom_diffusion.py/0 | {
"file_path": "diffusers/examples/custom_diffusion/test_custom_diffusion.py",
"repo_id": "diffusers",
"token_count": 2235
} | 99 |
# InstructPix2Pix training example
[InstructPix2Pix](https://arxiv.org/abs/2211.09800) is a method to fine-tune text-conditioned diffusion models such that they can follow an edit instruction for an input image. Models fine-tuned using this method take the following as inputs:
<p align="center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/edit-instruction.png" alt="instructpix2pix-inputs" width=600/>
</p>
The output is an "edited" image that reflects the edit instruction applied on the input image:
<p align="center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/output-gs%407-igs%401-steps%4050.png" alt="instructpix2pix-output" width=600/>
</p>
The `train_instruct_pix2pix.py` script shows how to implement the training procedure and adapt it for Stable Diffusion.
***Disclaimer: Even though `train_instruct_pix2pix.py` implements the InstructPix2Pix
training procedure while being faithful to the [original implementation](https://github.com/timothybrooks/instruct-pix2pix), we have only tested it on a [small-scale dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples). This can impact the end results. For better results, we recommend longer training runs with a larger dataset. [Here](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) you can find a large dataset for InstructPix2Pix training.***
## Running locally with PyTorch
### Installing the dependencies
Before running the scripts, make sure to install the library's training dependencies:
**Important**
To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```
Then cd into the example folder and run
```bash
pip install -r requirements.txt
```
And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
```bash
accelerate config
```
Or for a default accelerate configuration without answering questions about your environment
```bash
accelerate config default
```
Or if your environment doesn't support an interactive shell, e.g., a notebook
```python
from accelerate.utils import write_basic_config
write_basic_config()
```
### Toy example
As mentioned before, we'll use a [small toy dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) for training. The dataset
is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) used in the InstructPix2Pix paper.
Configure environment variables such as the dataset identifier and the Stable Diffusion
checkpoint:
```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export DATASET_ID="fusing/instructpix2pix-1000-samples"
```
Now, we can launch training:
```bash
accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$DATASET_ID \
--enable_xformers_memory_efficient_attention \
--resolution=256 --random_flip \
--train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
--max_train_steps=15000 \
--checkpointing_steps=5000 --checkpoints_total_limit=1 \
--learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \
--conditioning_dropout_prob=0.05 \
--mixed_precision=fp16 \
--seed=42 \
--push_to_hub
```
Additionally, we support performing validation inference to monitor training progress
with Weights and Biases. You can enable this feature with `report_to="wandb"`:
```bash
accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$DATASET_ID \
--enable_xformers_memory_efficient_attention \
--resolution=256 --random_flip \
--train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
--max_train_steps=15000 \
--checkpointing_steps=5000 --checkpoints_total_limit=1 \
--learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \
--conditioning_dropout_prob=0.05 \
--mixed_precision=fp16 \
--val_image_url="https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" \
--validation_prompt="make the mountains snowy" \
--seed=42 \
--report_to=wandb \
--push_to_hub
```
We recommend this type of validation as it can be useful for model debugging. Note that you need `wandb` installed to use this. You can install `wandb` by running `pip install wandb`.
[Here](https://wandb.ai/sayakpaul/instruct-pix2pix/runs/ctr3kovq), you can find an example training run that includes some validation samples and the training hyperparameters.
***Note: In the original paper, the authors observed that even when the model is trained with an image resolution of 256x256, it generalizes well to bigger resolutions such as 512x512. This is likely because of the larger dataset they used during training.***
## Training with multiple GPUs
`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch)
for running distributed training with `accelerate`. Here is an example command:
```bash
accelerate launch --mixed_precision="fp16" --multi_gpu train_instruct_pix2pix.py \
--pretrained_model_name_or_path=runwayml/stable-diffusion-v1-5 \
--dataset_name=sayakpaul/instructpix2pix-1000-samples \
--use_ema \
--enable_xformers_memory_efficient_attention \
--resolution=512 --random_flip \
--train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
--max_train_steps=15000 \
--checkpointing_steps=5000 --checkpoints_total_limit=1 \
--learning_rate=5e-05 --lr_warmup_steps=0 \
--conditioning_dropout_prob=0.05 \
--mixed_precision=fp16 \
--seed=42 \
--push_to_hub
```
## Inference
Once training is complete, we can perform inference:
```python
import PIL
import requests
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
model_id = "your_model_id" # <- replace this
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png"
def download_image(url):
image = PIL.Image.open(requests.get(url, stream=True).raw)
image = PIL.ImageOps.exif_transpose(image)
image = image.convert("RGB")
return image
image = download_image(url)
prompt = "wipe out the lake"
num_inference_steps = 20
image_guidance_scale = 1.5
guidance_scale = 10
edited_image = pipe(prompt,
image=image,
num_inference_steps=num_inference_steps,
image_guidance_scale=image_guidance_scale,
guidance_scale=guidance_scale,
generator=generator,
).images[0]
edited_image.save("edited_image.png")
```
An example model repo obtained using this training script can be found
here - [sayakpaul/instruct-pix2pix](https://huggingface.co/sayakpaul/instruct-pix2pix).
We encourage you to play with the following three parameters to control
the speed and quality of inference:
* `num_inference_steps`
* `image_guidance_scale`
* `guidance_scale`
Particularly, `image_guidance_scale` and `guidance_scale` can have a profound impact
on the generated ("edited") image (see [here](https://twitter.com/RisingSayak/status/1628392199196151808?s=20) for an example).
If you're looking for some interesting ways to use the InstructPix2Pix training methodology, we welcome you to check out this blog post: [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd).
## Stable Diffusion XL
There's an equivalent `train_instruct_pix2pix_sdxl.py` script for [Stable Diffusion XL](https://huggingface.co/papers/2307.01952). Please refer to the docs [here](./README_sdxl.md) to learn more.
| diffusers/examples/instruct_pix2pix/README.md/0 | {
"file_path": "diffusers/examples/instruct_pix2pix/README.md",
"repo_id": "diffusers",
"token_count": 2738
} | 100 |
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| diffusers/examples/research_projects/colossalai/inference.py/0 | {
"file_path": "diffusers/examples/research_projects/colossalai/inference.py",
"repo_id": "diffusers",
"token_count": 127
} | 101 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 bram-w, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import contextlib
import io
import logging
import math
import os
import shutil
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
import wandb
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from peft import LoraConfig
from peft.utils import get_peft_model_state_dict
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
import diffusers
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
UNet2DConditionModel,
)
from diffusers.loaders import LoraLoaderMixin
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, convert_state_dict_to_diffusers
from diffusers.utils.import_utils import is_xformers_available
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.25.0.dev0")
logger = get_logger(__name__)
VALIDATION_PROMPTS = [
"portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography",
"Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
"A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece",
]
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path,
subfolder="text_encoder",
revision=revision,
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModel":
from transformers import CLIPTextModel
return CLIPTextModel
else:
raise ValueError(f"{model_class} is not supported.")
def log_validation(args, unet, accelerator, weight_dtype, epoch, is_final_validation=False):
logger.info(f"Running validation... \n Generating images with prompts:\n" f" {VALIDATION_PROMPTS}.")
# create pipeline
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
revision=args.revision,
variant=args.variant,
torch_dtype=weight_dtype,
)
if not is_final_validation:
pipeline.unet = accelerator.unwrap_model(unet)
else:
pipeline.load_lora_weights(args.output_dir, weight_name="pytorch_lora_weights.safetensors")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
# run inference
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
images = []
context = contextlib.nullcontext() if is_final_validation else torch.cuda.amp.autocast()
for prompt in VALIDATION_PROMPTS:
with context:
image = pipeline(prompt, num_inference_steps=25, generator=generator).images[0]
images.append(image)
tracker_key = "test" if is_final_validation else "validation"
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images(tracker_key, np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log(
{
tracker_key: [
wandb.Image(image, caption=f"{i}: {VALIDATION_PROMPTS[i]}") for i, image in enumerate(images)
]
}
)
# Also log images without the LoRA params for comparison.
if is_final_validation:
pipeline.disable_lora()
no_lora_images = [
pipeline(prompt, num_inference_steps=25, generator=generator).images[0] for prompt in VALIDATION_PROMPTS
]
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in no_lora_images])
tracker.writer.add_images("test_without_lora", np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log(
{
"test_without_lora": [
wandb.Image(image, caption=f"{i}: {VALIDATION_PROMPTS[i]}")
for i, image in enumerate(no_lora_images)
]
}
)
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help=(
"The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
" dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
" or to a folder containing files that 🤗 Datasets can understand."
),
)
parser.add_argument(
"--dataset_split_name",
type=str,
default="validation",
help="Dataset split to be used during training. Helpful to specify for conducting experimental runs.",
)
parser.add_argument(
"--variant",
type=str,
default=None,
help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
)
parser.add_argument(
"--run_validation",
default=False,
action="store_true",
help="Whether to run validation inference in between training and also after training. Helps to track progress.",
)
parser.add_argument(
"--validation_steps",
type=int,
default=200,
help="Run validation every X steps.",
)
parser.add_argument(
"--max_train_samples",
type=int,
default=None,
help=(
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
),
)
parser.add_argument(
"--output_dir",
type=str,
default="diffusion-dpo-lora",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="The directory where the downloaded models and datasets will be stored.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--vae_encode_batch_size",
type=int,
default=8,
help="Batch size to use for VAE encoding of the images for efficient processing.",
)
parser.add_argument(
"--no_hflip",
action="store_true",
help="whether to randomly flip images horizontally",
)
parser.add_argument(
"--random_crop",
default=False,
action="store_true",
help=(
"Whether to random crop the input images to the resolution. If not set, the images will be center-cropped."
),
)
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=1)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=("Max number of checkpoints to store."),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--beta_dpo",
type=int,
default=2500,
help="DPO KL Divergence penalty.",
)
parser.add_argument(
"--loss_type",
type=str,
default="sigmoid",
help="DPO loss type. Can be one of 'sigmoid' (default), 'ipo', or 'cpo'",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--lr_num_cycles",
type=int,
default=1,
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
)
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
),
)
parser.add_argument(
"--prior_generation_precision",
type=str,
default=None,
choices=["no", "fp32", "fp16", "bf16"],
help=(
"Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
parser.add_argument(
"--rank",
type=int,
default=4,
help=("The dimension of the LoRA update matrices."),
)
parser.add_argument(
"--tracker_name",
type=str,
default="diffusion-dpo-lora",
help=("The name of the tracker to report results to."),
)
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
if args.dataset_name is None:
raise ValueError("Must provide a `dataset_name`.")
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
return args
def tokenize_captions(tokenizer, examples):
max_length = tokenizer.model_max_length
captions = []
for caption in examples["caption"]:
captions.append(caption)
text_inputs = tokenizer(
captions, truncation=True, padding="max_length", max_length=max_length, return_tensors="pt"
)
return text_inputs.input_ids
@torch.no_grad()
def encode_prompt(text_encoder, input_ids):
text_input_ids = input_ids.to(text_encoder.device)
attention_mask = None
prompt_embeds = text_encoder(text_input_ids, attention_mask=attention_mask)
prompt_embeds = prompt_embeds[0]
return prompt_embeds
def main(args):
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_config=accelerator_project_config,
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
).repo_id
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="tokenizer",
revision=args.revision,
use_fast=False,
)
# import correct text encoder class
text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = text_encoder_cls.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
)
vae = AutoencoderKL.from_pretrained(
args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant
)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant
)
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
unet.requires_grad_(False)
# For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move unet, vae and text_encoder to device and cast to weight_dtype
unet.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=weight_dtype)
text_encoder.to(accelerator.device, dtype=weight_dtype)
# Set up LoRA.
unet_lora_config = LoraConfig(
r=args.rank,
lora_alpha=args.rank,
init_lora_weights="gaussian",
target_modules=["to_k", "to_q", "to_v", "to_out.0"],
)
# Add adapter and make sure the trainable params are in float32.
unet.add_adapter(unet_lora_config)
if args.mixed_precision == "fp16":
for param in unet.parameters():
# only upcast trainable parameters (LoRA) into fp32
if param.requires_grad:
param.data = param.to(torch.float32)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
logger.warn(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
# create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
def save_model_hook(models, weights, output_dir):
if accelerator.is_main_process:
# there are only two options here. Either are just the unet attn processor layers
# or there are the unet and text encoder atten layers
unet_lora_layers_to_save = None
for model in models:
if isinstance(model, type(accelerator.unwrap_model(unet))):
unet_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model))
else:
raise ValueError(f"unexpected save model: {model.__class__}")
# make sure to pop weight so that corresponding model is not saved again
weights.pop()
LoraLoaderMixin.save_lora_weights(
output_dir,
unet_lora_layers=unet_lora_layers_to_save,
text_encoder_lora_layers=None,
)
def load_model_hook(models, input_dir):
unet_ = None
while len(models) > 0:
model = models.pop()
if isinstance(model, type(accelerator.unwrap_model(unet))):
unet_ = model
else:
raise ValueError(f"unexpected save model: {model.__class__}")
lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir)
LoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_)
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
)
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
# Optimizer creation
params_to_optimize = list(filter(lambda p: p.requires_grad, unet.parameters()))
optimizer = optimizer_class(
params_to_optimize,
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
# Dataset and DataLoaders creation:
train_dataset = load_dataset(
args.dataset_name,
cache_dir=args.cache_dir,
split=args.dataset_split_name,
)
train_transforms = transforms.Compose(
[
transforms.Resize(int(args.resolution), interpolation=transforms.InterpolationMode.BILINEAR),
transforms.RandomCrop(args.resolution) if args.random_crop else transforms.CenterCrop(args.resolution),
transforms.Lambda(lambda x: x) if args.no_hflip else transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess_train(examples):
all_pixel_values = []
for col_name in ["jpg_0", "jpg_1"]:
images = [Image.open(io.BytesIO(im_bytes)).convert("RGB") for im_bytes in examples[col_name]]
pixel_values = [train_transforms(image) for image in images]
all_pixel_values.append(pixel_values)
# Double up on the channel dim so each example carries both images of the pair; reordered below so the preferred image comes first
im_tup_iterator = zip(*all_pixel_values)
combined_pixel_values = []
for im_tup, label_0 in zip(im_tup_iterator, examples["label_0"]):
if label_0 == 0:
im_tup = im_tup[::-1]
combined_im = torch.cat(im_tup, dim=0) # no batch dim
combined_pixel_values.append(combined_im)
examples["pixel_values"] = combined_pixel_values
examples["input_ids"] = tokenize_captions(tokenizer, examples)
return examples
with accelerator.main_process_first():
if args.max_train_samples is not None:
train_dataset = train_dataset.shuffle(seed=args.seed).select(range(args.max_train_samples))
# Set the training transforms
train_dataset = train_dataset.with_transform(preprocess_train)
def collate_fn(examples):
pixel_values = torch.stack([example["pixel_values"] for example in examples])
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
final_dict = {"pixel_values": pixel_values}
final_dict["input_ids"] = torch.stack([example["input_ids"] for example in examples])
return final_dict
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
collate_fn=collate_fn,
num_workers=args.dataloader_num_workers,
)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
num_training_steps=args.max_train_steps * accelerator.num_processes,
num_cycles=args.lr_num_cycles,
power=args.lr_power,
)
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, optimizer, train_dataloader, lr_scheduler
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers(args.tracker_name, config=vars(args))
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != "latest":
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
)
args.resume_from_checkpoint = None
initial_global_step = 0
else:
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
initial_global_step = global_step
first_epoch = global_step // num_update_steps_per_epoch
else:
initial_global_step = 0
progress_bar = tqdm(
range(0, args.max_train_steps),
initial=initial_global_step,
desc="Steps",
# Only show the progress bar once on each machine.
disable=not accelerator.is_local_main_process,
)
unet.train()
for epoch in range(first_epoch, args.num_train_epochs):
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(unet):
# (batch_size, 2*channels, h, w) -> (2*batch_size, channels, h, w)
pixel_values = batch["pixel_values"].to(dtype=weight_dtype)
feed_pixel_values = torch.cat(pixel_values.chunk(2, dim=1))
latents = []
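# Encode in chunks of `vae_encode_batch_size` to keep VAE memory usage bounded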
for i in range(0, feed_pixel_values.shape[0], args.vae_encode_batch_size):
latents.append(
vae.encode(feed_pixel_values[i : i + args.vae_encode_batch_size]).latent_dist.sample()
)
latents = torch.cat(latents, dim=0)
latents = latents * vae.config.scaling_factor
# Sample noise that we'll add to the latents
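# The noise is sampled once for the first half and repeated, so the preferred and rejected images share identical noise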
noise = torch.randn_like(latents).chunk(2)[0].repeat(2, 1, 1, 1)
# Sample a random timestep for each image
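# Timesteps are likewise repeated so both halves of each pair are noised at the same step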
bsz = latents.shape[0] // 2
timesteps = torch.randint(
0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device, dtype=torch.long
).repeat(2)
# Add noise to the model input according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_model_input = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = encode_prompt(text_encoder, batch["input_ids"]).repeat(2, 1, 1)
# Predict the noise residual
model_pred = unet(
noisy_model_input,
timesteps,
encoder_hidden_states,
).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
# Compute losses.
model_losses = F.mse_loss(model_pred.float(), target.float(), reduction="none")
model_losses = model_losses.mean(dim=list(range(1, len(model_losses.shape))))
model_losses_w, model_losses_l = model_losses.chunk(2)
# For logging
raw_model_loss = 0.5 * (model_losses_w.mean() + model_losses_l.mean())
model_diff = model_losses_w - model_losses_l  # both terms are per-sample losses of shape (batch_size,)
# Reference model predictions.
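# Disabling the LoRA adapters recovers the frozen base UNet, which serves as the DPO reference model.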
accelerator.unwrap_model(unet).disable_adapters()
with torch.no_grad():
ref_preds = unet(
noisy_model_input,
timesteps,
encoder_hidden_states,
).sample.detach()
ref_loss = F.mse_loss(ref_preds.float(), target.float(), reduction="none")
ref_loss = ref_loss.mean(dim=list(range(1, len(ref_loss.shape))))
ref_losses_w, ref_losses_l = ref_loss.chunk(2)
ref_diff = ref_losses_w - ref_losses_l
raw_ref_loss = ref_loss.mean()
# Re-enable adapters.
accelerator.unwrap_model(unet).enable_adapters()
# Final loss.
logits = ref_diff - model_diff
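# A positive logit means the fine-tuned UNet reconstructs the preferred image better, relative to the rejected one, than the frozen reference does.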
if args.loss_type == "sigmoid":
loss = -1 * F.logsigmoid(args.beta_dpo * logits).mean()
elif args.loss_type == "hinge":
loss = torch.relu(1 - args.beta_dpo * logits).mean()
elif args.loss_type == "ipo":
losses = (logits - 1 / (2 * args.beta_dpo)) ** 2
loss = losses.mean()
else:
raise ValueError(f"Unknown loss type {args.loss_type}")
implicit_acc = (logits > 0).sum().float() / logits.size(0)
implicit_acc += 0.5 * (logits == 0).sum().float() / logits.size(0)
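# Implicit accuracy: fraction of pairs where the implicit reward favors the preferred image (ties count as 0.5)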
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(params_to_optimize, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if accelerator.is_main_process:
if global_step % args.checkpointing_steps == 0:
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
)
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
if args.run_validation and global_step % args.validation_steps == 0:
log_validation(
args, unet=unet, accelerator=accelerator, weight_dtype=weight_dtype, epoch=epoch
)
logs = {
"loss": loss.detach().item(),
"raw_model_loss": raw_model_loss.detach().item(),
"ref_loss": raw_ref_loss.detach().item(),
"implicit_acc": implicit_acc.detach().item(),
"lr": lr_scheduler.get_last_lr()[0],
}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
# Save the lora layers
accelerator.wait_for_everyone()
if accelerator.is_main_process:
unet = accelerator.unwrap_model(unet)
unet = unet.to(torch.float32)
unet_lora_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet))
LoraLoaderMixin.save_lora_weights(
save_directory=args.output_dir, unet_lora_layers=unet_lora_state_dict, text_encoder_lora_layers=None
)
# Final validation?
if args.run_validation:
log_validation(
args,
unet=None,
accelerator=accelerator,
weight_dtype=weight_dtype,
epoch=epoch,
is_final_validation=True,
)
if args.push_to_hub:
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)
| diffusers/examples/research_projects/diffusion_dpo/train_diffusion_dpo.py/0 | {
"file_path": "diffusers/examples/research_projects/diffusion_dpo/train_diffusion_dpo.py",
"repo_id": "diffusers",
"token_count": 17320
} | 102 |
import argparse
import itertools
import math
import os
import random
from pathlib import Path
from typing import Iterable
import numpy as np
import PIL
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
from neural_compressor.utils import logger
from packaging import version
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.utils import make_image_grid
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
# ------------------------------------------------------------------------------
def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path):
logger.info("Saving embeddings")
learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id]
learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()}
torch.save(learned_embeds_dict, save_path)
def parse_args():
parser = argparse.ArgumentParser(description="Example of distillation for quantization on Textual Inversion.")
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save learned_embeds.bin every X updates steps.",
)
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
)
parser.add_argument(
"--placeholder_token",
type=str,
default=None,
required=True,
help="A token to use as a placeholder for the concept.",
)
parser.add_argument(
"--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
)
parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
parser.add_argument(
"--output_dir",
type=str,
default="text-inversion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="The directory where the downloaded models and datasets will be stored.",
)
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
)
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=100)
parser.add_argument(
"--max_train_steps",
type=int,
default=5000,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU."
),
)
parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--do_quantization", action="store_true", help="Whether or not to do quantization.")
parser.add_argument("--do_distillation", action="store_true", help="Whether or not to do distillation.")
parser.add_argument(
"--verify_loading", action="store_true", help="Whether or not to verify the loading of the quantized model."
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
if args.train_data_dir is None:
raise ValueError("You must specify a train data directory.")
return args
imagenet_templates_small = [
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of a dirty {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
]
imagenet_style_templates_small = [
"a painting in the style of {}",
"a rendering in the style of {}",
"a cropped painting in the style of {}",
"the painting in the style of {}",
"a clean painting in the style of {}",
"a dirty painting in the style of {}",
"a dark painting in the style of {}",
"a picture in the style of {}",
"a cool painting in the style of {}",
"a close-up painting in the style of {}",
"a bright painting in the style of {}",
"a cropped painting in the style of {}",
"a good painting in the style of {}",
"a close-up painting in the style of {}",
"a rendition in the style of {}",
"a nice painting in the style of {}",
"a small painting in the style of {}",
"a weird painting in the style of {}",
"a large painting in the style of {}",
]
# Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14
class EMAModel:
"""
Exponential Moving Average of model weights
"""
def __init__(self, parameters: Iterable[torch.nn.Parameter], decay=0.9999):
parameters = list(parameters)
self.shadow_params = [p.clone().detach() for p in parameters]
self.decay = decay
self.optimization_step = 0
def get_decay(self, optimization_step):
"""
Compute the decay factor for the exponential moving average.
"""
value = (1 + optimization_step) / (10 + optimization_step)
return 1 - min(self.decay, value)
@torch.no_grad()
def step(self, parameters):
parameters = list(parameters)
self.optimization_step += 1
self.decay = self.get_decay(self.optimization_step)
for s_param, param in zip(self.shadow_params, parameters):
if param.requires_grad:
tmp = self.decay * (s_param - param)
s_param.sub_(tmp)
else:
s_param.copy_(param)
torch.cuda.empty_cache()
def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
"""
Copy current averaged parameters into given collection of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored moving averages. If `None`, the
parameters with which this `ExponentialMovingAverage` was
initialized will be used.
"""
parameters = list(parameters)
for s_param, param in zip(self.shadow_params, parameters):
param.data.copy_(s_param.data)
def to(self, device=None, dtype=None) -> None:
r"""Move internal buffers of the ExponentialMovingAverage to `device`.
Args:
device: like `device` argument to `torch.Tensor.to`
"""
# .to() on the tensors handles None correctly
self.shadow_params = [
p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
for p in self.shadow_params
]
class TextualInversionDataset(Dataset):
def __init__(
self,
data_root,
tokenizer,
learnable_property="object", # [object, style]
size=512,
repeats=100,
interpolation="bicubic",
flip_p=0.5,
set="train",
placeholder_token="*",
center_crop=False,
):
self.data_root = data_root
self.tokenizer = tokenizer
self.learnable_property = learnable_property
self.size = size
self.placeholder_token = placeholder_token
self.center_crop = center_crop
self.flip_p = flip_p
self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
self.num_images = len(self.image_paths)
self._length = self.num_images
if set == "train":
self._length = self.num_images * repeats
self.interpolation = {
"linear": PIL_INTERPOLATION["linear"],
"bilinear": PIL_INTERPOLATION["bilinear"],
"bicubic": PIL_INTERPOLATION["bicubic"],
"lanczos": PIL_INTERPOLATION["lanczos"],
}[interpolation]
self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
def __len__(self):
return self._length
def __getitem__(self, i):
example = {}
image = Image.open(self.image_paths[i % self.num_images])
if not image.mode == "RGB":
image = image.convert("RGB")
placeholder_string = self.placeholder_token
text = random.choice(self.templates).format(placeholder_string)
example["input_ids"] = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids[0]
# default to score-sde preprocessing
img = np.array(image).astype(np.uint8)
if self.center_crop:
crop = min(img.shape[0], img.shape[1])
(
h,
w,
) = (
img.shape[0],
img.shape[1],
)
img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
image = Image.fromarray(img)
image = image.resize((self.size, self.size), resample=self.interpolation)
image = self.flip_transform(image)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
return example
def freeze_params(params):
for param in params:
param.requires_grad = False
def generate_images(pipeline, prompt="", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
generator = torch.Generator(pipeline.device).manual_seed(seed)
images = pipeline(
prompt,
guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps,
generator=generator,
num_images_per_prompt=num_images_per_prompt,
).images
_rows = int(math.sqrt(num_images_per_prompt))
grid = make_image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
return grid
def main():
args = parse_args()
logging_dir = os.path.join(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with="tensorboard",
project_config=accelerator_project_config,
)
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
).repo_id
# Load the tokenizer and add the placeholder token as an additional special token
if args.tokenizer_name:
tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
elif args.pretrained_model_name_or_path:
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
# Load models and create wrapper for stable diffusion
noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = CLIPTextModel.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="text_encoder",
revision=args.revision,
)
vae = AutoencoderKL.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="vae",
revision=args.revision,
)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="unet",
revision=args.revision,
)
train_unet = False
# Freeze vae and unet
freeze_params(vae.parameters())
if not args.do_quantization and not args.do_distillation:
# Add the placeholder token in tokenizer
num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
" `placeholder_token` that is not already in the tokenizer."
)
# Convert the initializer_token, placeholder_token to ids
token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
# Check if initializer_token is a single token or a sequence of tokens
if len(token_ids) > 1:
raise ValueError("The initializer token must be a single token.")
initializer_token_id = token_ids[0]
placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder.resize_token_embeddings(len(tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = text_encoder.get_input_embeddings().weight.data
token_embeds[placeholder_token_id] = token_embeds[initializer_token_id]
freeze_params(unet.parameters())
# Freeze all parameters except for the token embeddings in text encoder
params_to_freeze = itertools.chain(
text_encoder.text_model.encoder.parameters(),
text_encoder.text_model.final_layer_norm.parameters(),
text_encoder.text_model.embeddings.position_embedding.parameters(),
)
freeze_params(params_to_freeze)
else:
train_unet = True
freeze_params(text_encoder.parameters())
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
# Initialize the optimizer
optimizer = torch.optim.AdamW(
# only optimize the unet or embeddings of text_encoder
unet.parameters() if train_unet else text_encoder.get_input_embeddings().parameters(),
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
train_dataset = TextualInversionDataset(
data_root=args.train_data_dir,
tokenizer=tokenizer,
size=args.resolution,
placeholder_token=args.placeholder_token,
repeats=args.repeats,
learnable_property=args.learnable_property,
center_crop=args.center_crop,
set="train",
)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
num_training_steps=args.max_train_steps * accelerator.num_processes,
)
if not train_unet:
text_encoder = accelerator.prepare(text_encoder)
unet.to(accelerator.device)
unet.eval()
else:
unet = accelerator.prepare(unet)
text_encoder.to(accelerator.device)
text_encoder.eval()
optimizer, train_dataloader, lr_scheduler = accelerator.prepare(optimizer, train_dataloader, lr_scheduler)
# Move vae to device
vae.to(accelerator.device)
# Keep vae in eval mode as we don't train it
vae.eval()
compression_manager = None
def train_func(model):
if train_unet:
unet_ = model
text_encoder_ = text_encoder
else:
unet_ = unet
text_encoder_ = model
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initialize automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers("textual_inversion", config=vars(args))
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
progress_bar.set_description("Steps")
global_step = 0
if train_unet and args.use_ema:
ema_unet = EMAModel(unet_.parameters())
for epoch in range(args.num_train_epochs):
model.train()
train_loss = 0.0
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(model):
# Convert images to latent space
latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach()
latents = latents * 0.18215
# Sample noise that we'll add to the latents
noise = torch.randn(latents.shape).to(latents.device)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(
0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device
).long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder_(batch["input_ids"])[0]
# Predict the noise residual
model_pred = unet_(noisy_latents, timesteps, encoder_hidden_states).sample
loss = F.mse_loss(model_pred, noise, reduction="none").mean([1, 2, 3]).mean()
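# When quantizing/distilling the UNet, the Intel Neural Compressor callback below folds its own objectives (e.g. intermediate-layer distillation) into the diffusion loss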
if train_unet and compression_manager:
unet_inputs = {
"sample": noisy_latents,
"timestep": timesteps,
"encoder_hidden_states": encoder_hidden_states,
}
loss = compression_manager.callbacks.on_after_compute_loss(unet_inputs, model_pred, loss)
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
train_loss += avg_loss.item() / args.gradient_accumulation_steps
# Backpropagate
accelerator.backward(loss)
if train_unet:
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(unet_.parameters(), args.max_grad_norm)
else:
# Zero out the gradients for all token embeddings except the newly added
# embeddings for the concept, as we only want to optimize the concept embeddings
if accelerator.num_processes > 1:
grads = text_encoder_.module.get_input_embeddings().weight.grad
else:
grads = text_encoder_.get_input_embeddings().weight.grad
# Get the index for tokens that we want to zero the grads for
index_grads_to_zero = torch.arange(len(tokenizer)) != placeholder_token_id
grads.data[index_grads_to_zero, :] = grads.data[index_grads_to_zero, :].fill_(0)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
if train_unet and args.use_ema:
ema_unet.step(unet_.parameters())
progress_bar.update(1)
global_step += 1
accelerator.log({"train_loss": train_loss}, step=global_step)
train_loss = 0.0
if not train_unet and global_step % args.save_steps == 0:
save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
save_progress(text_encoder_, placeholder_token_id, accelerator, args, save_path)
logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
accelerator.wait_for_everyone()
if train_unet and args.use_ema:
ema_unet.copy_to(unet_.parameters())
if not train_unet:
return text_encoder_
if not train_unet:
text_encoder = train_func(text_encoder)
else:
import copy
model = copy.deepcopy(unet)
confs = []
if args.do_quantization:
from neural_compressor import QuantizationAwareTrainingConfig
q_conf = QuantizationAwareTrainingConfig()
confs.append(q_conf)
if args.do_distillation:
teacher_model = copy.deepcopy(model)
def attention_fetcher(x):
return x.sample
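# Each mapping pairs a student UNet layer with the same-named teacher layer; attention blocks use the fetcher above to pull the `.sample` tensor out of their output object, other layers are matched by name alone.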
layer_mappings = [
[
[
"conv_in",
]
],
[
[
"time_embedding",
]
],
[["down_blocks.0.attentions.0", attention_fetcher]],
[["down_blocks.0.attentions.1", attention_fetcher]],
[
[
"down_blocks.0.resnets.0",
]
],
[
[
"down_blocks.0.resnets.1",
]
],
[
[
"down_blocks.0.downsamplers.0",
]
],
[["down_blocks.1.attentions.0", attention_fetcher]],
[["down_blocks.1.attentions.1", attention_fetcher]],
[
[
"down_blocks.1.resnets.0",
]
],
[
[
"down_blocks.1.resnets.1",
]
],
[
[
"down_blocks.1.downsamplers.0",
]
],
[["down_blocks.2.attentions.0", attention_fetcher]],
[["down_blocks.2.attentions.1", attention_fetcher]],
[
[
"down_blocks.2.resnets.0",
]
],
[
[
"down_blocks.2.resnets.1",
]
],
[
[
"down_blocks.2.downsamplers.0",
]
],
[
[
"down_blocks.3.resnets.0",
]
],
[
[
"down_blocks.3.resnets.1",
]
],
[
[
"up_blocks.0.resnets.0",
]
],
[
[
"up_blocks.0.resnets.1",
]
],
[
[
"up_blocks.0.resnets.2",
]
],
[
[
"up_blocks.0.upsamplers.0",
]
],
[["up_blocks.1.attentions.0", attention_fetcher]],
[["up_blocks.1.attentions.1", attention_fetcher]],
[["up_blocks.1.attentions.2", attention_fetcher]],
[
[
"up_blocks.1.resnets.0",
]
],
[
[
"up_blocks.1.resnets.1",
]
],
[
[
"up_blocks.1.resnets.2",
]
],
[
[
"up_blocks.1.upsamplers.0",
]
],
[["up_blocks.2.attentions.0", attention_fetcher]],
[["up_blocks.2.attentions.1", attention_fetcher]],
[["up_blocks.2.attentions.2", attention_fetcher]],
[
[
"up_blocks.2.resnets.0",
]
],
[
[
"up_blocks.2.resnets.1",
]
],
[
[
"up_blocks.2.resnets.2",
]
],
[
[
"up_blocks.2.upsamplers.0",
]
],
[["up_blocks.3.attentions.0", attention_fetcher]],
[["up_blocks.3.attentions.1", attention_fetcher]],
[["up_blocks.3.attentions.2", attention_fetcher]],
[
[
"up_blocks.3.resnets.0",
]
],
[
[
"up_blocks.3.resnets.1",
]
],
[
[
"up_blocks.3.resnets.2",
]
],
[["mid_block.attentions.0", attention_fetcher]],
[
[
"mid_block.resnets.0",
]
],
[
[
"mid_block.resnets.1",
]
],
[
[
"conv_out",
]
],
]
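# Make sure every layer referenced above actually exists in the UNet before configuring distillation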
layer_names = [layer_mapping[0][0] for layer_mapping in layer_mappings]
if not set(layer_names).issubset([n[0] for n in model.named_modules()]):
raise ValueError(
"Provided model is not compatible with the default layer_mappings, "
'please use the model fine-tuned from "CompVis/stable-diffusion-v1-4", '
"or modify the layer_mappings variable to fit your model."
f"\nDefault layer_mappings are as such:\n{layer_mappings}"
)
from neural_compressor.config import DistillationConfig, IntermediateLayersKnowledgeDistillationLossConfig
distillation_criterion = IntermediateLayersKnowledgeDistillationLossConfig(
layer_mappings=layer_mappings,
loss_types=["MSE"] * len(layer_mappings),
loss_weights=[1.0 / len(layer_mappings)] * len(layer_mappings),
add_origin_loss=True,
)
d_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion)
confs.append(d_conf)
from neural_compressor.training import prepare_compression
compression_manager = prepare_compression(model, confs)
compression_manager.callbacks.on_train_begin()
model = compression_manager.model
train_func(model)
compression_manager.callbacks.on_train_end()
# Save the resulting model and its corresponding configuration in the given directory
model.save(args.output_dir)
logger.info(f"Optimized model saved to: {args.output_dir}.")
# change to framework model for further use
model = model.model
# Create the pipeline using the trained modules and save it.
templates = imagenet_style_templates_small if args.learnable_property == "style" else imagenet_templates_small
prompt = templates[0].format(args.placeholder_token)
if accelerator.is_main_process:
pipeline = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
text_encoder=accelerator.unwrap_model(text_encoder),
vae=vae,
unet=accelerator.unwrap_model(unet),
tokenizer=tokenizer,
)
pipeline.save_pretrained(args.output_dir)
pipeline = pipeline.to(unet.device)
baseline_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
baseline_model_images.save(
os.path.join(args.output_dir, "{}_baseline_model.png".format("_".join(prompt.split())))
)
if not train_unet:
# Also save the newly trained embeddings
save_path = os.path.join(args.output_dir, "learned_embeds.bin")
save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path)
else:
setattr(pipeline, "unet", accelerator.unwrap_model(model))
if args.do_quantization:
pipeline = pipeline.to(torch.device("cpu"))
optimized_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
optimized_model_images.save(
os.path.join(args.output_dir, "{}_optimized_model.png".format("_".join(prompt.split())))
)
if args.push_to_hub:
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
accelerator.end_training()
if args.do_quantization and args.verify_loading:
# Load the model obtained after Intel Neural Compressor quantization
from neural_compressor.utils.pytorch import load
loaded_model = load(args.output_dir, model=unet)
loaded_model.eval()
setattr(pipeline, "unet", loaded_model)
if args.do_quantization:
pipeline = pipeline.to(torch.device("cpu"))
loaded_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
if loaded_model_images != optimized_model_images:
logger.info("The quantized model was not successfully loaded.")
else:
logger.info("The quantized model was successfully loaded.")
if __name__ == "__main__":
main()
| diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py/0 | {
"file_path": "diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py",
"repo_id": "diffusers",
"token_count": 18301
} | 103 |
## Diffusers examples with ONNXRuntime optimizations
**This research project is not actively maintained by the diffusers team. For any questions or comments, please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on GitHub.**
This project provides diffusers examples with ONNXRuntime optimizations for training/fine-tuning unconditional image generation, text-to-image, and textual inversion models. Please see the individual directories for details on how to run each task with ONNXRuntime.
| diffusers/examples/research_projects/onnxruntime/README.md/0 | {
"file_path": "diffusers/examples/research_projects/onnxruntime/README.md",
"repo_id": "diffusers",
"token_count": 134
} | 104 |
import argparse
import copy
import itertools
import logging
import math
import os
import random
import shutil
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import torchvision.transforms.v2 as transforms_v2
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from huggingface_hub import create_repo, upload_folder
from packaging import version
from peft import LoraConfig, PeftModel, get_peft_model
from PIL import Image
from PIL.ImageOps import exif_transpose
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers import AutoTokenizer, CLIPTextModel
import diffusers
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DPMSolverMultistepScheduler,
StableDiffusionInpaintPipeline,
UNet2DConditionModel,
)
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
if is_wandb_available():
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.20.1")
logger = get_logger(__name__)
def make_mask(images, resolution, times=30):
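# Builds a synthetic inpainting mask by zeroing out up to `times` random rectangles in an all-ones map, inverting the result half of the time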
mask, times = torch.ones_like(images[0:1, :, :]), np.random.randint(1, times)
min_size, max_size, margin = np.array([0.03, 0.25, 0.01]) * resolution
max_size = min(max_size, resolution - margin * 2)
for _ in range(times):
width = np.random.randint(int(min_size), int(max_size))
height = np.random.randint(int(min_size), int(max_size))
x_start = np.random.randint(int(margin), resolution - int(margin) - width + 1)
y_start = np.random.randint(int(margin), resolution - int(margin) - height + 1)
mask[:, y_start : y_start + height, x_start : x_start + width] = 0
mask = 1 - mask if random.random() < 0.5 else mask
return mask
def save_model_card(
repo_id: str,
images=None,
base_model=str,
repo_folder=None,
):
img_str = ""
for i, image in enumerate(images):
image.save(os.path.join(repo_folder, f"image_{i}.png"))
img_str += f"\n"
yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
prompt: "a photo of sks"
tags:
- stable-diffusion-inpainting
- stable-diffusion-inpainting-diffusers
- text-to-image
- diffusers
- realfill
inference: true
---
"""
model_card = f"""
# RealFill - {repo_id}
This is a realfill model derived from {base_model}. The weights were trained using [RealFill](https://realfill.github.io/).
You can find some example images in the following. \n
{img_str}
"""
with open(os.path.join(repo_folder, "README.md"), "w") as f:
f.write(yaml + model_card)
def log_validation(
text_encoder,
tokenizer,
unet,
args,
accelerator,
weight_dtype,
epoch,
):
logger.info(f"Running validation... \nGenerating {args.num_validation_images} images")
# create pipeline (note: unet and vae are loaded again in float32)
pipeline = StableDiffusionInpaintPipeline.from_pretrained(
args.pretrained_model_name_or_path,
tokenizer=tokenizer,
revision=args.revision,
torch_dtype=weight_dtype,
)
# set `keep_fp32_wrapper` to True because we do not want to remove
# mixed precision hooks while we are still training
pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True)
pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True)
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
# run inference
generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed)
target_dir = Path(args.train_data_dir) / "target"
target_image, target_mask = target_dir / "target.png", target_dir / "mask.png"
image, mask_image = Image.open(target_image), Image.open(target_mask)
if image.mode != "RGB":
image = image.convert("RGB")
images = []
for _ in range(args.num_validation_images):
image = pipeline(
prompt="a photo of sks",
image=image,
mask_image=mask_image,
num_inference_steps=25,
guidance_scale=5,
generator=generator,
).images[0]
images.append(image)
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log({"validation": [wandb.Image(image, caption=str(i)) for i, image in enumerate(images)]})
del pipeline
torch.cuda.empty_cache()
return images
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--train_data_dir",
type=str,
default=None,
required=True,
help="A folder containing the training data of images.",
)
parser.add_argument(
"--num_validation_images",
type=int,
default=4,
help="Number of images that should be generated during validation with `validation_conditioning`.",
)
parser.add_argument(
"--validation_steps",
type=int,
default=100,
help=(
"Run realfill validation every X steps. RealFill validation consists of running the conditioning"
" `args.validation_conditioning` multiple times: `args.num_validation_images`."
),
)
parser.add_argument(
"--output_dir",
type=str,
default="realfill-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=1)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=("Max number of checkpoints to store."),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--unet_learning_rate",
type=float,
default=2e-4,
help="Learning rate to use for unet.",
)
parser.add_argument(
"--text_encoder_learning_rate",
type=float,
default=4e-5,
help="Learning rate to use for text encoder.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--lr_num_cycles",
type=int,
default=1,
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
)
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--wandb_key",
type=str,
default=None,
help=("If report to option is set to wandb, api-key for wandb used for login to wandb "),
)
parser.add_argument(
"--wandb_project_name",
type=str,
default=None,
help=("If report to option is set to wandb, project name in wandb for log tracking "),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
parser.add_argument(
"--set_grads_to_none",
action="store_true",
help=(
"Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain"
" behaviors, so disable this argument if it causes any problems. More info:"
" https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
),
)
parser.add_argument(
"--lora_rank",
type=int,
default=16,
help=("The dimension of the LoRA update matrices."),
)
parser.add_argument(
"--lora_alpha",
type=int,
default=27,
help=("The alpha constant of the LoRA update matrices."),
)
parser.add_argument(
"--lora_dropout",
type=float,
default=0.0,
help="The dropout rate of the LoRA update matrices.",
)
parser.add_argument(
"--lora_bias",
type=str,
default="none",
help="The bias type of the Lora update matrices. Must be 'none', 'all' or 'lora_only'.",
)
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
return args
class RealFillDataset(Dataset):
"""
A dataset to prepare the training and conditioning images and
the masks with the dummy prompt for fine-tuning the model.
It pre-processes the images, masks and tokenizes the prompts.
"""
def __init__(
self,
train_data_root,
tokenizer,
size=512,
):
self.size = size
self.tokenizer = tokenizer
self.ref_data_root = Path(train_data_root) / "ref"
self.target_image = Path(train_data_root) / "target" / "target.png"
self.target_mask = Path(train_data_root) / "target" / "mask.png"
if not (self.ref_data_root.exists() and self.target_image.exists() and self.target_mask.exists()):
raise ValueError("Train images root doesn't exists.")
self.train_images_path = list(self.ref_data_root.iterdir()) + [self.target_image]
self.num_train_images = len(self.train_images_path)
self.train_prompt = "a photo of sks"
self.transform = transforms_v2.Compose(
[
transforms_v2.ToImage(),
transforms_v2.RandomResize(size, int(1.125 * size)),
transforms_v2.RandomCrop(size),
transforms_v2.ToDtype(torch.float32, scale=True),
transforms_v2.Normalize([0.5], [0.5]),
]
)
def __len__(self):
return self.num_train_images
def __getitem__(self, index):
example = {}
image = Image.open(self.train_images_path[index])
image = exif_transpose(image)
if not image.mode == "RGB":
image = image.convert("RGB")
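# Reference images get a blank (all-zero) weighting map, while the target image is weighted by its annotated mask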
if index < len(self) - 1:
weighting = Image.new("L", image.size)
else:
weighting = Image.open(self.target_mask)
weighting = exif_transpose(weighting)
image, weighting = self.transform(image, weighting)
example["images"], example["weightings"] = image, weighting < 0
if random.random() < 0.1:
example["masks"] = torch.ones_like(example["images"][0:1, :, :])
else:
example["masks"] = make_mask(example["images"], self.size)
example["conditioning_images"] = example["images"] * (example["masks"] < 0.5)
train_prompt = "" if random.random() < 0.1 else self.train_prompt
example["prompt_ids"] = self.tokenizer(
train_prompt,
truncation=True,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids
return example
def collate_fn(examples):
input_ids = [example["prompt_ids"] for example in examples]
images = [example["images"] for example in examples]
masks = [example["masks"] for example in examples]
weightings = [example["weightings"] for example in examples]
conditioning_images = [example["conditioning_images"] for example in examples]
images = torch.stack(images)
images = images.to(memory_format=torch.contiguous_format).float()
masks = torch.stack(masks)
masks = masks.to(memory_format=torch.contiguous_format).float()
weightings = torch.stack(weightings)
weightings = weightings.to(memory_format=torch.contiguous_format).float()
conditioning_images = torch.stack(conditioning_images)
conditioning_images = conditioning_images.to(memory_format=torch.contiguous_format).float()
input_ids = torch.cat(input_ids, dim=0)
batch = {
"input_ids": input_ids,
"images": images,
"masks": masks,
"weightings": weightings,
"conditioning_images": conditioning_images,
}
return batch
def main(args):
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_dir=logging_dir,
)
if args.report_to == "wandb":
if not is_wandb_available():
raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
wandb.login(key=args.wandb_key)
wandb.init(project=args.wandb_project_name)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
).repo_id
# Load the tokenizer
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
elif args.pretrained_model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="tokenizer",
revision=args.revision,
use_fast=False,
)
# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = CLIPTextModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
)
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
)
config = LoraConfig(
r=args.lora_rank,
lora_alpha=args.lora_alpha,
target_modules=["to_k", "to_q", "to_v", "key", "query", "value"],
lora_dropout=args.lora_dropout,
bias=args.lora_bias,
)
unet = get_peft_model(unet, config)
config = LoraConfig(
r=args.lora_rank,
lora_alpha=args.lora_alpha,
target_modules=["k_proj", "q_proj", "v_proj"],
lora_dropout=args.lora_dropout,
bias=args.lora_bias,
)
text_encoder = get_peft_model(text_encoder, config)
vae.requires_grad_(False)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
logger.warning(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
text_encoder.gradient_checkpointing_enable()
# create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
def save_model_hook(models, weights, output_dir):
if accelerator.is_main_process:
for model in models:
sub_dir = (
"unet"
if isinstance(model.base_model.model, type(accelerator.unwrap_model(unet).base_model.model))
else "text_encoder"
)
model.save_pretrained(os.path.join(output_dir, sub_dir))
# make sure to pop weight so that corresponding model is not saved again
weights.pop()
def load_model_hook(models, input_dir):
while len(models) > 0:
# pop models so that they are not loaded again
model = models.pop()
sub_dir = (
"unet"
if isinstance(model.base_model.model, type(accelerator.unwrap_model(unet).base_model.model))
else "text_encoder"
)
model_cls = (
UNet2DConditionModel
if isinstance(model.base_model.model, type(accelerator.unwrap_model(unet).base_model.model))
else CLIPTextModel
)
load_model = model_cls.from_pretrained(args.pretrained_model_name_or_path, subfolder=sub_dir)
load_model = PeftModel.from_pretrained(load_model, input_dir, subfolder=sub_dir)
model.load_state_dict(load_model.state_dict())
del load_model
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.unet_learning_rate = (
args.unet_learning_rate
* args.gradient_accumulation_steps
* args.train_batch_size
* accelerator.num_processes
)
args.text_encoder_learning_rate = (
args.text_encoder_learning_rate
* args.gradient_accumulation_steps
* args.train_batch_size
* accelerator.num_processes
)
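# i.e. with --scale_lr the effective learning rates become
#   lr_effective = lr_base * gradient_accumulation_steps * train_batch_size * num_processes
# so the per-sample step size stays roughly constant as the global batch size grows.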
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
)
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
# Optimizer creation
optimizer = optimizer_class(
[
{"params": unet.parameters(), "lr": args.unet_learning_rate},
{"params": text_encoder.parameters(), "lr": args.text_encoder_learning_rate},
],
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
# Dataset and DataLoaders creation:
train_dataset = RealFillDataset(
train_data_root=args.train_data_dir,
tokenizer=tokenizer,
size=args.resolution,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
collate_fn=collate_fn,
num_workers=1,
)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
num_cycles=args.lr_num_cycles,
power=args.lr_power,
)
# Prepare everything with our `accelerator`.
unet, text_encoder, optimizer, train_dataloader = accelerator.prepare(
unet, text_encoder, optimizer, train_dataloader
)
# For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move vae to device and cast to weight_dtype
vae.to(accelerator.device, dtype=weight_dtype)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
tracker_config = vars(copy.deepcopy(args))
accelerator.init_trackers("realfill", config=tracker_config)
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != "latest":
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
)
args.resume_from_checkpoint = None
initial_global_step = 0
else:
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
initial_global_step = global_step
first_epoch = global_step // num_update_steps_per_epoch
else:
initial_global_step = 0
progress_bar = tqdm(
range(0, args.max_train_steps),
initial=initial_global_step,
desc="Steps",
# Only show the progress bar once on each machine.
disable=not accelerator.is_local_main_process,
)
for epoch in range(first_epoch, args.num_train_epochs):
unet.train()
text_encoder.train()
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(unet, text_encoder):
# Convert images to latent space
latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * 0.18215
# Convert masked images to latent space
conditionings = vae.encode(batch["conditioning_images"].to(dtype=weight_dtype)).latent_dist.sample()
conditionings = conditionings * 0.18215
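# 0.18215 is the latent scaling factor of the Stable Diffusion VAE (vae.config.scaling_factor);
# both the target latents and the masked-image latents are scaled with it before diffusion.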
# Downsample mask and weighting so that they match with the latents
masks, size = batch["masks"].to(dtype=weight_dtype), latents.shape[2:]
masks = F.interpolate(masks, size=size)
weightings = batch["weightings"].to(dtype=weight_dtype)
weightings = F.interpolate(weightings, size=size)
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Concatenate noisy latents, masks and conditionings to get inputs to unet
inputs = torch.cat([noisy_latents, masks, conditionings], dim=1)
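# The concatenation yields 4 (noisy latents) + 1 (mask) + 4 (masked-image latents) = 9 input
# channels, which matches the 9 in_channels expected by a Stable Diffusion inpainting UNet,
# the kind of checkpoint this script assumes in pretrained_model_name_or_path.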
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
# Predict the noise residual
model_pred = unet(inputs, timesteps, encoder_hidden_states).sample
# Compute the diffusion loss
assert noise_scheduler.config.prediction_type == "epsilon"
loss = (weightings * F.mse_loss(model_pred.float(), noise.float(), reduction="none")).mean()
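# With prediction_type == "epsilon" the target is simply the sampled noise, so this is a
# per-pixel MSE between predicted and true noise, re-weighted by the weightings map before
# taking the mean.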
# Backpropagate
accelerator.backward(loss)
if accelerator.sync_gradients:
params_to_clip = itertools.chain(unet.parameters(), text_encoder.parameters())
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad(set_to_none=args.set_grads_to_none)
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
if args.report_to == "wandb":
accelerator.print(progress_bar)
global_step += 1
if accelerator.is_main_process:
if global_step % args.checkpointing_steps == 0:
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
)
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
if global_step % args.validation_steps == 0:
log_validation(
text_encoder,
tokenizer,
unet,
args,
accelerator,
weight_dtype,
global_step,
)
logs = {"loss": loss.detach().item()}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
# Save the lora layers
accelerator.wait_for_everyone()
if accelerator.is_main_process:
pipeline = StableDiffusionInpaintPipeline.from_pretrained(
args.pretrained_model_name_or_path,
unet=accelerator.unwrap_model(unet, keep_fp32_wrapper=True).merge_and_unload(),
text_encoder=accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True).merge_and_unload(),
revision=args.revision,
)
pipeline.save_pretrained(args.output_dir)
# Final inference
images = log_validation(
text_encoder,
tokenizer,
unet,
args,
accelerator,
weight_dtype,
global_step,
)
if args.push_to_hub:
save_model_card(
repo_id,
images=images,
base_model=args.pretrained_model_name_or_path,
repo_folder=args.output_dir,
)
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)
| diffusers/examples/research_projects/realfill/train_realfill.py/0 | {
"file_path": "diffusers/examples/research_projects/realfill/train_realfill.py",
"repo_id": "diffusers",
"token_count": 16272
} | 105 |
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
import safetensors
from diffusers import DiffusionPipeline # noqa: E402
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class TextToImageLoRA(ExamplesTestsAccelerate):
def test_text_to_image_lora_sdxl_checkpointing_checkpoints_total_limit(self):
prompt = "a prompt"
pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe"
with tempfile.TemporaryDirectory() as tmpdir:
# Run training script with checkpointing
# max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2
# Should create checkpoints at steps 2, 4, 6
# with checkpoint at step 2 deleted
initial_run_args = f"""
examples/text_to_image/train_text_to_image_lora_sdxl.py
--pretrained_model_name_or_path {pipeline_path}
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 6
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
--checkpointing_steps=2
--checkpoints_total_limit=2
""".split()
run_command(self._launch_args + initial_run_args)
pipe = DiffusionPipeline.from_pretrained(pipeline_path)
pipe.load_lora_weights(tmpdir)
pipe(prompt, num_inference_steps=1)
# check checkpoint directories exist
# checkpoint-2 should have been deleted
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"})
def test_text_to_image_lora_checkpointing_checkpoints_total_limit(self):
pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe"
prompt = "a prompt"
with tempfile.TemporaryDirectory() as tmpdir:
# Run training script with checkpointing
# max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2
# Should create checkpoints at steps 2, 4, 6
# with checkpoint at step 2 deleted
initial_run_args = f"""
examples/text_to_image/train_text_to_image_lora.py
--pretrained_model_name_or_path {pretrained_model_name_or_path}
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--center_crop
--random_flip
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 6
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
--checkpointing_steps=2
--checkpoints_total_limit=2
--seed=0
--num_validation_images=0
""".split()
run_command(self._launch_args + initial_run_args)
pipe = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
)
pipe.load_lora_weights(tmpdir)
pipe(prompt, num_inference_steps=1)
# check checkpoint directories exist
# checkpoint-2 should have been deleted
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"})
def test_text_to_image_lora_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe"
prompt = "a prompt"
with tempfile.TemporaryDirectory() as tmpdir:
# Run training script with checkpointing
# max_train_steps == 4, checkpointing_steps == 2
# Should create checkpoints at steps 2, 4
initial_run_args = f"""
examples/text_to_image/train_text_to_image_lora.py
--pretrained_model_name_or_path {pretrained_model_name_or_path}
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--center_crop
--random_flip
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 4
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
--checkpointing_steps=2
--seed=0
--num_validation_images=0
""".split()
run_command(self._launch_args + initial_run_args)
pipe = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
)
pipe.load_lora_weights(tmpdir)
pipe(prompt, num_inference_steps=1)
# check checkpoint directories exist
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4"},
)
# resume and we should try to checkpoint at 6, where we'll have to remove
# checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint
resume_run_args = f"""
examples/text_to_image/train_text_to_image_lora.py
--pretrained_model_name_or_path {pretrained_model_name_or_path}
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--center_crop
--random_flip
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 8
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
--checkpointing_steps=2
--resume_from_checkpoint=checkpoint-4
--checkpoints_total_limit=2
--seed=0
--num_validation_images=0
""".split()
run_command(self._launch_args + resume_run_args)
pipe = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
)
pipe.load_lora_weights(tmpdir)
pipe(prompt, num_inference_steps=1)
# check checkpoint directories exist
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-6", "checkpoint-8"},
)
class TextToImageLoRASDXL(ExamplesTestsAccelerate):
def test_text_to_image_lora_sdxl(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/text_to_image/train_text_to_image_lora_sdxl.py
--pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
# make sure the state_dict has the correct naming in the parameters.
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
is_lora = all("lora" in k for k in lora_state_dict.keys())
self.assertTrue(is_lora)
def test_text_to_image_lora_sdxl_with_text_encoder(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/text_to_image/train_text_to_image_lora_sdxl.py
--pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
--train_text_encoder
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
# make sure the state_dict has the correct naming in the parameters.
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
is_lora = all("lora" in k for k in lora_state_dict.keys())
self.assertTrue(is_lora)
# since the text encoder is trained as well, all the parameters in the state dict should start
# with `"unet"`, `"text_encoder"` or `"text_encoder_2"` in their names.
keys = lora_state_dict.keys()
starts_with_unet = all(
k.startswith("unet") or k.startswith("text_encoder") or k.startswith("text_encoder_2") for k in keys
)
self.assertTrue(starts_with_unet)
def test_text_to_image_lora_sdxl_text_encoder_checkpointing_checkpoints_total_limit(self):
prompt = "a prompt"
pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe"
with tempfile.TemporaryDirectory() as tmpdir:
# Run training script with checkpointing
# max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2
# Should create checkpoints at steps 2, 4, 6
# with checkpoint at step 2 deleted
initial_run_args = f"""
examples/text_to_image/train_text_to_image_lora_sdxl.py
--pretrained_model_name_or_path {pipeline_path}
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 6
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--train_text_encoder
--lr_warmup_steps 0
--output_dir {tmpdir}
--checkpointing_steps=2
--checkpoints_total_limit=2
""".split()
run_command(self._launch_args + initial_run_args)
pipe = DiffusionPipeline.from_pretrained(pipeline_path)
pipe.load_lora_weights(tmpdir)
pipe(prompt, num_inference_steps=1)
# check checkpoint directories exist
# checkpoint-2 should have been deleted
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"})
| diffusers/examples/text_to_image/test_text_to_image_lora.py/0 | {
"file_path": "diffusers/examples/text_to_image/test_text_to_image_lora.py",
"repo_id": "diffusers",
"token_count": 6164
} | 106 |
import argparse
import time
from pathlib import Path
from typing import Any, Dict, Literal
import torch
from diffusers import AsymmetricAutoencoderKL
ASYMMETRIC_AUTOENCODER_KL_x_1_5_CONFIG = {
"in_channels": 3,
"out_channels": 3,
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],
"down_block_out_channels": [128, 256, 512, 512],
"layers_per_down_block": 2,
"up_block_types": [
"UpDecoderBlock2D",
"UpDecoderBlock2D",
"UpDecoderBlock2D",
"UpDecoderBlock2D",
],
"up_block_out_channels": [192, 384, 768, 768],
"layers_per_up_block": 3,
"act_fn": "silu",
"latent_channels": 4,
"norm_num_groups": 32,
"sample_size": 256,
"scaling_factor": 0.18215,
}
ASYMMETRIC_AUTOENCODER_KL_x_2_CONFIG = {
"in_channels": 3,
"out_channels": 3,
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],
"down_block_out_channels": [128, 256, 512, 512],
"layers_per_down_block": 2,
"up_block_types": [
"UpDecoderBlock2D",
"UpDecoderBlock2D",
"UpDecoderBlock2D",
"UpDecoderBlock2D",
],
"up_block_out_channels": [256, 512, 1024, 1024],
"layers_per_up_block": 5,
"act_fn": "silu",
"latent_channels": 4,
"norm_num_groups": 32,
"sample_size": 256,
"scaling_factor": 0.18215,
}
def convert_asymmetric_autoencoder_kl_state_dict(original_state_dict: Dict[str, Any]) -> Dict[str, Any]:
converted_state_dict = {}
for k, v in original_state_dict.items():
if k.startswith("encoder."):
converted_state_dict[
k.replace("encoder.down.", "encoder.down_blocks.")
.replace("encoder.mid.", "encoder.mid_block.")
.replace("encoder.norm_out.", "encoder.conv_norm_out.")
.replace(".downsample.", ".downsamplers.0.")
.replace(".nin_shortcut.", ".conv_shortcut.")
.replace(".block.", ".resnets.")
.replace(".block_1.", ".resnets.0.")
.replace(".block_2.", ".resnets.1.")
.replace(".attn_1.k.", ".attentions.0.to_k.")
.replace(".attn_1.q.", ".attentions.0.to_q.")
.replace(".attn_1.v.", ".attentions.0.to_v.")
.replace(".attn_1.proj_out.", ".attentions.0.to_out.0.")
.replace(".attn_1.norm.", ".attentions.0.group_norm.")
] = v
elif k.startswith("decoder.") and "up_layers" not in k:
converted_state_dict[
k.replace("decoder.encoder.", "decoder.condition_encoder.")
.replace(".norm_out.", ".conv_norm_out.")
.replace(".up.0.", ".up_blocks.3.")
.replace(".up.1.", ".up_blocks.2.")
.replace(".up.2.", ".up_blocks.1.")
.replace(".up.3.", ".up_blocks.0.")
.replace(".block.", ".resnets.")
.replace("mid", "mid_block")
.replace(".0.upsample.", ".0.upsamplers.0.")
.replace(".1.upsample.", ".1.upsamplers.0.")
.replace(".2.upsample.", ".2.upsamplers.0.")
.replace(".nin_shortcut.", ".conv_shortcut.")
.replace(".block_1.", ".resnets.0.")
.replace(".block_2.", ".resnets.1.")
.replace(".attn_1.k.", ".attentions.0.to_k.")
.replace(".attn_1.q.", ".attentions.0.to_q.")
.replace(".attn_1.v.", ".attentions.0.to_v.")
.replace(".attn_1.proj_out.", ".attentions.0.to_out.0.")
.replace(".attn_1.norm.", ".attentions.0.group_norm.")
] = v
elif k.startswith("quant_conv."):
converted_state_dict[k] = v
elif k.startswith("post_quant_conv."):
converted_state_dict[k] = v
else:
print(f" skipping key `{k}`")
# fix weights shape
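# The original attention projections are stored as 1x1 convolutions with weights of shape
# (out_channels, in_channels, 1, 1); diffusers' Attention uses nn.Linear, so the trailing
# singleton spatial dims are dropped below to obtain (out_channels, in_channels).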
for k, v in converted_state_dict.items():
if (
(k.startswith("encoder.mid_block.attentions.0") or k.startswith("decoder.mid_block.attentions.0"))
and k.endswith("weight")
and ("to_q" in k or "to_k" in k or "to_v" in k or "to_out" in k)
):
converted_state_dict[k] = converted_state_dict[k][:, :, 0, 0]
return converted_state_dict
def get_asymmetric_autoencoder_kl_from_original_checkpoint(
scale: Literal["1.5", "2"], original_checkpoint_path: str, map_location: torch.device
) -> AsymmetricAutoencoderKL:
print("Loading original state_dict")
original_state_dict = torch.load(original_checkpoint_path, map_location=map_location)
original_state_dict = original_state_dict["state_dict"]
print("Converting state_dict")
converted_state_dict = convert_asymmetric_autoencoder_kl_state_dict(original_state_dict)
kwargs = ASYMMETRIC_AUTOENCODER_KL_x_1_5_CONFIG if scale == "1.5" else ASYMMETRIC_AUTOENCODER_KL_x_2_CONFIG
print("Initializing AsymmetricAutoencoderKL model")
asymmetric_autoencoder_kl = AsymmetricAutoencoderKL(**kwargs)
print("Loading weight from converted state_dict")
asymmetric_autoencoder_kl.load_state_dict(converted_state_dict)
asymmetric_autoencoder_kl.eval()
print("AsymmetricAutoencoderKL successfully initialized")
return asymmetric_autoencoder_kl
if __name__ == "__main__":
start = time.time()
parser = argparse.ArgumentParser()
parser.add_argument(
"--scale",
default=None,
type=str,
required=True,
help="Asymmetric VQGAN scale: `1.5` or `2`",
)
parser.add_argument(
"--original_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the original Asymmetric VQGAN checkpoint",
)
parser.add_argument(
"--output_path",
default=None,
type=str,
required=True,
help="Path to save pretrained AsymmetricAutoencoderKL model",
)
parser.add_argument(
"--map_location",
default="cpu",
type=str,
required=False,
help="The device passed to `map_location` when loading the checkpoint",
)
args = parser.parse_args()
assert args.scale in ["1.5", "2"], f"{args.scale} should be `1.5` of `2`"
assert Path(args.original_checkpoint_path).is_file()
asymmetric_autoencoder_kl = get_asymmetric_autoencoder_kl_from_original_checkpoint(
scale=args.scale,
original_checkpoint_path=args.original_checkpoint_path,
map_location=torch.device(args.map_location),
)
print("Saving pretrained AsymmetricAutoencoderKL")
asymmetric_autoencoder_kl.save_pretrained(args.output_path)
print(f"Done in {time.time() - start:.2f} seconds")
| diffusers/scripts/convert_asymmetric_vqgan_to_diffusers.py/0 | {
"file_path": "diffusers/scripts/convert_asymmetric_vqgan_to_diffusers.py",
"repo_id": "diffusers",
"token_count": 3351
} | 107 |
import argparse
import os
import tempfile
import torch
from accelerate import load_checkpoint_and_dispatch
from diffusers import UNet2DConditionModel
from diffusers.models.transformers.prior_transformer import PriorTransformer
from diffusers.models.vq_model import VQModel
"""
Example - From the diffusers root directory:
Download weights:
```sh
$ wget https://huggingface.co/ai-forever/Kandinsky_2.1/blob/main/prior_fp16.ckpt
```
Convert the model:
```sh
python scripts/convert_kandinsky_to_diffusers.py \
--prior_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/prior_fp16.ckpt \
--clip_stat_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/ViT-L-14_stats.th \
--text2img_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/decoder_fp16.ckpt \
--inpaint_text2img_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/inpainting_fp16.ckpt \
--movq_checkpoint_path /home/yiyi_huggingface_co/Kandinsky-2/checkpoints_Kandinsky_2.1/movq_final.ckpt \
--dump_path /home/yiyi_huggingface_co/dump \
--debug decoder
```
"""
# prior
PRIOR_ORIGINAL_PREFIX = "model"
# Uses default arguments
PRIOR_CONFIG = {}
def prior_model_from_original_config():
model = PriorTransformer(**PRIOR_CONFIG)
return model
def prior_original_checkpoint_to_diffusers_checkpoint(model, checkpoint, clip_stats_checkpoint):
diffusers_checkpoint = {}
# <original>.time_embed.0 -> <diffusers>.time_embedding.linear_1
diffusers_checkpoint.update(
{
"time_embedding.linear_1.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.0.weight"],
"time_embedding.linear_1.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.0.bias"],
}
)
# <original>.clip_img_proj -> <diffusers>.proj_in
diffusers_checkpoint.update(
{
"proj_in.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_img_proj.weight"],
"proj_in.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.clip_img_proj.bias"],
}
)
# <original>.text_emb_proj -> <diffusers>.embedding_proj
diffusers_checkpoint.update(
{
"embedding_proj.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_emb_proj.weight"],
"embedding_proj.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_emb_proj.bias"],
}
)
# <original>.text_enc_proj -> <diffusers>.encoder_hidden_states_proj
diffusers_checkpoint.update(
{
"encoder_hidden_states_proj.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_enc_proj.weight"],
"encoder_hidden_states_proj.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.text_enc_proj.bias"],
}
)
# <original>.positional_embedding -> <diffusers>.positional_embedding
diffusers_checkpoint.update({"positional_embedding": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.positional_embedding"]})
# <original>.prd_emb -> <diffusers>.prd_embedding
diffusers_checkpoint.update({"prd_embedding": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.prd_emb"]})
# <original>.time_embed.2 -> <diffusers>.time_embedding.linear_2
diffusers_checkpoint.update(
{
"time_embedding.linear_2.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.2.weight"],
"time_embedding.linear_2.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.time_embed.2.bias"],
}
)
# <original>.resblocks.<x> -> <diffusers>.transformer_blocks.<x>
for idx in range(len(model.transformer_blocks)):
diffusers_transformer_prefix = f"transformer_blocks.{idx}"
original_transformer_prefix = f"{PRIOR_ORIGINAL_PREFIX}.transformer.resblocks.{idx}"
# <original>.attn -> <diffusers>.attn1
diffusers_attention_prefix = f"{diffusers_transformer_prefix}.attn1"
original_attention_prefix = f"{original_transformer_prefix}.attn"
diffusers_checkpoint.update(
prior_attention_to_diffusers(
checkpoint,
diffusers_attention_prefix=diffusers_attention_prefix,
original_attention_prefix=original_attention_prefix,
attention_head_dim=model.attention_head_dim,
)
)
# <original>.mlp -> <diffusers>.ff
diffusers_ff_prefix = f"{diffusers_transformer_prefix}.ff"
original_ff_prefix = f"{original_transformer_prefix}.mlp"
diffusers_checkpoint.update(
prior_ff_to_diffusers(
checkpoint, diffusers_ff_prefix=diffusers_ff_prefix, original_ff_prefix=original_ff_prefix
)
)
# <original>.ln_1 -> <diffusers>.norm1
diffusers_checkpoint.update(
{
f"{diffusers_transformer_prefix}.norm1.weight": checkpoint[
f"{original_transformer_prefix}.ln_1.weight"
],
f"{diffusers_transformer_prefix}.norm1.bias": checkpoint[f"{original_transformer_prefix}.ln_1.bias"],
}
)
# <original>.ln_2 -> <diffusers>.norm3
diffusers_checkpoint.update(
{
f"{diffusers_transformer_prefix}.norm3.weight": checkpoint[
f"{original_transformer_prefix}.ln_2.weight"
],
f"{diffusers_transformer_prefix}.norm3.bias": checkpoint[f"{original_transformer_prefix}.ln_2.bias"],
}
)
# <original>.final_ln -> <diffusers>.norm_out
diffusers_checkpoint.update(
{
"norm_out.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.final_ln.weight"],
"norm_out.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.final_ln.bias"],
}
)
# <original>.out_proj -> <diffusers>.proj_to_clip_embeddings
diffusers_checkpoint.update(
{
"proj_to_clip_embeddings.weight": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.out_proj.weight"],
"proj_to_clip_embeddings.bias": checkpoint[f"{PRIOR_ORIGINAL_PREFIX}.out_proj.bias"],
}
)
# clip stats
clip_mean, clip_std = clip_stats_checkpoint
clip_mean = clip_mean[None, :]
clip_std = clip_std[None, :]
diffusers_checkpoint.update({"clip_mean": clip_mean, "clip_std": clip_std})
return diffusers_checkpoint
def prior_attention_to_diffusers(
checkpoint, *, diffusers_attention_prefix, original_attention_prefix, attention_head_dim
):
diffusers_checkpoint = {}
# <original>.c_qkv -> <diffusers>.{to_q, to_k, to_v}
[q_weight, k_weight, v_weight], [q_bias, k_bias, v_bias] = split_attentions(
weight=checkpoint[f"{original_attention_prefix}.c_qkv.weight"],
bias=checkpoint[f"{original_attention_prefix}.c_qkv.bias"],
split=3,
chunk_size=attention_head_dim,
)
diffusers_checkpoint.update(
{
f"{diffusers_attention_prefix}.to_q.weight": q_weight,
f"{diffusers_attention_prefix}.to_q.bias": q_bias,
f"{diffusers_attention_prefix}.to_k.weight": k_weight,
f"{diffusers_attention_prefix}.to_k.bias": k_bias,
f"{diffusers_attention_prefix}.to_v.weight": v_weight,
f"{diffusers_attention_prefix}.to_v.bias": v_bias,
}
)
# <original>.c_proj -> <diffusers>.to_out.0
diffusers_checkpoint.update(
{
f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{original_attention_prefix}.c_proj.weight"],
f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{original_attention_prefix}.c_proj.bias"],
}
)
return diffusers_checkpoint
def prior_ff_to_diffusers(checkpoint, *, diffusers_ff_prefix, original_ff_prefix):
diffusers_checkpoint = {
# <original>.c_fc -> <diffusers>.net.0.proj
f"{diffusers_ff_prefix}.net.{0}.proj.weight": checkpoint[f"{original_ff_prefix}.c_fc.weight"],
f"{diffusers_ff_prefix}.net.{0}.proj.bias": checkpoint[f"{original_ff_prefix}.c_fc.bias"],
# <original>.c_proj -> <diffusers>.net.2
f"{diffusers_ff_prefix}.net.{2}.weight": checkpoint[f"{original_ff_prefix}.c_proj.weight"],
f"{diffusers_ff_prefix}.net.{2}.bias": checkpoint[f"{original_ff_prefix}.c_proj.bias"],
}
return diffusers_checkpoint
# done prior
# unet
# We are hardcoding the model configuration for now. If we need to generalize to more model
# configurations, we can update it then.
UNET_CONFIG = {
"act_fn": "silu",
"addition_embed_type": "text_image",
"addition_embed_type_num_heads": 64,
"attention_head_dim": 64,
"block_out_channels": [384, 768, 1152, 1536],
"center_input_sample": False,
"class_embed_type": None,
"class_embeddings_concat": False,
"conv_in_kernel": 3,
"conv_out_kernel": 3,
"cross_attention_dim": 768,
"cross_attention_norm": None,
"down_block_types": [
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
"SimpleCrossAttnDownBlock2D",
"SimpleCrossAttnDownBlock2D",
],
"downsample_padding": 1,
"dual_cross_attention": False,
"encoder_hid_dim": 1024,
"encoder_hid_dim_type": "text_image_proj",
"flip_sin_to_cos": True,
"freq_shift": 0,
"in_channels": 4,
"layers_per_block": 3,
"mid_block_only_cross_attention": None,
"mid_block_scale_factor": 1,
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"norm_eps": 1e-05,
"norm_num_groups": 32,
"num_class_embeds": None,
"only_cross_attention": False,
"out_channels": 8,
"projection_class_embeddings_input_dim": None,
"resnet_out_scale_factor": 1.0,
"resnet_skip_time_act": False,
"resnet_time_scale_shift": "scale_shift",
"sample_size": 64,
"time_cond_proj_dim": None,
"time_embedding_act_fn": None,
"time_embedding_dim": None,
"time_embedding_type": "positional",
"timestep_post_act": None,
"up_block_types": [
"SimpleCrossAttnUpBlock2D",
"SimpleCrossAttnUpBlock2D",
"SimpleCrossAttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"upcast_attention": False,
"use_linear_projection": False,
}
def unet_model_from_original_config():
model = UNet2DConditionModel(**UNET_CONFIG)
return model
def unet_original_checkpoint_to_diffusers_checkpoint(model, checkpoint):
diffusers_checkpoint = {}
num_head_channels = UNET_CONFIG["attention_head_dim"]
diffusers_checkpoint.update(unet_time_embeddings(checkpoint))
diffusers_checkpoint.update(unet_conv_in(checkpoint))
diffusers_checkpoint.update(unet_add_embedding(checkpoint))
diffusers_checkpoint.update(unet_encoder_hid_proj(checkpoint))
# <original>.input_blocks -> <diffusers>.down_blocks
original_down_block_idx = 1
for diffusers_down_block_idx in range(len(model.down_blocks)):
checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint(
model,
checkpoint,
diffusers_down_block_idx=diffusers_down_block_idx,
original_down_block_idx=original_down_block_idx,
num_head_channels=num_head_channels,
)
original_down_block_idx += num_original_down_blocks
diffusers_checkpoint.update(checkpoint_update)
# done <original>.input_blocks -> <diffusers>.down_blocks
diffusers_checkpoint.update(
unet_midblock_to_diffusers_checkpoint(
model,
checkpoint,
num_head_channels=num_head_channels,
)
)
# <original>.output_blocks -> <diffusers>.up_blocks
original_up_block_idx = 0
for diffusers_up_block_idx in range(len(model.up_blocks)):
checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint(
model,
checkpoint,
diffusers_up_block_idx=diffusers_up_block_idx,
original_up_block_idx=original_up_block_idx,
num_head_channels=num_head_channels,
)
original_up_block_idx += num_original_up_blocks
diffusers_checkpoint.update(checkpoint_update)
# done <original>.output_blocks -> <diffusers>.up_blocks
diffusers_checkpoint.update(unet_conv_norm_out(checkpoint))
diffusers_checkpoint.update(unet_conv_out(checkpoint))
return diffusers_checkpoint
# done unet
# inpaint unet
# We are hardcoding the model configuration for now. If we need to generalize to more model
# configurations, we can update it then.
INPAINT_UNET_CONFIG = {
"act_fn": "silu",
"addition_embed_type": "text_image",
"addition_embed_type_num_heads": 64,
"attention_head_dim": 64,
"block_out_channels": [384, 768, 1152, 1536],
"center_input_sample": False,
"class_embed_type": None,
"class_embeddings_concat": None,
"conv_in_kernel": 3,
"conv_out_kernel": 3,
"cross_attention_dim": 768,
"cross_attention_norm": None,
"down_block_types": [
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
"SimpleCrossAttnDownBlock2D",
"SimpleCrossAttnDownBlock2D",
],
"downsample_padding": 1,
"dual_cross_attention": False,
"encoder_hid_dim": 1024,
"encoder_hid_dim_type": "text_image_proj",
"flip_sin_to_cos": True,
"freq_shift": 0,
"in_channels": 9,
"layers_per_block": 3,
"mid_block_only_cross_attention": None,
"mid_block_scale_factor": 1,
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"norm_eps": 1e-05,
"norm_num_groups": 32,
"num_class_embeds": None,
"only_cross_attention": False,
"out_channels": 8,
"projection_class_embeddings_input_dim": None,
"resnet_out_scale_factor": 1.0,
"resnet_skip_time_act": False,
"resnet_time_scale_shift": "scale_shift",
"sample_size": 64,
"time_cond_proj_dim": None,
"time_embedding_act_fn": None,
"time_embedding_dim": None,
"time_embedding_type": "positional",
"timestep_post_act": None,
"up_block_types": [
"SimpleCrossAttnUpBlock2D",
"SimpleCrossAttnUpBlock2D",
"SimpleCrossAttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"upcast_attention": False,
"use_linear_projection": False,
}
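# Note: apart from minor differences, the inpainting config differs from UNET_CONFIG mainly in
# in_channels (9 instead of 4); the extra channels presumably carry the inpainting mask and the
# masked-image latents that are concatenated to the noisy latents at the UNet input.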
def inpaint_unet_model_from_original_config():
model = UNet2DConditionModel(**INPAINT_UNET_CONFIG)
return model
def inpaint_unet_original_checkpoint_to_diffusers_checkpoint(model, checkpoint):
diffusers_checkpoint = {}
num_head_channels = INPAINT_UNET_CONFIG["attention_head_dim"]
diffusers_checkpoint.update(unet_time_embeddings(checkpoint))
diffusers_checkpoint.update(unet_conv_in(checkpoint))
diffusers_checkpoint.update(unet_add_embedding(checkpoint))
diffusers_checkpoint.update(unet_encoder_hid_proj(checkpoint))
# <original>.input_blocks -> <diffusers>.down_blocks
original_down_block_idx = 1
for diffusers_down_block_idx in range(len(model.down_blocks)):
checkpoint_update, num_original_down_blocks = unet_downblock_to_diffusers_checkpoint(
model,
checkpoint,
diffusers_down_block_idx=diffusers_down_block_idx,
original_down_block_idx=original_down_block_idx,
num_head_channels=num_head_channels,
)
original_down_block_idx += num_original_down_blocks
diffusers_checkpoint.update(checkpoint_update)
# done <original>.input_blocks -> <diffusers>.down_blocks
diffusers_checkpoint.update(
unet_midblock_to_diffusers_checkpoint(
model,
checkpoint,
num_head_channels=num_head_channels,
)
)
# <original>.output_blocks -> <diffusers>.up_blocks
original_up_block_idx = 0
for diffusers_up_block_idx in range(len(model.up_blocks)):
checkpoint_update, num_original_up_blocks = unet_upblock_to_diffusers_checkpoint(
model,
checkpoint,
diffusers_up_block_idx=diffusers_up_block_idx,
original_up_block_idx=original_up_block_idx,
num_head_channels=num_head_channels,
)
original_up_block_idx += num_original_up_blocks
diffusers_checkpoint.update(checkpoint_update)
# done <original>.output_blocks -> <diffusers>.up_blocks
diffusers_checkpoint.update(unet_conv_norm_out(checkpoint))
diffusers_checkpoint.update(unet_conv_out(checkpoint))
return diffusers_checkpoint
# done inpaint unet
# unet utils
# <original>.time_embed -> <diffusers>.time_embedding
def unet_time_embeddings(checkpoint):
diffusers_checkpoint = {}
diffusers_checkpoint.update(
{
"time_embedding.linear_1.weight": checkpoint["time_embed.0.weight"],
"time_embedding.linear_1.bias": checkpoint["time_embed.0.bias"],
"time_embedding.linear_2.weight": checkpoint["time_embed.2.weight"],
"time_embedding.linear_2.bias": checkpoint["time_embed.2.bias"],
}
)
return diffusers_checkpoint
# <original>.input_blocks.0 -> <diffusers>.conv_in
def unet_conv_in(checkpoint):
diffusers_checkpoint = {}
diffusers_checkpoint.update(
{
"conv_in.weight": checkpoint["input_blocks.0.0.weight"],
"conv_in.bias": checkpoint["input_blocks.0.0.bias"],
}
)
return diffusers_checkpoint
def unet_add_embedding(checkpoint):
diffusers_checkpoint = {}
diffusers_checkpoint.update(
{
"add_embedding.text_norm.weight": checkpoint["ln_model_n.weight"],
"add_embedding.text_norm.bias": checkpoint["ln_model_n.bias"],
"add_embedding.text_proj.weight": checkpoint["proj_n.weight"],
"add_embedding.text_proj.bias": checkpoint["proj_n.bias"],
"add_embedding.image_proj.weight": checkpoint["img_layer.weight"],
"add_embedding.image_proj.bias": checkpoint["img_layer.bias"],
}
)
return diffusers_checkpoint
def unet_encoder_hid_proj(checkpoint):
diffusers_checkpoint = {}
diffusers_checkpoint.update(
{
"encoder_hid_proj.image_embeds.weight": checkpoint["clip_to_seq.weight"],
"encoder_hid_proj.image_embeds.bias": checkpoint["clip_to_seq.bias"],
"encoder_hid_proj.text_proj.weight": checkpoint["to_model_dim_n.weight"],
"encoder_hid_proj.text_proj.bias": checkpoint["to_model_dim_n.bias"],
}
)
return diffusers_checkpoint
# <original>.out.0 -> <diffusers>.conv_norm_out
def unet_conv_norm_out(checkpoint):
diffusers_checkpoint = {}
diffusers_checkpoint.update(
{
"conv_norm_out.weight": checkpoint["out.0.weight"],
"conv_norm_out.bias": checkpoint["out.0.bias"],
}
)
return diffusers_checkpoint
# <original>.out.2 -> <diffusers>.conv_out
def unet_conv_out(checkpoint):
diffusers_checkpoint = {}
diffusers_checkpoint.update(
{
"conv_out.weight": checkpoint["out.2.weight"],
"conv_out.bias": checkpoint["out.2.bias"],
}
)
return diffusers_checkpoint
# <original>.input_blocks -> <diffusers>.down_blocks
def unet_downblock_to_diffusers_checkpoint(
model, checkpoint, *, diffusers_down_block_idx, original_down_block_idx, num_head_channels
):
diffusers_checkpoint = {}
diffusers_resnet_prefix = f"down_blocks.{diffusers_down_block_idx}.resnets"
original_down_block_prefix = "input_blocks"
down_block = model.down_blocks[diffusers_down_block_idx]
num_resnets = len(down_block.resnets)
if down_block.downsamplers is None:
downsampler = False
else:
assert len(down_block.downsamplers) == 1
downsampler = True
# The downsample block is also a resnet
num_resnets += 1
for resnet_idx_inc in range(num_resnets):
full_resnet_prefix = f"{original_down_block_prefix}.{original_down_block_idx + resnet_idx_inc}.0"
if downsampler and resnet_idx_inc == num_resnets - 1:
# this is a downsample block
full_diffusers_resnet_prefix = f"down_blocks.{diffusers_down_block_idx}.downsamplers.0"
else:
# this is a regular resnet block
full_diffusers_resnet_prefix = f"{diffusers_resnet_prefix}.{resnet_idx_inc}"
diffusers_checkpoint.update(
resnet_to_diffusers_checkpoint(
checkpoint, resnet_prefix=full_resnet_prefix, diffusers_resnet_prefix=full_diffusers_resnet_prefix
)
)
if hasattr(down_block, "attentions"):
num_attentions = len(down_block.attentions)
diffusers_attention_prefix = f"down_blocks.{diffusers_down_block_idx}.attentions"
for attention_idx_inc in range(num_attentions):
full_attention_prefix = f"{original_down_block_prefix}.{original_down_block_idx + attention_idx_inc}.1"
full_diffusers_attention_prefix = f"{diffusers_attention_prefix}.{attention_idx_inc}"
diffusers_checkpoint.update(
attention_to_diffusers_checkpoint(
checkpoint,
attention_prefix=full_attention_prefix,
diffusers_attention_prefix=full_diffusers_attention_prefix,
num_head_channels=num_head_channels,
)
)
num_original_down_blocks = num_resnets
return diffusers_checkpoint, num_original_down_blocks
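# A note on the index bookkeeping above (derived from this file): the original UNet stores its
# down path as a flat `input_blocks` list (entry 0 being conv_in), where each entry holds a
# resnet at sub-index 0 and, optionally, an attention at sub-index 1, and downsamplers are also
# stored as resnets. The function therefore returns how many original entries it consumed so the
# caller can advance `original_down_block_idx` before converting the next diffusers down block.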
# <original>.middle_block -> <diffusers>.mid_block
def unet_midblock_to_diffusers_checkpoint(model, checkpoint, *, num_head_channels):
diffusers_checkpoint = {}
# block 0
original_block_idx = 0
diffusers_checkpoint.update(
resnet_to_diffusers_checkpoint(
checkpoint,
diffusers_resnet_prefix="mid_block.resnets.0",
resnet_prefix=f"middle_block.{original_block_idx}",
)
)
original_block_idx += 1
# optional block 1
if hasattr(model.mid_block, "attentions") and model.mid_block.attentions[0] is not None:
diffusers_checkpoint.update(
attention_to_diffusers_checkpoint(
checkpoint,
diffusers_attention_prefix="mid_block.attentions.0",
attention_prefix=f"middle_block.{original_block_idx}",
num_head_channels=num_head_channels,
)
)
original_block_idx += 1
# block 1 or block 2
diffusers_checkpoint.update(
resnet_to_diffusers_checkpoint(
checkpoint,
diffusers_resnet_prefix="mid_block.resnets.1",
resnet_prefix=f"middle_block.{original_block_idx}",
)
)
return diffusers_checkpoint
# <original>.output_blocks -> <diffusers>.up_blocks
def unet_upblock_to_diffusers_checkpoint(
model, checkpoint, *, diffusers_up_block_idx, original_up_block_idx, num_head_channels
):
diffusers_checkpoint = {}
diffusers_resnet_prefix = f"up_blocks.{diffusers_up_block_idx}.resnets"
original_up_block_prefix = "output_blocks"
up_block = model.up_blocks[diffusers_up_block_idx]
num_resnets = len(up_block.resnets)
if up_block.upsamplers is None:
upsampler = False
else:
assert len(up_block.upsamplers) == 1
upsampler = True
# The upsample block is also a resnet
num_resnets += 1
has_attentions = hasattr(up_block, "attentions")
for resnet_idx_inc in range(num_resnets):
if upsampler and resnet_idx_inc == num_resnets - 1:
# this is an upsample block
if has_attentions:
# There is a middle attention block that we skip
original_resnet_block_idx = 2
else:
original_resnet_block_idx = 1
# we add the `minus 1` because the last two resnets are stuck together in the same output block
full_resnet_prefix = (
f"{original_up_block_prefix}.{original_up_block_idx + resnet_idx_inc - 1}.{original_resnet_block_idx}"
)
full_diffusers_resnet_prefix = f"up_blocks.{diffusers_up_block_idx}.upsamplers.0"
else:
# this is a regular resnet block
full_resnet_prefix = f"{original_up_block_prefix}.{original_up_block_idx + resnet_idx_inc}.0"
full_diffusers_resnet_prefix = f"{diffusers_resnet_prefix}.{resnet_idx_inc}"
diffusers_checkpoint.update(
resnet_to_diffusers_checkpoint(
checkpoint, resnet_prefix=full_resnet_prefix, diffusers_resnet_prefix=full_diffusers_resnet_prefix
)
)
if has_attentions:
num_attentions = len(up_block.attentions)
diffusers_attention_prefix = f"up_blocks.{diffusers_up_block_idx}.attentions"
for attention_idx_inc in range(num_attentions):
full_attention_prefix = f"{original_up_block_prefix}.{original_up_block_idx + attention_idx_inc}.1"
full_diffusers_attention_prefix = f"{diffusers_attention_prefix}.{attention_idx_inc}"
diffusers_checkpoint.update(
attention_to_diffusers_checkpoint(
checkpoint,
attention_prefix=full_attention_prefix,
diffusers_attention_prefix=full_diffusers_attention_prefix,
num_head_channels=num_head_channels,
)
)
num_original_up_blocks = num_resnets - 1 if upsampler else num_resnets
return diffusers_checkpoint, num_original_up_blocks
def resnet_to_diffusers_checkpoint(checkpoint, *, diffusers_resnet_prefix, resnet_prefix):
diffusers_checkpoint = {
f"{diffusers_resnet_prefix}.norm1.weight": checkpoint[f"{resnet_prefix}.in_layers.0.weight"],
f"{diffusers_resnet_prefix}.norm1.bias": checkpoint[f"{resnet_prefix}.in_layers.0.bias"],
f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.in_layers.2.weight"],
f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.in_layers.2.bias"],
f"{diffusers_resnet_prefix}.time_emb_proj.weight": checkpoint[f"{resnet_prefix}.emb_layers.1.weight"],
f"{diffusers_resnet_prefix}.time_emb_proj.bias": checkpoint[f"{resnet_prefix}.emb_layers.1.bias"],
f"{diffusers_resnet_prefix}.norm2.weight": checkpoint[f"{resnet_prefix}.out_layers.0.weight"],
f"{diffusers_resnet_prefix}.norm2.bias": checkpoint[f"{resnet_prefix}.out_layers.0.bias"],
f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.out_layers.3.weight"],
f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.out_layers.3.bias"],
}
skip_connection_prefix = f"{resnet_prefix}.skip_connection"
if f"{skip_connection_prefix}.weight" in checkpoint:
diffusers_checkpoint.update(
{
f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{skip_connection_prefix}.weight"],
f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{skip_connection_prefix}.bias"],
}
)
return diffusers_checkpoint
def attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix, num_head_channels):
diffusers_checkpoint = {}
# <original>.norm -> <diffusers>.group_norm
diffusers_checkpoint.update(
{
f"{diffusers_attention_prefix}.group_norm.weight": checkpoint[f"{attention_prefix}.norm.weight"],
f"{diffusers_attention_prefix}.group_norm.bias": checkpoint[f"{attention_prefix}.norm.bias"],
}
)
# <original>.qkv -> <diffusers>.{query, key, value}
[q_weight, k_weight, v_weight], [q_bias, k_bias, v_bias] = split_attentions(
weight=checkpoint[f"{attention_prefix}.qkv.weight"][:, :, 0],
bias=checkpoint[f"{attention_prefix}.qkv.bias"],
split=3,
chunk_size=num_head_channels,
)
diffusers_checkpoint.update(
{
f"{diffusers_attention_prefix}.to_q.weight": q_weight,
f"{diffusers_attention_prefix}.to_q.bias": q_bias,
f"{diffusers_attention_prefix}.to_k.weight": k_weight,
f"{diffusers_attention_prefix}.to_k.bias": k_bias,
f"{diffusers_attention_prefix}.to_v.weight": v_weight,
f"{diffusers_attention_prefix}.to_v.bias": v_bias,
}
)
# <original>.encoder_kv -> <diffusers>.{context_key, context_value}
[encoder_k_weight, encoder_v_weight], [encoder_k_bias, encoder_v_bias] = split_attentions(
weight=checkpoint[f"{attention_prefix}.encoder_kv.weight"][:, :, 0],
bias=checkpoint[f"{attention_prefix}.encoder_kv.bias"],
split=2,
chunk_size=num_head_channels,
)
diffusers_checkpoint.update(
{
f"{diffusers_attention_prefix}.add_k_proj.weight": encoder_k_weight,
f"{diffusers_attention_prefix}.add_k_proj.bias": encoder_k_bias,
f"{diffusers_attention_prefix}.add_v_proj.weight": encoder_v_weight,
f"{diffusers_attention_prefix}.add_v_proj.bias": encoder_v_bias,
}
)
# <original>.proj_out (1d conv) -> <diffusers>.proj_attn (linear)
diffusers_checkpoint.update(
{
f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][
:, :, 0
],
f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj_out.bias"],
}
)
return diffusers_checkpoint
# TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?)
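# Sketch of what split_attentions does (a reading of the loop below, not original documentation):
# the fused projection weight is assumed to be laid out head-by-head in blocks of `chunk_size`
# rows, e.g. [q_head0, k_head0, v_head0, q_head1, k_head1, v_head1, ...]. The loop walks those
# blocks and deals them out round-robin into `split` buckets, so for split=3 it rebuilds the
# full per-projection matrices:
#   (q_w, k_w, v_w), (q_b, k_b, v_b) = split_attentions(weight=w, bias=b, split=3, chunk_size=head_dim)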
def split_attentions(*, weight, bias, split, chunk_size):
weights = [None] * split
biases = [None] * split
weights_biases_idx = 0
for starting_row_index in range(0, weight.shape[0], chunk_size):
row_indices = torch.arange(starting_row_index, starting_row_index + chunk_size)
weight_rows = weight[row_indices, :]
bias_rows = bias[row_indices]
if weights[weights_biases_idx] is None:
assert weights[weights_biases_idx] is None
weights[weights_biases_idx] = weight_rows
biases[weights_biases_idx] = bias_rows
else:
assert weights[weights_biases_idx] is not None
weights[weights_biases_idx] = torch.concat([weights[weights_biases_idx], weight_rows])
biases[weights_biases_idx] = torch.concat([biases[weights_biases_idx], bias_rows])
weights_biases_idx = (weights_biases_idx + 1) % split
return weights, biases
# done unet utils
def prior(*, args, checkpoint_map_location):
print("loading prior")
prior_checkpoint = torch.load(args.prior_checkpoint_path, map_location=checkpoint_map_location)
clip_stats_checkpoint = torch.load(args.clip_stat_path, map_location=checkpoint_map_location)
prior_model = prior_model_from_original_config()
prior_diffusers_checkpoint = prior_original_checkpoint_to_diffusers_checkpoint(
prior_model, prior_checkpoint, clip_stats_checkpoint
)
del prior_checkpoint
del clip_stats_checkpoint
load_checkpoint_to_model(prior_diffusers_checkpoint, prior_model, strict=True)
print("done loading prior")
return prior_model
def text2img(*, args, checkpoint_map_location):
print("loading text2img")
text2img_checkpoint = torch.load(args.text2img_checkpoint_path, map_location=checkpoint_map_location)
unet_model = unet_model_from_original_config()
unet_diffusers_checkpoint = unet_original_checkpoint_to_diffusers_checkpoint(unet_model, text2img_checkpoint)
del text2img_checkpoint
load_checkpoint_to_model(unet_diffusers_checkpoint, unet_model, strict=True)
print("done loading text2img")
return unet_model
def inpaint_text2img(*, args, checkpoint_map_location):
print("loading inpaint text2img")
inpaint_text2img_checkpoint = torch.load(
args.inpaint_text2img_checkpoint_path, map_location=checkpoint_map_location
)
inpaint_unet_model = inpaint_unet_model_from_original_config()
inpaint_unet_diffusers_checkpoint = inpaint_unet_original_checkpoint_to_diffusers_checkpoint(
inpaint_unet_model, inpaint_text2img_checkpoint
)
del inpaint_text2img_checkpoint
load_checkpoint_to_model(inpaint_unet_diffusers_checkpoint, inpaint_unet_model, strict=True)
print("done loading inpaint text2img")
return inpaint_unet_model
# movq
MOVQ_CONFIG = {
"in_channels": 3,
"out_channels": 3,
"latent_channels": 4,
"down_block_types": ("DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D"),
"up_block_types": ("AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"),
"num_vq_embeddings": 16384,
"block_out_channels": (128, 256, 256, 512),
"vq_embed_dim": 4,
"layers_per_block": 2,
"norm_type": "spatial",
}
def movq_model_from_original_config():
movq = VQModel(**MOVQ_CONFIG)
return movq
def movq_encoder_to_diffusers_checkpoint(model, checkpoint):
diffusers_checkpoint = {}
# conv_in
diffusers_checkpoint.update(
{
"encoder.conv_in.weight": checkpoint["encoder.conv_in.weight"],
"encoder.conv_in.bias": checkpoint["encoder.conv_in.bias"],
}
)
# down_blocks
for down_block_idx, down_block in enumerate(model.encoder.down_blocks):
diffusers_down_block_prefix = f"encoder.down_blocks.{down_block_idx}"
down_block_prefix = f"encoder.down.{down_block_idx}"
# resnets
for resnet_idx, resnet in enumerate(down_block.resnets):
diffusers_resnet_prefix = f"{diffusers_down_block_prefix}.resnets.{resnet_idx}"
resnet_prefix = f"{down_block_prefix}.block.{resnet_idx}"
diffusers_checkpoint.update(
movq_resnet_to_diffusers_checkpoint(
resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix
)
)
# downsample
# There is no downsample on the last down block, so skip it
if down_block_idx != len(model.encoder.down_blocks) - 1:
# There's a single downsample in the original checkpoint but a list of downsamples
# in the diffusers model.
diffusers_downsample_prefix = f"{diffusers_down_block_prefix}.downsamplers.0.conv"
downsample_prefix = f"{down_block_prefix}.downsample.conv"
diffusers_checkpoint.update(
{
f"{diffusers_downsample_prefix}.weight": checkpoint[f"{downsample_prefix}.weight"],
f"{diffusers_downsample_prefix}.bias": checkpoint[f"{downsample_prefix}.bias"],
}
)
# attentions
if hasattr(down_block, "attentions"):
for attention_idx, _ in enumerate(down_block.attentions):
diffusers_attention_prefix = f"{diffusers_down_block_prefix}.attentions.{attention_idx}"
attention_prefix = f"{down_block_prefix}.attn.{attention_idx}"
diffusers_checkpoint.update(
movq_attention_to_diffusers_checkpoint(
checkpoint,
diffusers_attention_prefix=diffusers_attention_prefix,
attention_prefix=attention_prefix,
)
)
# mid block
# mid block attentions
# There is a single hardcoded attention block in the middle of the MoVQ encoder
diffusers_attention_prefix = "encoder.mid_block.attentions.0"
attention_prefix = "encoder.mid.attn_1"
diffusers_checkpoint.update(
movq_attention_to_diffusers_checkpoint(
checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix
)
)
# mid block resnets
for diffusers_resnet_idx, resnet in enumerate(model.encoder.mid_block.resnets):
diffusers_resnet_prefix = f"encoder.mid_block.resnets.{diffusers_resnet_idx}"
# the original checkpoint hardcodes the mid-block resnet names as `block_1` and `block_2`
orig_resnet_idx = diffusers_resnet_idx + 1
# There are two hardcoded resnets in the middle of the MoVQ encoder
resnet_prefix = f"encoder.mid.block_{orig_resnet_idx}"
diffusers_checkpoint.update(
movq_resnet_to_diffusers_checkpoint(
resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix
)
)
diffusers_checkpoint.update(
{
# conv_norm_out
"encoder.conv_norm_out.weight": checkpoint["encoder.norm_out.weight"],
"encoder.conv_norm_out.bias": checkpoint["encoder.norm_out.bias"],
# conv_out
"encoder.conv_out.weight": checkpoint["encoder.conv_out.weight"],
"encoder.conv_out.bias": checkpoint["encoder.conv_out.bias"],
}
)
return diffusers_checkpoint
def movq_decoder_to_diffusers_checkpoint(model, checkpoint):
diffusers_checkpoint = {}
# conv in
diffusers_checkpoint.update(
{
"decoder.conv_in.weight": checkpoint["decoder.conv_in.weight"],
"decoder.conv_in.bias": checkpoint["decoder.conv_in.bias"],
}
)
# up_blocks
for diffusers_up_block_idx, up_block in enumerate(model.decoder.up_blocks):
# up_blocks are stored in reverse order in the original MoVQ checkpoint
orig_up_block_idx = len(model.decoder.up_blocks) - 1 - diffusers_up_block_idx
diffusers_up_block_prefix = f"decoder.up_blocks.{diffusers_up_block_idx}"
up_block_prefix = f"decoder.up.{orig_up_block_idx}"
# resnets
for resnet_idx, resnet in enumerate(up_block.resnets):
diffusers_resnet_prefix = f"{diffusers_up_block_prefix}.resnets.{resnet_idx}"
resnet_prefix = f"{up_block_prefix}.block.{resnet_idx}"
diffusers_checkpoint.update(
movq_resnet_to_diffusers_checkpoint_spatial_norm(
resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix
)
)
# upsample
# there is no upsample on the last up block
if diffusers_up_block_idx != len(model.decoder.up_blocks) - 1:
# There's a single upsample in the MoVQ checkpoint but a list of upsamplers
# in the diffusers model.
diffusers_downsample_prefix = f"{diffusers_up_block_prefix}.upsamplers.0.conv"
downsample_prefix = f"{up_block_prefix}.upsample.conv"
diffusers_checkpoint.update(
{
f"{diffusers_downsample_prefix}.weight": checkpoint[f"{downsample_prefix}.weight"],
f"{diffusers_downsample_prefix}.bias": checkpoint[f"{downsample_prefix}.bias"],
}
)
# attentions
if hasattr(up_block, "attentions"):
for attention_idx, _ in enumerate(up_block.attentions):
diffusers_attention_prefix = f"{diffusers_up_block_prefix}.attentions.{attention_idx}"
attention_prefix = f"{up_block_prefix}.attn.{attention_idx}"
diffusers_checkpoint.update(
movq_attention_to_diffusers_checkpoint_spatial_norm(
checkpoint,
diffusers_attention_prefix=diffusers_attention_prefix,
attention_prefix=attention_prefix,
)
)
# mid block
# mid block attentions
# There is a single hardcoded attention block in the middle of the MoVQ decoder
diffusers_attention_prefix = "decoder.mid_block.attentions.0"
attention_prefix = "decoder.mid.attn_1"
diffusers_checkpoint.update(
movq_attention_to_diffusers_checkpoint_spatial_norm(
checkpoint, diffusers_attention_prefix=diffusers_attention_prefix, attention_prefix=attention_prefix
)
)
# mid block resnets
for diffusers_resnet_idx, resnet in enumerate(model.decoder.mid_block.resnets):
diffusers_resnet_prefix = f"decoder.mid_block.resnets.{diffusers_resnet_idx}"
# the original checkpoint hardcodes the mid-block resnet names as `block_1` and `block_2`
orig_resnet_idx = diffusers_resnet_idx + 1
# There are two hardcoded resnets in the middle of the MoVQ decoder
resnet_prefix = f"decoder.mid.block_{orig_resnet_idx}"
diffusers_checkpoint.update(
movq_resnet_to_diffusers_checkpoint_spatial_norm(
resnet, checkpoint, diffusers_resnet_prefix=diffusers_resnet_prefix, resnet_prefix=resnet_prefix
)
)
diffusers_checkpoint.update(
{
# conv_norm_out
"decoder.conv_norm_out.norm_layer.weight": checkpoint["decoder.norm_out.norm_layer.weight"],
"decoder.conv_norm_out.norm_layer.bias": checkpoint["decoder.norm_out.norm_layer.bias"],
"decoder.conv_norm_out.conv_y.weight": checkpoint["decoder.norm_out.conv_y.weight"],
"decoder.conv_norm_out.conv_y.bias": checkpoint["decoder.norm_out.conv_y.bias"],
"decoder.conv_norm_out.conv_b.weight": checkpoint["decoder.norm_out.conv_b.weight"],
"decoder.conv_norm_out.conv_b.bias": checkpoint["decoder.norm_out.conv_b.bias"],
# conv_out
"decoder.conv_out.weight": checkpoint["decoder.conv_out.weight"],
"decoder.conv_out.bias": checkpoint["decoder.conv_out.bias"],
}
)
return diffusers_checkpoint
def movq_resnet_to_diffusers_checkpoint(resnet, checkpoint, *, diffusers_resnet_prefix, resnet_prefix):
rv = {
# norm1
f"{diffusers_resnet_prefix}.norm1.weight": checkpoint[f"{resnet_prefix}.norm1.weight"],
f"{diffusers_resnet_prefix}.norm1.bias": checkpoint[f"{resnet_prefix}.norm1.bias"],
# conv1
f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.conv1.weight"],
f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.conv1.bias"],
# norm2
f"{diffusers_resnet_prefix}.norm2.weight": checkpoint[f"{resnet_prefix}.norm2.weight"],
f"{diffusers_resnet_prefix}.norm2.bias": checkpoint[f"{resnet_prefix}.norm2.bias"],
# conv2
f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.conv2.weight"],
f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.conv2.bias"],
}
if resnet.conv_shortcut is not None:
rv.update(
{
f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{resnet_prefix}.nin_shortcut.weight"],
f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{resnet_prefix}.nin_shortcut.bias"],
}
)
return rv
def movq_resnet_to_diffusers_checkpoint_spatial_norm(resnet, checkpoint, *, diffusers_resnet_prefix, resnet_prefix):
rv = {
# norm1
f"{diffusers_resnet_prefix}.norm1.norm_layer.weight": checkpoint[f"{resnet_prefix}.norm1.norm_layer.weight"],
f"{diffusers_resnet_prefix}.norm1.norm_layer.bias": checkpoint[f"{resnet_prefix}.norm1.norm_layer.bias"],
f"{diffusers_resnet_prefix}.norm1.conv_y.weight": checkpoint[f"{resnet_prefix}.norm1.conv_y.weight"],
f"{diffusers_resnet_prefix}.norm1.conv_y.bias": checkpoint[f"{resnet_prefix}.norm1.conv_y.bias"],
f"{diffusers_resnet_prefix}.norm1.conv_b.weight": checkpoint[f"{resnet_prefix}.norm1.conv_b.weight"],
f"{diffusers_resnet_prefix}.norm1.conv_b.bias": checkpoint[f"{resnet_prefix}.norm1.conv_b.bias"],
# conv1
f"{diffusers_resnet_prefix}.conv1.weight": checkpoint[f"{resnet_prefix}.conv1.weight"],
f"{diffusers_resnet_prefix}.conv1.bias": checkpoint[f"{resnet_prefix}.conv1.bias"],
# norm2
f"{diffusers_resnet_prefix}.norm2.norm_layer.weight": checkpoint[f"{resnet_prefix}.norm2.norm_layer.weight"],
f"{diffusers_resnet_prefix}.norm2.norm_layer.bias": checkpoint[f"{resnet_prefix}.norm2.norm_layer.bias"],
f"{diffusers_resnet_prefix}.norm2.conv_y.weight": checkpoint[f"{resnet_prefix}.norm2.conv_y.weight"],
f"{diffusers_resnet_prefix}.norm2.conv_y.bias": checkpoint[f"{resnet_prefix}.norm2.conv_y.bias"],
f"{diffusers_resnet_prefix}.norm2.conv_b.weight": checkpoint[f"{resnet_prefix}.norm2.conv_b.weight"],
f"{diffusers_resnet_prefix}.norm2.conv_b.bias": checkpoint[f"{resnet_prefix}.norm2.conv_b.bias"],
# conv2
f"{diffusers_resnet_prefix}.conv2.weight": checkpoint[f"{resnet_prefix}.conv2.weight"],
f"{diffusers_resnet_prefix}.conv2.bias": checkpoint[f"{resnet_prefix}.conv2.bias"],
}
if resnet.conv_shortcut is not None:
rv.update(
{
f"{diffusers_resnet_prefix}.conv_shortcut.weight": checkpoint[f"{resnet_prefix}.nin_shortcut.weight"],
f"{diffusers_resnet_prefix}.conv_shortcut.bias": checkpoint[f"{resnet_prefix}.nin_shortcut.bias"],
}
)
return rv
def movq_attention_to_diffusers_checkpoint(checkpoint, *, diffusers_attention_prefix, attention_prefix):
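# Note: the original MoVQ attention implements q/k/v/proj_out as 1x1 Conv2d layers, so their
# weights have shape (out_channels, in_channels, 1, 1). diffusers' attention uses linear
# projections, hence the `[:, :, 0, 0]` slicing below drops the trailing spatial dims; the
# spatial-norm variant further down does the same.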
return {
# norm
f"{diffusers_attention_prefix}.group_norm.weight": checkpoint[f"{attention_prefix}.norm.weight"],
f"{diffusers_attention_prefix}.group_norm.bias": checkpoint[f"{attention_prefix}.norm.bias"],
# query
f"{diffusers_attention_prefix}.to_q.weight": checkpoint[f"{attention_prefix}.q.weight"][:, :, 0, 0],
f"{diffusers_attention_prefix}.to_q.bias": checkpoint[f"{attention_prefix}.q.bias"],
# key
f"{diffusers_attention_prefix}.to_k.weight": checkpoint[f"{attention_prefix}.k.weight"][:, :, 0, 0],
f"{diffusers_attention_prefix}.to_k.bias": checkpoint[f"{attention_prefix}.k.bias"],
# value
f"{diffusers_attention_prefix}.to_v.weight": checkpoint[f"{attention_prefix}.v.weight"][:, :, 0, 0],
f"{diffusers_attention_prefix}.to_v.bias": checkpoint[f"{attention_prefix}.v.bias"],
# proj_attn
f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][:, :, 0, 0],
f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj_out.bias"],
}
def movq_attention_to_diffusers_checkpoint_spatial_norm(checkpoint, *, diffusers_attention_prefix, attention_prefix):
return {
# norm
f"{diffusers_attention_prefix}.spatial_norm.norm_layer.weight": checkpoint[
f"{attention_prefix}.norm.norm_layer.weight"
],
f"{diffusers_attention_prefix}.spatial_norm.norm_layer.bias": checkpoint[
f"{attention_prefix}.norm.norm_layer.bias"
],
f"{diffusers_attention_prefix}.spatial_norm.conv_y.weight": checkpoint[
f"{attention_prefix}.norm.conv_y.weight"
],
f"{diffusers_attention_prefix}.spatial_norm.conv_y.bias": checkpoint[f"{attention_prefix}.norm.conv_y.bias"],
f"{diffusers_attention_prefix}.spatial_norm.conv_b.weight": checkpoint[
f"{attention_prefix}.norm.conv_b.weight"
],
f"{diffusers_attention_prefix}.spatial_norm.conv_b.bias": checkpoint[f"{attention_prefix}.norm.conv_b.bias"],
# query
f"{diffusers_attention_prefix}.to_q.weight": checkpoint[f"{attention_prefix}.q.weight"][:, :, 0, 0],
f"{diffusers_attention_prefix}.to_q.bias": checkpoint[f"{attention_prefix}.q.bias"],
# key
f"{diffusers_attention_prefix}.to_k.weight": checkpoint[f"{attention_prefix}.k.weight"][:, :, 0, 0],
f"{diffusers_attention_prefix}.to_k.bias": checkpoint[f"{attention_prefix}.k.bias"],
# value
f"{diffusers_attention_prefix}.to_v.weight": checkpoint[f"{attention_prefix}.v.weight"][:, :, 0, 0],
f"{diffusers_attention_prefix}.to_v.bias": checkpoint[f"{attention_prefix}.v.bias"],
# proj_attn
f"{diffusers_attention_prefix}.to_out.0.weight": checkpoint[f"{attention_prefix}.proj_out.weight"][:, :, 0, 0],
f"{diffusers_attention_prefix}.to_out.0.bias": checkpoint[f"{attention_prefix}.proj_out.bias"],
}
def movq_original_checkpoint_to_diffusers_checkpoint(model, checkpoint):
diffusers_checkpoint = {}
diffusers_checkpoint.update(movq_encoder_to_diffusers_checkpoint(model, checkpoint))
# quant_conv
diffusers_checkpoint.update(
{
"quant_conv.weight": checkpoint["quant_conv.weight"],
"quant_conv.bias": checkpoint["quant_conv.bias"],
}
)
# quantize
diffusers_checkpoint.update({"quantize.embedding.weight": checkpoint["quantize.embedding.weight"]})
# post_quant_conv
diffusers_checkpoint.update(
{
"post_quant_conv.weight": checkpoint["post_quant_conv.weight"],
"post_quant_conv.bias": checkpoint["post_quant_conv.bias"],
}
)
# decoder
diffusers_checkpoint.update(movq_decoder_to_diffusers_checkpoint(model, checkpoint))
return diffusers_checkpoint
def movq(*, args, checkpoint_map_location):
print("loading movq")
movq_checkpoint = torch.load(args.movq_checkpoint_path, map_location=checkpoint_map_location)
movq_model = movq_model_from_original_config()
movq_diffusers_checkpoint = movq_original_checkpoint_to_diffusers_checkpoint(movq_model, movq_checkpoint)
del movq_checkpoint
load_checkpoint_to_model(movq_diffusers_checkpoint, movq_model, strict=True)
print("done loading movq")
return movq_model
def load_checkpoint_to_model(checkpoint, model, strict=False):
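# The in-memory state dict is round-tripped through a temporary file because accelerate's
# `load_checkpoint_and_dispatch` expects a checkpoint on disk (the strict branch simply reloads
# the same file with `torch.load`).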
with tempfile.NamedTemporaryFile(delete=False) as file:
torch.save(checkpoint, file.name)
del checkpoint
if strict:
model.load_state_dict(torch.load(file.name), strict=True)
else:
load_checkpoint_and_dispatch(model, file.name, device_map="auto")
os.remove(file.name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--prior_checkpoint_path",
default=None,
type=str,
required=False,
help="Path to the prior checkpoint to convert.",
)
parser.add_argument(
"--clip_stat_path",
default=None,
type=str,
required=False,
help="Path to the clip stats checkpoint to convert.",
)
parser.add_argument(
"--text2img_checkpoint_path",
default=None,
type=str,
required=False,
help="Path to the text2img checkpoint to convert.",
)
parser.add_argument(
"--movq_checkpoint_path",
default=None,
type=str,
required=False,
help="Path to the text2img checkpoint to convert.",
)
parser.add_argument(
"--inpaint_text2img_checkpoint_path",
default=None,
type=str,
required=False,
help="Path to the inpaint text2img checkpoint to convert.",
)
parser.add_argument(
"--checkpoint_load_device",
default="cpu",
type=str,
required=False,
help="The device passed to `map_location` when loading checkpoints.",
)
parser.add_argument(
"--debug",
default=None,
type=str,
required=False,
help="Only run a specific stage of the convert script. Used for debugging",
)
args = parser.parse_args()
print(f"loading checkpoints to {args.checkpoint_load_device}")
checkpoint_map_location = torch.device(args.checkpoint_load_device)
if args.debug is not None:
print(f"debug: only executing {args.debug}")
if args.debug is None:
print("to-do")
elif args.debug == "prior":
prior_model = prior(args=args, checkpoint_map_location=checkpoint_map_location)
prior_model.save_pretrained(args.dump_path)
elif args.debug == "text2img":
unet_model = text2img(args=args, checkpoint_map_location=checkpoint_map_location)
unet_model.save_pretrained(f"{args.dump_path}/unet")
elif args.debug == "inpaint_text2img":
inpaint_unet_model = inpaint_text2img(args=args, checkpoint_map_location=checkpoint_map_location)
inpaint_unet_model.save_pretrained(f"{args.dump_path}/inpaint_unet")
elif args.debug == "decoder":
decoder = movq(args=args, checkpoint_map_location=checkpoint_map_location)
decoder.save_pretrained(f"{args.dump_path}/decoder")
else:
raise ValueError(f"unknown debug value : {args.debug}")
| diffusers/scripts/convert_kandinsky_to_diffusers.py/0 | {
"file_path": "diffusers/scripts/convert_kandinsky_to_diffusers.py",
"repo_id": "diffusers",
"token_count": 23602
} | 108 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import onnx_graphsurgeon as gs
import torch
from onnx import shape_inference
from packaging import version
from polygraphy.backend.onnx.loader import fold_constants
from torch.onnx import export
from diffusers import (
ControlNetModel,
StableDiffusionControlNetImg2ImgPipeline,
)
from diffusers.models.attention_processor import AttnProcessor
from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
is_torch_2_0_1 = version.parse(version.parse(torch.__version__).base_version) == version.parse("2.0.1")
class Optimizer:
def __init__(self, onnx_graph, verbose=False):
self.graph = gs.import_onnx(onnx_graph)
self.verbose = verbose
def info(self, prefix):
if self.verbose:
print(
f"{prefix} .. {len(self.graph.nodes)} nodes, {len(self.graph.tensors().keys())} tensors, {len(self.graph.inputs)} inputs, {len(self.graph.outputs)} outputs"
)
def cleanup(self, return_onnx=False):
self.graph.cleanup().toposort()
if return_onnx:
return gs.export_onnx(self.graph)
def select_outputs(self, keep, names=None):
self.graph.outputs = [self.graph.outputs[o] for o in keep]
if names:
for i, name in enumerate(names):
self.graph.outputs[i].name = name
def fold_constants(self, return_onnx=False):
onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)
self.graph = gs.import_onnx(onnx_graph)
if return_onnx:
return onnx_graph
def infer_shapes(self, return_onnx=False):
onnx_graph = gs.export_onnx(self.graph)
if onnx_graph.ByteSize() > 2147483648:
raise TypeError("ERROR: model size exceeds supported 2GB limit")
else:
onnx_graph = shape_inference.infer_shapes(onnx_graph)
self.graph = gs.import_onnx(onnx_graph)
if return_onnx:
return onnx_graph
def optimize(onnx_graph, name, verbose):
opt = Optimizer(onnx_graph, verbose=verbose)
opt.info(name + ": original")
opt.cleanup()
opt.info(name + ": cleanup")
opt.fold_constants()
opt.info(name + ": fold constants")
# opt.infer_shapes()
# opt.info(name + ': shape inference')
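# Note: shape inference is skipped here on purpose. `Optimizer.infer_shapes` raises for graphs
# over the 2GB protobuf limit (e.g. the UNet), so `convert_models` runs
# `shape_inference.infer_shapes_path` on the saved file before calling this optimizer instead.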
onnx_opt_graph = opt.cleanup(return_onnx=True)
opt.info(name + ": finished")
return onnx_opt_graph
class UNet2DConditionControlNetModel(torch.nn.Module):
def __init__(
self,
unet,
controlnets: ControlNetModel,
):
super().__init__()
self.unet = unet
self.controlnets = controlnets
def forward(
self,
sample,
timestep,
encoder_hidden_states,
controlnet_conds,
controlnet_scales,
):
for i, (controlnet_cond, conditioning_scale, controlnet) in enumerate(
zip(controlnet_conds, controlnet_scales, self.controlnets)
):
down_samples, mid_sample = controlnet(
sample,
timestep,
encoder_hidden_states=encoder_hidden_states,
controlnet_cond=controlnet_cond,
conditioning_scale=conditioning_scale,
return_dict=False,
)
# merge samples: sum the down/mid residuals contributed by each ControlNet
if i == 0:
down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
else:
down_block_res_samples = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
]
mid_block_res_sample += mid_sample
noise_pred = self.unet(
sample,
timestep,
encoder_hidden_states=encoder_hidden_states,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
return_dict=False,
)[0]
return noise_pred
class UNet2DConditionXLControlNetModel(torch.nn.Module):
def __init__(
self,
unet,
controlnets: ControlNetModel,
):
super().__init__()
self.unet = unet
self.controlnets = controlnets
def forward(
self,
sample,
timestep,
encoder_hidden_states,
controlnet_conds,
controlnet_scales,
text_embeds,
time_ids,
):
added_cond_kwargs = {"text_embeds": text_embeds, "time_ids": time_ids}
for i, (controlnet_cond, conditioning_scale, controlnet) in enumerate(
zip(controlnet_conds, controlnet_scales, self.controlnets)
):
down_samples, mid_sample = controlnet(
sample,
timestep,
encoder_hidden_states=encoder_hidden_states,
controlnet_cond=controlnet_cond,
conditioning_scale=conditioning_scale,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)
# merge samples: sum the down/mid residuals contributed by each ControlNet
if i == 0:
down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
else:
down_block_res_samples = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
]
mid_block_res_sample += mid_sample
noise_pred = self.unet(
sample,
timestep,
encoder_hidden_states=encoder_hidden_states,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
return noise_pred
def onnx_export(
model,
model_args: tuple,
output_path: Path,
ordered_input_names,
output_names,
dynamic_axes,
opset,
use_external_data_format=False,
):
output_path.parent.mkdir(parents=True, exist_ok=True)
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
with torch.inference_mode(), torch.autocast("cuda"):
if is_torch_less_than_1_11:
export(
model,
model_args,
f=output_path.as_posix(),
input_names=ordered_input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
do_constant_folding=True,
use_external_data_format=use_external_data_format,
enable_onnx_checker=True,
opset_version=opset,
)
else:
export(
model,
model_args,
f=output_path.as_posix(),
input_names=ordered_input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
do_constant_folding=True,
opset_version=opset,
)
@torch.no_grad()
def convert_models(
model_path: str, controlnet_path: list, output_path: str, opset: int, fp16: bool = False, sd_xl: bool = False
):
"""
Convert the models of a Stable Diffusion ControlNet pipeline into ONNX format.
Example:
python convert_stable_diffusion_controlnet_to_onnx.py
--model_path danbrown/RevAnimated-v1-2-2
--controlnet_path lllyasviel/control_v11f1e_sd15_tile ioclab/brightness-controlnet
--output_path path-to-models-stable_diffusion/RevAnimated-v1-2-2
--fp16
Example for SD XL:
python convert_stable_diffusion_controlnet_to_onnx.py
--model_path stabilityai/stable-diffusion-xl-base-1.0
--controlnet_path SargeZT/sdxl-controlnet-seg
--output_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0
--fp16
--sd_xl
Returns:
creates 4 ONNX models in the output path:
text_encoder/model.onnx
unet/model.onnx + unet/weights.pb
vae_encoder/model.onnx
vae_decoder/model.onnx
Run the test script in diffusers/examples/community:
python test_onnx_controlnet.py
--sd_model danbrown/RevAnimated-v1-2-2
--onnx_model_dir path-to-models-stable_diffusion/RevAnimated-v1-2-2
--qr_img_path path-to-qr-code-image
"""
dtype = torch.float16 if fp16 else torch.float32
if fp16 and torch.cuda.is_available():
device = "cuda"
elif fp16 and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA")
else:
device = "cpu"
# init controlnet
controlnets = []
for path in controlnet_path:
controlnet = ControlNetModel.from_pretrained(path, torch_dtype=dtype).to(device)
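# Torch 2.0.1 selects the SDPA-based attention processor by default; the vanilla AttnProcessor
# is forced here (and for the pipeline below), presumably because scaled_dot_product_attention
# does not trace cleanly to ONNX.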
if is_torch_2_0_1:
controlnet.set_attn_processor(AttnProcessor())
controlnets.append(controlnet)
if sd_xl:
if len(controlnets) == 1:
controlnet = controlnets[0]
else:
raise ValueError("MultiControlNet is not yet supported.")
pipeline = StableDiffusionXLControlNetPipeline.from_pretrained(
model_path, controlnet=controlnet, torch_dtype=dtype, variant="fp16", use_safetensors=True
).to(device)
else:
pipeline = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
model_path, controlnet=controlnets, torch_dtype=dtype
).to(device)
output_path = Path(output_path)
if is_torch_2_0_1:
pipeline.unet.set_attn_processor(AttnProcessor())
pipeline.vae.set_attn_processor(AttnProcessor())
# # TEXT ENCODER
num_tokens = pipeline.text_encoder.config.max_position_embeddings
text_hidden_size = pipeline.text_encoder.config.hidden_size
text_input = pipeline.tokenizer(
"A sample prompt",
padding="max_length",
max_length=pipeline.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
onnx_export(
pipeline.text_encoder,
# casting to torch.int32 until the CLIP fix is released: https://github.com/huggingface/transformers/pull/18515/files
model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
output_path=output_path / "text_encoder" / "model.onnx",
ordered_input_names=["input_ids"],
output_names=["last_hidden_state", "pooler_output"],
dynamic_axes={
"input_ids": {0: "batch", 1: "sequence"},
},
opset=opset,
)
del pipeline.text_encoder
# # UNET
if sd_xl:
controlnets = torch.nn.ModuleList(controlnets)
unet_controlnet = UNet2DConditionXLControlNetModel(pipeline.unet, controlnets)
unet_in_channels = pipeline.unet.config.in_channels
unet_sample_size = pipeline.unet.config.sample_size
text_hidden_size = 2048
img_size = 8 * unet_sample_size
unet_path = output_path / "unet" / "model.onnx"
onnx_export(
unet_controlnet,
model_args=(
torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
torch.tensor([1.0]).to(device=device, dtype=dtype),
torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
torch.randn(len(controlnets), 2, 3, img_size, img_size).to(device=device, dtype=dtype),
torch.randn(len(controlnets), 1).to(device=device, dtype=dtype),
torch.randn(2, 1280).to(device=device, dtype=dtype),
torch.rand(2, 6).to(device=device, dtype=dtype),
),
output_path=unet_path,
ordered_input_names=[
"sample",
"timestep",
"encoder_hidden_states",
"controlnet_conds",
"conditioning_scales",
"text_embeds",
"time_ids",
],
output_names=["noise_pred"], # has to be different from "sample" for correct tracing
dynamic_axes={
"sample": {0: "2B", 2: "H", 3: "W"},
"encoder_hidden_states": {0: "2B"},
"controlnet_conds": {1: "2B", 3: "8H", 4: "8W"},
"text_embeds": {0: "2B"},
"time_ids": {0: "2B"},
},
opset=opset,
use_external_data_format=True, # UNet is > 2GB, so the weights need to be split
)
unet_model_path = str(unet_path.absolute().as_posix())
unet_dir = os.path.dirname(unet_model_path)
# optimize onnx
shape_inference.infer_shapes_path(unet_model_path, unet_model_path)
unet_opt_graph = optimize(onnx.load(unet_model_path), name="Unet", verbose=True)
# clean up existing tensor files
shutil.rmtree(unet_dir)
os.mkdir(unet_dir)
# collate external tensor files into one
onnx.save_model(
unet_opt_graph,
unet_model_path,
save_as_external_data=True,
all_tensors_to_one_file=True,
location="weights.pb",
convert_attribute=False,
)
del pipeline.unet
else:
controlnets = torch.nn.ModuleList(controlnets)
unet_controlnet = UNet2DConditionControlNetModel(pipeline.unet, controlnets)
unet_in_channels = pipeline.unet.config.in_channels
unet_sample_size = pipeline.unet.config.sample_size
img_size = 8 * unet_sample_size
unet_path = output_path / "unet" / "model.onnx"
onnx_export(
unet_controlnet,
model_args=(
torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
torch.tensor([1.0]).to(device=device, dtype=dtype),
torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
torch.randn(len(controlnets), 2, 3, img_size, img_size).to(device=device, dtype=dtype),
torch.randn(len(controlnets), 1).to(device=device, dtype=dtype),
),
output_path=unet_path,
ordered_input_names=[
"sample",
"timestep",
"encoder_hidden_states",
"controlnet_conds",
"conditioning_scales",
],
output_names=["noise_pred"], # has to be different from "sample" for correct tracing
dynamic_axes={
"sample": {0: "2B", 2: "H", 3: "W"},
"encoder_hidden_states": {0: "2B"},
"controlnet_conds": {1: "2B", 3: "8H", 4: "8W"},
},
opset=opset,
use_external_data_format=True, # UNet is > 2GB, so the weights need to be split
)
unet_model_path = str(unet_path.absolute().as_posix())
unet_dir = os.path.dirname(unet_model_path)
# optimize onnx
shape_inference.infer_shapes_path(unet_model_path, unet_model_path)
unet_opt_graph = optimize(onnx.load(unet_model_path), name="Unet", verbose=True)
# clean up existing tensor files
shutil.rmtree(unet_dir)
os.mkdir(unet_dir)
# collate external tensor files into one
onnx.save_model(
unet_opt_graph,
unet_model_path,
save_as_external_data=True,
all_tensors_to_one_file=True,
location="weights.pb",
convert_attribute=False,
)
del pipeline.unet
# VAE ENCODER
vae_encoder = pipeline.vae
vae_in_channels = vae_encoder.config.in_channels
vae_sample_size = vae_encoder.config.sample_size
# ONNX export traces `forward`, so rebind it to return the raw latent sample tensor from the encoder
vae_encoder.forward = lambda sample: vae_encoder.encode(sample).latent_dist.sample()
onnx_export(
vae_encoder,
model_args=(torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),),
output_path=output_path / "vae_encoder" / "model.onnx",
ordered_input_names=["sample"],
output_names=["latent_sample"],
dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
},
opset=opset,
)
# VAE DECODER
vae_decoder = pipeline.vae
vae_latent_channels = vae_decoder.config.latent_channels
# forward only through the decoder part (vae_decoder and vae_encoder refer to the same pipeline.vae module)
vae_decoder.forward = vae_encoder.decode
onnx_export(
vae_decoder,
model_args=(
torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
),
output_path=output_path / "vae_decoder" / "model.onnx",
ordered_input_names=["latent_sample"],
output_names=["sample"],
dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
},
opset=opset,
)
del pipeline.vae
del pipeline
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--sd_xl", action="store_true", default=False, help="SD XL pipeline")
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument(
"--controlnet_path",
nargs="+",
required=True,
help="Path to the `controlnet` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
args = parser.parse_args()
convert_models(args.model_path, args.controlnet_path, args.output_path, args.opset, args.fp16, args.sd_xl)
| diffusers/scripts/convert_stable_diffusion_controlnet_to_onnx.py/0 | {
"file_path": "diffusers/scripts/convert_stable_diffusion_controlnet_to_onnx.py",
"repo_id": "diffusers",
"token_count": 8995
} | 109 |
#!/usr/bin/env python
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from .env import EnvironmentCommand
from .fp16_safetensors import FP16SafetensorsCommand
def main():
parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
# Register commands
EnvironmentCommand.register_subcommand(commands_parser)
FP16SafetensorsCommand.register_subcommand(commands_parser)
# Let's go
args = parser.parse_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
# Run
service = args.func(args)
service.run()
if __name__ == "__main__":
main()
| diffusers/src/diffusers/commands/diffusers_cli.py/0 | {
"file_path": "diffusers/src/diffusers/commands/diffusers_cli.py",
"repo_id": "diffusers",
"token_count": 412
} | 110 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import logging
logger = logging.get_logger(__name__)
def _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter="_", block_slice_pos=5):
# 1. get all state_dict_keys
all_keys = list(state_dict.keys())
sgm_patterns = ["input_blocks", "middle_block", "output_blocks"]
# 2. check if needs remapping, if not return original dict
is_in_sgm_format = False
for key in all_keys:
if any(p in key for p in sgm_patterns):
is_in_sgm_format = True
break
if not is_in_sgm_format:
return state_dict
# 3. Else remap from SGM patterns
new_state_dict = {}
inner_block_map = ["resnets", "attentions", "upsamplers"]
# Retrieves # of down, mid and up blocks
input_block_ids, middle_block_ids, output_block_ids = set(), set(), set()
for layer in all_keys:
if "text" in layer:
new_state_dict[layer] = state_dict.pop(layer)
else:
layer_id = int(layer.split(delimiter)[:block_slice_pos][-1])
if sgm_patterns[0] in layer:
input_block_ids.add(layer_id)
elif sgm_patterns[1] in layer:
middle_block_ids.add(layer_id)
elif sgm_patterns[2] in layer:
output_block_ids.add(layer_id)
else:
raise ValueError(f"Checkpoint not supported because layer {layer} not supported.")
input_blocks = {
layer_id: [key for key in state_dict if f"input_blocks{delimiter}{layer_id}" in key]
for layer_id in input_block_ids
}
middle_blocks = {
layer_id: [key for key in state_dict if f"middle_block{delimiter}{layer_id}" in key]
for layer_id in middle_block_ids
}
output_blocks = {
layer_id: [key for key in state_dict if f"output_blocks{delimiter}{layer_id}" in key]
for layer_id in output_block_ids
}
# Rename keys accordingly
for i in input_block_ids:
block_id = (i - 1) // (unet_config.layers_per_block + 1)
layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1)
for key in input_blocks[i]:
inner_block_id = int(key.split(delimiter)[block_slice_pos])
inner_block_key = inner_block_map[inner_block_id] if "op" not in key else "downsamplers"
inner_layers_in_block = str(layer_in_block_id) if "op" not in key else "0"
new_key = delimiter.join(
key.split(delimiter)[: block_slice_pos - 1]
+ [str(block_id), inner_block_key, inner_layers_in_block]
+ key.split(delimiter)[block_slice_pos + 1 :]
)
new_state_dict[new_key] = state_dict.pop(key)
for i in middle_block_ids:
key_part = None
if i == 0:
key_part = [inner_block_map[0], "0"]
elif i == 1:
key_part = [inner_block_map[1], "0"]
elif i == 2:
key_part = [inner_block_map[0], "1"]
else:
raise ValueError(f"Invalid middle block id {i}.")
for key in middle_blocks[i]:
new_key = delimiter.join(
key.split(delimiter)[: block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:]
)
new_state_dict[new_key] = state_dict.pop(key)
for i in output_block_ids:
block_id = i // (unet_config.layers_per_block + 1)
layer_in_block_id = i % (unet_config.layers_per_block + 1)
for key in output_blocks[i]:
inner_block_id = int(key.split(delimiter)[block_slice_pos])
inner_block_key = inner_block_map[inner_block_id]
inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else "0"
new_key = delimiter.join(
key.split(delimiter)[: block_slice_pos - 1]
+ [str(block_id), inner_block_key, inner_layers_in_block]
+ key.split(delimiter)[block_slice_pos + 1 :]
)
new_state_dict[new_key] = state_dict.pop(key)
if len(state_dict) > 0:
raise ValueError("At this point all state dict entries have to be converted.")
return new_state_dict
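# Illustrative example (assuming an SDXL LoRA with `layers_per_block=2` and the default "_"
# delimiter): a key such as
# `lora_unet_input_blocks_4_1_transformer_blocks_0_attn1_to_q.lora_down.weight`
# is remapped to
# `lora_unet_input_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q.lora_down.weight`;
# the SGM block names themselves (`input_blocks` -> `down_blocks`, etc.) are renamed later by
# `_convert_kohya_lora_to_diffusers`.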
def _convert_kohya_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_name="text_encoder"):
unet_state_dict = {}
te_state_dict = {}
te2_state_dict = {}
network_alphas = {}
# every down weight has a corresponding up weight and potentially an alpha weight
lora_keys = [k for k in state_dict.keys() if k.endswith("lora_down.weight")]
for key in lora_keys:
lora_name = key.split(".")[0]
lora_name_up = lora_name + ".lora_up.weight"
lora_name_alpha = lora_name + ".alpha"
if lora_name.startswith("lora_unet_"):
diffusers_name = key.replace("lora_unet_", "").replace("_", ".")
if "input.blocks" in diffusers_name:
diffusers_name = diffusers_name.replace("input.blocks", "down_blocks")
else:
diffusers_name = diffusers_name.replace("down.blocks", "down_blocks")
if "middle.block" in diffusers_name:
diffusers_name = diffusers_name.replace("middle.block", "mid_block")
else:
diffusers_name = diffusers_name.replace("mid.block", "mid_block")
if "output.blocks" in diffusers_name:
diffusers_name = diffusers_name.replace("output.blocks", "up_blocks")
else:
diffusers_name = diffusers_name.replace("up.blocks", "up_blocks")
diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks")
diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora")
diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora")
diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora")
diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora")
diffusers_name = diffusers_name.replace("proj.in", "proj_in")
diffusers_name = diffusers_name.replace("proj.out", "proj_out")
diffusers_name = diffusers_name.replace("emb.layers", "time_emb_proj")
# SDXL specificity.
if "emb" in diffusers_name and "time.emb.proj" not in diffusers_name:
pattern = r"\.\d+(?=\D*$)"
diffusers_name = re.sub(pattern, "", diffusers_name, count=1)
if ".in." in diffusers_name:
diffusers_name = diffusers_name.replace("in.layers.2", "conv1")
if ".out." in diffusers_name:
diffusers_name = diffusers_name.replace("out.layers.3", "conv2")
if "downsamplers" in diffusers_name or "upsamplers" in diffusers_name:
diffusers_name = diffusers_name.replace("op", "conv")
if "skip" in diffusers_name:
diffusers_name = diffusers_name.replace("skip.connection", "conv_shortcut")
# LyCORIS specificity.
if "time.emb.proj" in diffusers_name:
diffusers_name = diffusers_name.replace("time.emb.proj", "time_emb_proj")
if "conv.shortcut" in diffusers_name:
diffusers_name = diffusers_name.replace("conv.shortcut", "conv_shortcut")
# General coverage.
if "transformer_blocks" in diffusers_name:
if "attn1" in diffusers_name or "attn2" in diffusers_name:
diffusers_name = diffusers_name.replace("attn1", "attn1.processor")
diffusers_name = diffusers_name.replace("attn2", "attn2.processor")
unet_state_dict[diffusers_name] = state_dict.pop(key)
unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
elif "ff" in diffusers_name:
unet_state_dict[diffusers_name] = state_dict.pop(key)
unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
elif any(key in diffusers_name for key in ("proj_in", "proj_out")):
unet_state_dict[diffusers_name] = state_dict.pop(key)
unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
else:
unet_state_dict[diffusers_name] = state_dict.pop(key)
unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
elif lora_name.startswith("lora_te_"):
diffusers_name = key.replace("lora_te_", "").replace("_", ".")
diffusers_name = diffusers_name.replace("text.model", "text_model")
diffusers_name = diffusers_name.replace("self.attn", "self_attn")
diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
if "self_attn" in diffusers_name:
te_state_dict[diffusers_name] = state_dict.pop(key)
te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
elif "mlp" in diffusers_name:
# Be aware that this is the new diffusers convention and the rest of the code might
# not utilize it yet.
diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
te_state_dict[diffusers_name] = state_dict.pop(key)
te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
# (sayakpaul): Duplicate code. Needs to be cleaned.
elif lora_name.startswith("lora_te1_"):
diffusers_name = key.replace("lora_te1_", "").replace("_", ".")
diffusers_name = diffusers_name.replace("text.model", "text_model")
diffusers_name = diffusers_name.replace("self.attn", "self_attn")
diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
if "self_attn" in diffusers_name:
te_state_dict[diffusers_name] = state_dict.pop(key)
te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
elif "mlp" in diffusers_name:
# Be aware that this is the new diffusers convention and the rest of the code might
# not utilize it yet.
diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
te_state_dict[diffusers_name] = state_dict.pop(key)
te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
# (sayakpaul): Duplicate code. Needs to be cleaned.
elif lora_name.startswith("lora_te2_"):
diffusers_name = key.replace("lora_te2_", "").replace("_", ".")
diffusers_name = diffusers_name.replace("text.model", "text_model")
diffusers_name = diffusers_name.replace("self.attn", "self_attn")
diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
if "self_attn" in diffusers_name:
te2_state_dict[diffusers_name] = state_dict.pop(key)
te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
elif "mlp" in diffusers_name:
# Be aware that this is the new diffusers convention and the rest of the code might
# not utilize it yet.
diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
te2_state_dict[diffusers_name] = state_dict.pop(key)
te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
# Rename the alphas so that they can be mapped appropriately.
if lora_name_alpha in state_dict:
alpha = state_dict.pop(lora_name_alpha).item()
if lora_name_alpha.startswith("lora_unet_"):
prefix = "unet."
elif lora_name_alpha.startswith(("lora_te_", "lora_te1_")):
prefix = "text_encoder."
else:
prefix = "text_encoder_2."
new_name = prefix + diffusers_name.split(".lora.")[0] + ".alpha"
network_alphas.update({new_name: alpha})
if len(state_dict) > 0:
raise ValueError(f"The following keys have not been correctly be renamed: \n\n {', '.join(state_dict.keys())}")
logger.info("Kohya-style checkpoint detected.")
unet_state_dict = {f"{unet_name}.{module_name}": params for module_name, params in unet_state_dict.items()}
te_state_dict = {f"{text_encoder_name}.{module_name}": params for module_name, params in te_state_dict.items()}
te2_state_dict = (
{f"text_encoder_2.{module_name}": params for module_name, params in te2_state_dict.items()}
if len(te2_state_dict) > 0
else None
)
if te2_state_dict is not None:
te_state_dict.update(te2_state_dict)
new_state_dict = {**unet_state_dict, **te_state_dict}
return new_state_dict, network_alphas
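# Illustrative example (hedged): a Kohya key such as
# `lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q.lora_down.weight`
# ends up in the returned state dict as
# `unet.down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight`,
# with its matching `lora_up.weight` stored under the same name with `.up.` in place of `.down.`.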
| diffusers/src/diffusers/loaders/lora_conversion_utils.py/0 | {
"file_path": "diffusers/src/diffusers/loaders/lora_conversion_utils.py",
"repo_id": "diffusers",
"token_count": 6713
} | 111 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import FromOriginalVAEMixin
from ...utils.accelerate_utils import apply_forward_hook
from ..attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
Attention,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
)
from ..modeling_outputs import AutoencoderKLOutput
from ..modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalVAEMixin):
r"""
A VAE model with KL loss for encoding images into latents and decoding latent representations into images.
This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
for all models (such as downloading or saving).
Parameters:
in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
out_channels (int, *optional*, defaults to 3): Number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
Tuple of downsample block types.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
Tuple of upsample block types.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
Tuple of block output channels.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
sample_size (`int`, *optional*, defaults to `32`): Sample input size.
scaling_factor (`float`, *optional*, defaults to 0.18215):
The component-wise standard deviation of the trained latent space computed using the first batch of the
training set. This is used to scale the latent space to have unit variance when training the diffusion
model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
/ scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
force_upcast (`bool`, *optional*, default to `True`):
If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
can be fine-tuned / trained to a lower range without losing too much precision, in which case
`force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
in_channels: int = 3,
out_channels: int = 3,
down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
block_out_channels: Tuple[int] = (64,),
layers_per_block: int = 1,
act_fn: str = "silu",
latent_channels: int = 4,
norm_num_groups: int = 32,
sample_size: int = 32,
scaling_factor: float = 0.18215,
force_upcast: float = True,
):
super().__init__()
# pass init params to Encoder
self.encoder = Encoder(
in_channels=in_channels,
out_channels=latent_channels,
down_block_types=down_block_types,
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
act_fn=act_fn,
norm_num_groups=norm_num_groups,
double_z=True,
)
# pass init params to Decoder
self.decoder = Decoder(
in_channels=latent_channels,
out_channels=out_channels,
up_block_types=up_block_types,
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
norm_num_groups=norm_num_groups,
act_fn=act_fn,
)
self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
self.use_slicing = False
self.use_tiling = False
# only relevant if vae tiling is enabled
self.tile_sample_min_size = self.config.sample_size
sample_size = (
self.config.sample_size[0]
if isinstance(self.config.sample_size, (list, tuple))
else self.config.sample_size
)
self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
self.tile_overlap_factor = 0.25
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (Encoder, Decoder)):
module.gradient_checkpointing = value
def enable_tiling(self, use_tiling: bool = True):
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and for
processing larger images.
"""
self.use_tiling = use_tiling
def disable_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
decoding in one step.
"""
self.enable_tiling(False)
def enable_slicing(self):
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.use_slicing = True
def disable_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
decoding in one step.
"""
self.use_slicing = False
@property
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model with
indexed by its weight name.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnAddedKVProcessor()
elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnProcessor()
else:
raise ValueError(
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
)
self.set_attn_processor(processor)
@apply_forward_hook
def encode(
self, x: torch.FloatTensor, return_dict: bool = True
) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
"""
Encode a batch of images into latents.
Args:
x (`torch.FloatTensor`): Input batch of images.
return_dict (`bool`, *optional*, defaults to `True`):
Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
Returns:
The latent representations of the encoded images. If `return_dict` is True, a
[`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(x, return_dict=return_dict)
if self.use_slicing and x.shape[0] > 1:
encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
h = torch.cat(encoded_slices)
else:
h = self.encoder(x)
moments = self.quant_conv(h)
posterior = DiagonalGaussianDistribution(moments)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=posterior)
def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(z, return_dict=return_dict)
z = self.post_quant_conv(z)
dec = self.decoder(z)
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec)
@apply_forward_hook
def decode(
self, z: torch.FloatTensor, return_dict: bool = True, generator=None
) -> Union[DecoderOutput, torch.FloatTensor]:
"""
Decode a batch of images.
Args:
z (`torch.FloatTensor`): Input batch of latent vectors.
return_dict (`bool`, *optional*, defaults to `True`):
Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
Returns:
[`~models.vae.DecoderOutput`] or `tuple`:
If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
returned.
"""
if self.use_slicing and z.shape[0] > 1:
decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
decoded = torch.cat(decoded_slices)
else:
decoded = self._decode(z).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=decoded)
def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
blend_extent = min(a.shape[2], b.shape[2], blend_extent)
for y in range(blend_extent):
b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
blend_extent = min(a.shape[3], b.shape[3], blend_extent)
for x in range(blend_extent):
b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
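# The two helpers above linearly cross-fade the overlapping border of adjacent tiles: for offset
# t inside the overlap, the output is a * (1 - t / blend_extent) + b * (t / blend_extent), so the
# previous tile fades out while the current tile fades in.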
def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
r"""Encode a batch of images using a tiled encoder.
When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is
different from non-tiled encoding because the encoder processes each tile independently. To avoid tiling artifacts, the
tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
output, but they should be much less noticeable.
Args:
x (`torch.FloatTensor`): Input batch of images.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
Returns:
[`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`:
If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain
`tuple` is returned.
"""
overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
row_limit = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
rows = []
for i in range(0, x.shape[2], overlap_size):
row = []
for j in range(0, x.shape[3], overlap_size):
tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
tile = self.encoder(tile)
tile = self.quant_conv(tile)
row.append(tile)
rows.append(row)
result_rows = []
for i, row in enumerate(rows):
result_row = []
for j, tile in enumerate(row):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
if j > 0:
tile = self.blend_h(row[j - 1], tile, blend_extent)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(result_row, dim=3))
moments = torch.cat(result_rows, dim=2)
posterior = DiagonalGaussianDistribution(moments)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=posterior)
def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
r"""
Decode a batch of images using a tiled decoder.
Args:
z (`torch.FloatTensor`): Input batch of latent vectors.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
Returns:
[`~models.vae.DecoderOutput`] or `tuple`:
If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
returned.
"""
overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
row_limit = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
rows = []
for i in range(0, z.shape[2], overlap_size):
row = []
for j in range(0, z.shape[3], overlap_size):
tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
tile = self.post_quant_conv(tile)
decoded = self.decoder(tile)
row.append(decoded)
rows.append(row)
result_rows = []
for i, row in enumerate(rows):
result_row = []
for j, tile in enumerate(row):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
if j > 0:
tile = self.blend_h(row[j - 1], tile, blend_extent)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(result_row, dim=3))
dec = torch.cat(result_rows, dim=2)
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec)
def forward(
self,
sample: torch.FloatTensor,
sample_posterior: bool = False,
return_dict: bool = True,
generator: Optional[torch.Generator] = None,
) -> Union[DecoderOutput, torch.FloatTensor]:
r"""
Args:
sample (`torch.FloatTensor`): Input sample.
sample_posterior (`bool`, *optional*, defaults to `False`):
Whether to sample from the posterior.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
"""
x = sample
posterior = self.encode(x).latent_dist
if sample_posterior:
z = posterior.sample(generator=generator)
else:
z = posterior.mode()
dec = self.decode(z).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
def fuse_qkv_projections(self):
"""
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
self.original_attn_processors = None
for _, attn_processor in self.attn_processors.items():
if "Added" in str(attn_processor.__class__.__name__):
raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
self.original_attn_processors = self.attn_processors
for module in self.modules():
if isinstance(module, Attention):
module.fuse_projections(fuse=True)
# Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
if self.original_attn_processors is not None:
self.set_attn_processor(self.original_attn_processors)
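# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of the tiling, slicing and fused-QKV paths defined above. It assumes the
# surrounding class is `AutoencoderKL`, that `enable_tiling`/`enable_slicing` (defined earlier
# in this class) toggle the `use_tiling`/`use_slicing` flags used above, and that the checkpoint
# id "stabilityai/sd-vae-ft-mse" is reachable; all of these are assumptions made for illustration only.
def _example_tiled_vae_roundtrip():
    import torch

    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
    vae.enable_tiling()   # large inputs go through tiled_encode/tiled_decode
    vae.enable_slicing()  # batched inputs are encoded/decoded one sample at a time
    vae.fuse_qkv_projections()  # optional, experimental fused QKV projections
    image = torch.randn(1, 3, 1024, 1024)
    with torch.no_grad():
        latents = vae.encode(image).latent_dist.sample()
        reconstruction = vae.decode(latents).sample
    vae.unfuse_qkv_projections()
    return reconstruction.shape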
| diffusers/src/diffusers/models/autoencoders/autoencoder_kl.py/0 | {
"file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl.py",
"repo_id": "diffusers",
"token_count": 9068
} | 112 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import itertools
import os
import re
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, List, Optional, Tuple, Union
import safetensors
import torch
from huggingface_hub import create_repo
from huggingface_hub.utils import validate_hf_hub_args
from torch import Tensor, nn
from .. import __version__
from ..utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
_add_variant,
_get_model_file,
deprecate,
is_accelerate_available,
is_torch_version,
logging,
)
from ..utils.hub_utils import PushToHubMixin, load_or_create_model_card, populate_model_card
logger = logging.get_logger(__name__)
if is_torch_version(">=", "1.9.0"):
_LOW_CPU_MEM_USAGE_DEFAULT = True
else:
_LOW_CPU_MEM_USAGE_DEFAULT = False
if is_accelerate_available():
import accelerate
from accelerate.utils import set_module_tensor_to_device
from accelerate.utils.versions import is_torch_version
def get_parameter_device(parameter: torch.nn.Module) -> torch.device:
try:
parameters_and_buffers = itertools.chain(parameter.parameters(), parameter.buffers())
return next(parameters_and_buffers).device
except StopIteration:
# For torch.nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = parameter._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
def get_parameter_dtype(parameter: torch.nn.Module) -> torch.dtype:
try:
params = tuple(parameter.parameters())
if len(params) > 0:
return params[0].dtype
buffers = tuple(parameter.buffers())
if len(buffers) > 0:
return buffers[0].dtype
except StopIteration:
# For torch.nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = parameter._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
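# Illustrative sketch (not part of the original module): how the two helpers above report the
# device and dtype of an ordinary module; `nn.Sequential` here is just a stand-in model.
def _example_parameter_device_and_dtype():
    import torch
    from torch import nn

    demo = nn.Sequential(nn.Linear(4, 4), nn.ReLU()).to(torch.float16)
    # Both helpers look at the first parameter (or buffer) they find.
    assert get_parameter_device(demo) == torch.device("cpu")
    assert get_parameter_dtype(demo) == torch.float16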
def load_state_dict(checkpoint_file: Union[str, os.PathLike], variant: Optional[str] = None):
"""
Reads a checkpoint file, returning properly formatted errors if they arise.
"""
try:
file_extension = os.path.basename(checkpoint_file).split(".")[-1]
if file_extension == SAFETENSORS_FILE_EXTENSION:
return safetensors.torch.load_file(checkpoint_file, device="cpu")
else:
return torch.load(checkpoint_file, map_location="cpu")
except Exception as e:
try:
with open(checkpoint_file) as f:
if f.read().startswith("version"):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please install "
"git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
"you cloned."
)
else:
raise ValueError(
f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained "
"model. Make sure you have saved the model properly."
) from e
except (UnicodeDecodeError, ValueError):
raise OSError(
f"Unable to load weights from checkpoint file for '{checkpoint_file}' "
f"at '{checkpoint_file}'. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True."
)
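# Illustrative sketch (not part of the original module): round-tripping a tiny state dict through
# both formats accepted by `load_state_dict`; the file names are hypothetical and only exist
# inside a temporary directory.
def _example_load_state_dict():
    import os
    import tempfile

    import safetensors.torch
    import torch

    state = {"weight": torch.ones(2, 2)}
    with tempfile.TemporaryDirectory() as tmp:
        st_path = os.path.join(tmp, "model.safetensors")
        pt_path = os.path.join(tmp, "model.bin")
        safetensors.torch.save_file(state, st_path)
        torch.save(state, pt_path)
        # The file extension decides which loader is used internally.
        assert torch.equal(load_state_dict(st_path)["weight"], state["weight"])
        assert torch.equal(load_state_dict(pt_path)["weight"], state["weight"])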
def load_model_dict_into_meta(
model,
state_dict: OrderedDict,
device: Optional[Union[str, torch.device]] = None,
dtype: Optional[Union[str, torch.dtype]] = None,
model_name_or_path: Optional[str] = None,
) -> List[str]:
device = device or torch.device("cpu")
dtype = dtype or torch.float32
accepts_dtype = "dtype" in set(inspect.signature(set_module_tensor_to_device).parameters.keys())
unexpected_keys = []
empty_state_dict = model.state_dict()
for param_name, param in state_dict.items():
if param_name not in empty_state_dict:
unexpected_keys.append(param_name)
continue
if empty_state_dict[param_name].shape != param.shape:
model_name_or_path_str = f"{model_name_or_path} " if model_name_or_path is not None else ""
raise ValueError(
f"Cannot load {model_name_or_path_str}because {param_name} expected shape {empty_state_dict[param_name]}, but got {param.shape}. If you want to instead overwrite randomly initialized weights, please make sure to pass both `low_cpu_mem_usage=False` and `ignore_mismatched_sizes=True`. For more information, see also: https://github.com/huggingface/diffusers/issues/1619#issuecomment-1345604389 as an example."
)
if accepts_dtype:
set_module_tensor_to_device(model, param_name, device, value=param, dtype=dtype)
else:
set_module_tensor_to_device(model, param_name, device, value=param)
return unexpected_keys
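# Illustrative sketch (not part of the original module): materializing a meta-initialized module
# from a plain state dict with `load_model_dict_into_meta`; requires `accelerate` to be installed.
def _example_load_model_dict_into_meta():
    import accelerate
    import torch
    from torch import nn

    template = nn.Linear(4, 2)
    state_dict = template.state_dict()
    with accelerate.init_empty_weights():
        empty_model = nn.Linear(4, 2)  # parameters are created on the "meta" device
    unexpected = load_model_dict_into_meta(empty_model, state_dict, device="cpu")
    assert unexpected == []
    assert empty_model.weight.device.type == "cpu"
    assert torch.equal(empty_model.weight, template.weight)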
def _load_state_dict_into_model(model_to_load, state_dict: OrderedDict) -> List[str]:
# Convert old format to new format if needed from a PyTorch state_dict
# copy state_dict so _load_from_state_dict can modify it
state_dict = state_dict.copy()
error_msgs = []
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: torch.nn.Module, prefix: str = ""):
args = (state_dict, prefix, {}, True, [], [], error_msgs)
module._load_from_state_dict(*args)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
load(model_to_load)
return error_msgs
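# Illustrative sketch (not part of the original module): `_load_state_dict_into_model` walks the
# module tree recursively and copies matching tensors in place, returning any error messages.
def _example_load_state_dict_into_model():
    import torch
    from torch import nn

    source = nn.Sequential(nn.Linear(3, 3))
    target = nn.Sequential(nn.Linear(3, 3))
    error_msgs = _load_state_dict_into_model(target, source.state_dict())
    assert error_msgs == []
    assert torch.equal(target[0].weight, source[0].weight)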
class ModelMixin(torch.nn.Module, PushToHubMixin):
r"""
Base class for all models.
[`ModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and
saving models.
- **config_name** ([`str`]) -- Filename to save a model to when calling [`~models.ModelMixin.save_pretrained`].
"""
config_name = CONFIG_NAME
_automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"]
_supports_gradient_checkpointing = False
_keys_to_ignore_on_load_unexpected = None
def __init__(self):
super().__init__()
def __getattr__(self, name: str) -> Any:
"""The only reason we overwrite `getattr` here is to gracefully deprecate accessing
config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 We need to overwrite
__getattr__ here in addition so that we don't trigger `torch.nn.Module`'s __getattr__':
https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module
"""
is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name)
is_attribute = name in self.__dict__
if is_in_config and not is_attribute:
deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'unet.config.{name}'."
deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False, stacklevel=3)
return self._internal_dict[name]
# call PyTorch's https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module
return super().__getattr__(name)
@property
def is_gradient_checkpointing(self) -> bool:
"""
Whether gradient checkpointing is activated for this model or not.
"""
return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules())
def enable_gradient_checkpointing(self) -> None:
"""
Activates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or
*checkpoint activations* in other frameworks).
"""
if not self._supports_gradient_checkpointing:
raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")
self.apply(partial(self._set_gradient_checkpointing, value=True))
def disable_gradient_checkpointing(self) -> None:
"""
Deactivates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or
*checkpoint activations* in other frameworks).
"""
if self._supports_gradient_checkpointing:
self.apply(partial(self._set_gradient_checkpointing, value=False))
def set_use_memory_efficient_attention_xformers(
self, valid: bool, attention_op: Optional[Callable] = None
) -> None:
# Recursively walk through all the children.
# Any children which exposes the set_use_memory_efficient_attention_xformers method
# gets the message
def fn_recursive_set_mem_eff(module: torch.nn.Module):
if hasattr(module, "set_use_memory_efficient_attention_xformers"):
module.set_use_memory_efficient_attention_xformers(valid, attention_op)
for child in module.children():
fn_recursive_set_mem_eff(child)
for module in self.children():
if isinstance(module, torch.nn.Module):
fn_recursive_set_mem_eff(module)
def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None) -> None:
r"""
Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).
When this option is enabled, you should observe lower GPU memory usage and a potential speed up during
inference. Speed up during training is not guaranteed.
<Tip warning={true}>
⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes
precedence.
</Tip>
Parameters:
attention_op (`Callable`, *optional*):
Override the default `None` operator for use as `op` argument to the
[`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention)
function of xFormers.
Examples:
```py
>>> import torch
>>> from diffusers import UNet2DConditionModel
>>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
>>> model = UNet2DConditionModel.from_pretrained(
... "stabilityai/stable-diffusion-2-1", subfolder="unet", torch_dtype=torch.float16
... )
>>> model = model.to("cuda")
>>> model.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)
```
"""
self.set_use_memory_efficient_attention_xformers(True, attention_op)
def disable_xformers_memory_efficient_attention(self) -> None:
r"""
Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).
"""
self.set_use_memory_efficient_attention_xformers(False)
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
is_main_process: bool = True,
save_function: Optional[Callable] = None,
safe_serialization: bool = True,
variant: Optional[str] = None,
push_to_hub: bool = False,
**kwargs,
):
"""
Save a model and its configuration file to a directory so that it can be reloaded using the
[`~models.ModelMixin.from_pretrained`] class method.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to save a model and its configuration file to. Will be created if it doesn't exist.
is_main_process (`bool`, *optional*, defaults to `True`):
Whether the process calling this is the main process or not. Useful during distributed training when you
need to call this function on all processes. In this case, set `is_main_process=True` only on the main
process to avoid race conditions.
save_function (`Callable`):
The function to use to save the state dictionary. Useful during distributed training when you need to
replace `torch.save` with another method. Can be configured with the environment variable
`DIFFUSERS_SAVE_MODE`.
safe_serialization (`bool`, *optional*, defaults to `True`):
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
variant (`str`, *optional*):
If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs (`Dict[str, Any]`, *optional*):
Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
os.makedirs(save_directory, exist_ok=True)
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
private = kwargs.pop("private", False)
create_pr = kwargs.pop("create_pr", False)
token = kwargs.pop("token", None)
repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id
# Only save the model itself if we are using distributed training
model_to_save = self
# Attach architecture to the config
# Save the config
if is_main_process:
model_to_save.save_config(save_directory)
# Save the model
state_dict = model_to_save.state_dict()
weights_name = SAFETENSORS_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
weights_name = _add_variant(weights_name, variant)
# Save the model
if safe_serialization:
safetensors.torch.save_file(
state_dict, os.path.join(save_directory, weights_name), metadata={"format": "pt"}
)
else:
torch.save(state_dict, os.path.join(save_directory, weights_name))
logger.info(f"Model weights saved in {os.path.join(save_directory, weights_name)}")
if push_to_hub:
# Create a new empty model card and eventually tag it
model_card = load_or_create_model_card(repo_id, token=token)
model_card = populate_model_card(model_card)
model_card.save(os.path.join(save_directory, "README.md"))
self._upload_folder(
save_directory,
repo_id,
token=token,
commit_message=commit_message,
create_pr=create_pr,
)
@classmethod
@validate_hf_hub_args
def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
r"""
Instantiate a pretrained PyTorch model from a pretrained model configuration.
The model is set in evaluation mode - `model.eval()` - by default, and dropout modules are deactivated. To
train the model, set it back in training mode with `model.train()`.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`~ModelMixin.save_pretrained`].
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.
torch_dtype (`str` or `torch.dtype`, *optional*):
Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
dtype is automatically derived from the model's weights.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
incompletely downloaded files are deleted.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info (`bool`, *optional*, defaults to `False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
from_flax (`bool`, *optional*, defaults to `False`):
Load the model weights from a Flax checkpoint save file.
subfolder (`str`, *optional*, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
mirror (`str`, *optional*):
Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
information.
device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
A map that specifies where each submodule should go. It doesn't need to be defined for each
parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
same device.
Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For
more information about each option see [designing a device
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
max_memory (`Dict`, *optional*):
A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
each GPU and the available CPU RAM if unset.
offload_folder (`str` or `os.PathLike`, *optional*):
The path to offload weights if `device_map` contains the value `"disk"`.
offload_state_dict (`bool`, *optional*):
If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
when there is some disk offload.
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
argument to `True` will raise an error.
variant (`str`, *optional*):
Load weights from a specified `variant` filename such as `"fp16"` or `"ema"`. This is ignored when
loading `from_flax`.
use_safetensors (`bool`, *optional*, defaults to `None`):
If set to `None`, the `safetensors` weights are downloaded if they're available **and** if the
`safetensors` library is installed. If set to `True`, the model is forcibly loaded from `safetensors`
weights. If set to `False`, `safetensors` weights are not loaded.
<Tip>
To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log in with
`huggingface-cli login`. You can also activate the special
["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
firewalled environment.
</Tip>
Example:
```py
from diffusers import UNet2DConditionModel
unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
```
If you get the error message below, you need to finetune the weights for your downstream task:
```bash
Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
- conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
```
"""
cache_dir = kwargs.pop("cache_dir", None)
ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
force_download = kwargs.pop("force_download", False)
from_flax = kwargs.pop("from_flax", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", None)
token = kwargs.pop("token", None)
revision = kwargs.pop("revision", None)
torch_dtype = kwargs.pop("torch_dtype", None)
subfolder = kwargs.pop("subfolder", None)
device_map = kwargs.pop("device_map", None)
max_memory = kwargs.pop("max_memory", None)
offload_folder = kwargs.pop("offload_folder", None)
offload_state_dict = kwargs.pop("offload_state_dict", False)
low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
variant = kwargs.pop("variant", None)
use_safetensors = kwargs.pop("use_safetensors", None)
allow_pickle = False
if use_safetensors is None:
use_safetensors = True
allow_pickle = True
if low_cpu_mem_usage and not is_accelerate_available():
low_cpu_mem_usage = False
logger.warning(
"Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
" environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
" `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
" install accelerate\n```\n."
)
if device_map is not None and not is_accelerate_available():
raise NotImplementedError(
"Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set"
" `device_map=None`. You can install accelerate with `pip install accelerate`."
)
# Check if we can handle device_map and dispatching the weights
if device_map is not None and not is_torch_version(">=", "1.9.0"):
raise NotImplementedError(
"Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set"
" `device_map=None`."
)
if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
raise NotImplementedError(
"Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
" `low_cpu_mem_usage=False`."
)
if low_cpu_mem_usage is False and device_map is not None:
raise ValueError(
f"You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and"
" dispatching. Please make sure to set `low_cpu_mem_usage=True`."
)
# Load config if we don't provide a configuration
config_path = pretrained_model_name_or_path
user_agent = {
"diffusers": __version__,
"file_type": "model",
"framework": "pytorch",
}
# load config
config, unused_kwargs, commit_hash = cls.load_config(
config_path,
cache_dir=cache_dir,
return_unused_kwargs=True,
return_commit_hash=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
token=token,
revision=revision,
subfolder=subfolder,
device_map=device_map,
max_memory=max_memory,
offload_folder=offload_folder,
offload_state_dict=offload_state_dict,
user_agent=user_agent,
**kwargs,
)
# load model
model_file = None
if from_flax:
model_file = _get_model_file(
pretrained_model_name_or_path,
weights_name=FLAX_WEIGHTS_NAME,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
token=token,
revision=revision,
subfolder=subfolder,
user_agent=user_agent,
commit_hash=commit_hash,
)
model = cls.from_config(config, **unused_kwargs)
# Convert the weights
from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model
model = load_flax_checkpoint_in_pytorch_model(model, model_file)
else:
if use_safetensors:
try:
model_file = _get_model_file(
pretrained_model_name_or_path,
weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant),
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
token=token,
revision=revision,
subfolder=subfolder,
user_agent=user_agent,
commit_hash=commit_hash,
)
except IOError as e:
if not allow_pickle:
raise e
pass
if model_file is None:
model_file = _get_model_file(
pretrained_model_name_or_path,
weights_name=_add_variant(WEIGHTS_NAME, variant),
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
token=token,
revision=revision,
subfolder=subfolder,
user_agent=user_agent,
commit_hash=commit_hash,
)
if low_cpu_mem_usage:
# Instantiate model with empty weights
with accelerate.init_empty_weights():
model = cls.from_config(config, **unused_kwargs)
# if device_map is None, load the state dict and move the params from meta device to the cpu
if device_map is None:
param_device = "cpu"
state_dict = load_state_dict(model_file, variant=variant)
model._convert_deprecated_attention_blocks(state_dict)
# move the params from meta device to cpu
missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
if len(missing_keys) > 0:
raise ValueError(
f"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are"
f" missing: \n {', '.join(missing_keys)}. \n Please make sure to pass"
" `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize"
" those weights or else make sure your checkpoint file is correct."
)
unexpected_keys = load_model_dict_into_meta(
model,
state_dict,
device=param_device,
dtype=torch_dtype,
model_name_or_path=pretrained_model_name_or_path,
)
if cls._keys_to_ignore_on_load_unexpected is not None:
for pat in cls._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {', '.join(unexpected_keys)}"
)
else: # else let accelerate handle loading and dispatching.
# Load weights and dispatch according to the device_map
# by default the device_map is None and the weights are loaded on the CPU
try:
accelerate.load_checkpoint_and_dispatch(
model,
model_file,
device_map,
max_memory=max_memory,
offload_folder=offload_folder,
offload_state_dict=offload_state_dict,
dtype=torch_dtype,
)
except AttributeError as e:
# When using accelerate loading, we do not have the ability to load the state
# dict and rename the weight names manually. Additionally, accelerate skips
# torch loading conventions and directly writes into `module.{_buffers, _parameters}`
# (which look like they should be private variables?), so we can't use the standard hooks
# to rename parameters on load. We need to mimic the original weight names so the correct
# attributes are available. After we have loaded the weights, we convert the deprecated
# names to the new non-deprecated names. Then we _greatly encourage_ the user to convert
# the weights so we don't have to do this again.
if "'Attention' object has no attribute" in str(e):
logger.warning(
f"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}"
" was saved with deprecated attention block weight names. We will load it with the deprecated attention block"
" names and convert them on the fly to the new attention block format. Please re-save the model after this conversion,"
" so we don't have to do the on the fly renaming in the future. If the model is from a hub checkpoint,"
" please also re-upload it or open a PR on the original repository."
)
model._temp_convert_self_to_deprecated_attention_blocks()
accelerate.load_checkpoint_and_dispatch(
model,
model_file,
device_map,
max_memory=max_memory,
offload_folder=offload_folder,
offload_state_dict=offload_state_dict,
dtype=torch_dtype,
)
model._undo_temp_convert_self_to_deprecated_attention_blocks()
else:
raise e
loading_info = {
"missing_keys": [],
"unexpected_keys": [],
"mismatched_keys": [],
"error_msgs": [],
}
else:
model = cls.from_config(config, **unused_kwargs)
state_dict = load_state_dict(model_file, variant=variant)
model._convert_deprecated_attention_blocks(state_dict)
model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(
model,
state_dict,
model_file,
pretrained_model_name_or_path,
ignore_mismatched_sizes=ignore_mismatched_sizes,
)
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"mismatched_keys": mismatched_keys,
"error_msgs": error_msgs,
}
if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype):
raise ValueError(
f"{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}."
)
elif torch_dtype is not None:
model = model.to(torch_dtype)
model.register_to_config(_name_or_path=pretrained_model_name_or_path)
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
return model, loading_info
return model
@classmethod
def _load_pretrained_model(
cls,
model,
state_dict: OrderedDict,
resolved_archive_file,
pretrained_model_name_or_path: Union[str, os.PathLike],
ignore_mismatched_sizes: bool = False,
):
# Retrieve missing & unexpected_keys
model_state_dict = model.state_dict()
loaded_keys = list(state_dict.keys())
expected_keys = list(model_state_dict.keys())
original_loaded_keys = loaded_keys
missing_keys = list(set(expected_keys) - set(loaded_keys))
unexpected_keys = list(set(loaded_keys) - set(expected_keys))
# Make sure we are able to load base models as well as derived models (with heads)
model_to_load = model
def _find_mismatched_keys(
state_dict,
model_state_dict,
loaded_keys,
ignore_mismatched_sizes,
):
mismatched_keys = []
if ignore_mismatched_sizes:
for checkpoint_key in loaded_keys:
model_key = checkpoint_key
if (
model_key in model_state_dict
and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape
):
mismatched_keys.append(
(checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)
)
del state_dict[checkpoint_key]
return mismatched_keys
if state_dict is not None:
# Whole checkpoint
mismatched_keys = _find_mismatched_keys(
state_dict,
model_state_dict,
original_loaded_keys,
ignore_mismatched_sizes,
)
error_msgs = _load_state_dict_into_model(model_to_load, state_dict)
if len(error_msgs) > 0:
error_msg = "\n\t".join(error_msgs)
if "size mismatch" in error_msg:
error_msg += (
"\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method."
)
raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task"
" or with another architecture (e.g. initializing a BertForSequenceClassification model from a"
" BertForPreTraining model).\n- This IS NOT expected if you are initializing"
f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly"
" identical (initializing a BertForSequenceClassification model from a"
" BertForSequenceClassification model)."
)
else:
logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
" TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
elif len(mismatched_keys) == 0:
logger.info(
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the"
f" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions"
" without further training."
)
if len(mismatched_keys) > 0:
mismatched_warning = "\n".join(
[
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
for key, shape1, shape2 in mismatched_keys
]
)
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not"
f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be"
" able to use it for predictions and inference."
)
return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs
@property
def device(self) -> torch.device:
"""
`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
device).
"""
return get_parameter_device(self)
@property
def dtype(self) -> torch.dtype:
"""
`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
return get_parameter_dtype(self)
def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
"""
Get number of (trainable or non-embedding) parameters in the module.
Args:
only_trainable (`bool`, *optional*, defaults to `False`):
Whether or not to return only the number of trainable parameters.
exclude_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to return only the number of non-embedding parameters.
Returns:
`int`: The number of parameters.
Example:
```py
from diffusers import UNet2DConditionModel
model_id = "runwayml/stable-diffusion-v1-5"
unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet")
unet.num_parameters(only_trainable=True)
859520964
```
"""
if exclude_embeddings:
embedding_param_names = [
f"{name}.weight"
for name, module_type in self.named_modules()
if isinstance(module_type, torch.nn.Embedding)
]
non_embedding_parameters = [
parameter for name, parameter in self.named_parameters() if name not in embedding_param_names
]
return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)
else:
return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)
def _convert_deprecated_attention_blocks(self, state_dict: OrderedDict) -> None:
deprecated_attention_block_paths = []
def recursive_find_attn_block(name, module):
if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block:
deprecated_attention_block_paths.append(name)
for sub_name, sub_module in module.named_children():
sub_name = sub_name if name == "" else f"{name}.{sub_name}"
recursive_find_attn_block(sub_name, sub_module)
recursive_find_attn_block("", self)
# NOTE: we have to check if the deprecated parameters are in the state dict
# because it is possible we are loading from a state dict that was already
# converted
for path in deprecated_attention_block_paths:
# group_norm path stays the same
# query -> to_q
if f"{path}.query.weight" in state_dict:
state_dict[f"{path}.to_q.weight"] = state_dict.pop(f"{path}.query.weight")
if f"{path}.query.bias" in state_dict:
state_dict[f"{path}.to_q.bias"] = state_dict.pop(f"{path}.query.bias")
# key -> to_k
if f"{path}.key.weight" in state_dict:
state_dict[f"{path}.to_k.weight"] = state_dict.pop(f"{path}.key.weight")
if f"{path}.key.bias" in state_dict:
state_dict[f"{path}.to_k.bias"] = state_dict.pop(f"{path}.key.bias")
# value -> to_v
if f"{path}.value.weight" in state_dict:
state_dict[f"{path}.to_v.weight"] = state_dict.pop(f"{path}.value.weight")
if f"{path}.value.bias" in state_dict:
state_dict[f"{path}.to_v.bias"] = state_dict.pop(f"{path}.value.bias")
# proj_attn -> to_out.0
if f"{path}.proj_attn.weight" in state_dict:
state_dict[f"{path}.to_out.0.weight"] = state_dict.pop(f"{path}.proj_attn.weight")
if f"{path}.proj_attn.bias" in state_dict:
state_dict[f"{path}.to_out.0.bias"] = state_dict.pop(f"{path}.proj_attn.bias")
def _temp_convert_self_to_deprecated_attention_blocks(self) -> None:
deprecated_attention_block_modules = []
def recursive_find_attn_block(module):
if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block:
deprecated_attention_block_modules.append(module)
for sub_module in module.children():
recursive_find_attn_block(sub_module)
recursive_find_attn_block(self)
for module in deprecated_attention_block_modules:
module.query = module.to_q
module.key = module.to_k
module.value = module.to_v
module.proj_attn = module.to_out[0]
# We don't _have_ to delete the old attributes, but it's helpful to ensure
# that _all_ the weights are loaded into the new attributes and we're not
# making an incorrect assumption that this model should be converted when
# it really shouldn't be.
del module.to_q
del module.to_k
del module.to_v
del module.to_out
def _undo_temp_convert_self_to_deprecated_attention_blocks(self) -> None:
deprecated_attention_block_modules = []
def recursive_find_attn_block(module) -> None:
if hasattr(module, "_from_deprecated_attn_block") and module._from_deprecated_attn_block:
deprecated_attention_block_modules.append(module)
for sub_module in module.children():
recursive_find_attn_block(sub_module)
recursive_find_attn_block(self)
for module in deprecated_attention_block_modules:
module.to_q = module.query
module.to_k = module.key
module.to_v = module.value
module.to_out = nn.ModuleList([module.proj_attn, nn.Dropout(module.dropout)])
del module.query
del module.key
del module.value
del module.proj_attn
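# --- Illustrative usage sketch (not part of the original module) ---
# The typical ModelMixin lifecycle, shown with UNet2DConditionModel and the same checkpoint id
# used in the docstrings above; the local directory name is an assumption made for illustration.
def _example_model_mixin_roundtrip(save_dir: str = "./unet-copy"):
    import torch

    from diffusers import UNet2DConditionModel

    model = UNet2DConditionModel.from_pretrained(
        "runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16
    )
    print(model.device, model.dtype, model.num_parameters(only_trainable=True))
    model.save_pretrained(save_dir, safe_serialization=True)  # writes config + safetensors weights
    return UNet2DConditionModel.from_pretrained(save_dir, torch_dtype=torch.float16)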
| diffusers/src/diffusers/models/modeling_utils.py/0 | {
"file_path": "diffusers/src/diffusers/models/modeling_utils.py",
"repo_id": "diffusers",
"token_count": 21784
} | 113 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import deprecate
from .unets.unet_2d import UNet2DModel, UNet2DOutput
class UNet2DOutput(UNet2DOutput):
deprecation_message = "Importing `UNet2DOutput` from `diffusers.models.unet_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d import UNet2DOutput`, instead."
deprecate("UNet2DOutput", "0.29", deprecation_message)
class UNet2DModel(UNet2DModel):
deprecation_message = "Importing `UNet2DModel` from `diffusers.models.unet_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d import UNet2DModel`, instead."
deprecate("UNet2DModel", "0.29", deprecation_message)
| diffusers/src/diffusers/models/unet_2d.py/0 | {
"file_path": "diffusers/src/diffusers/models/unet_2d.py",
"repo_id": "diffusers",
"token_count": 411
} | 114 |