Columns: text (string, lengths 7 to 318k); id (string, lengths 14 to 166); metadata (dict); __index_level_0__ (int64, 0 to 439)
# Using the hub

Install the [`hf-hub`](https://github.com/huggingface/hf-hub) crate:

```bash
cargo add hf-hub
```

Then let's start by downloading the [model file](https://huggingface.co/bert-base-uncased/tree/main).

```rust
# extern crate candle_core;
# extern crate hf_hub;
use hf_hub::api::sync::Api;
use candle_core::Device;

let api = Api::new().unwrap();
let repo = api.model("bert-base-uncased".to_string());

let weights = repo.get("model.safetensors").unwrap();

let weights = candle_core::safetensors::load(weights, &Device::Cpu).unwrap();
```

We now have access to all the [tensors](https://huggingface.co/bert-base-uncased?show_tensors=true) within the file.
You can check all the names of the tensors [here](https://huggingface.co/bert-base-uncased?show_tensors=true).

## Using async

`hf-hub` comes with an async API.

```bash
cargo add hf-hub --features tokio
```

```rust,ignore
# This is tested directly in the examples crate because it needs external dependencies unfortunately:
# See [this](https://github.com/rust-lang/mdBook/issues/706)
{{#include ../lib.rs:book_hub_1}}
```

## Using in a real model

Now that we have our weights, we can use them in our BERT architecture:

```rust
# extern crate candle_core;
# extern crate candle_nn;
# extern crate hf_hub;
# use hf_hub::api::sync::Api;
#
# let api = Api::new().unwrap();
# let repo = api.model("bert-base-uncased".to_string());
#
# let weights = repo.get("model.safetensors").unwrap();
use candle_core::{Device, Tensor, DType};
use candle_nn::{Linear, Module};

let weights = candle_core::safetensors::load(weights, &Device::Cpu).unwrap();

let weight = weights.get("bert.encoder.layer.0.attention.self.query.weight").unwrap();
let bias = weights.get("bert.encoder.layer.0.attention.self.query.bias").unwrap();

let linear = Linear::new(weight.clone(), Some(bias.clone()));

let input_ids = Tensor::zeros((3, 768), DType::F32, &Device::Cpu).unwrap();
let output = linear.forward(&input_ids).unwrap();
```

For a full reference, you can check out the full [bert](https://github.com/LaurentMazare/candle/tree/main/candle-examples/examples/bert) example.

## Memory mapping

For more efficient loading, instead of reading the file, you could use [`memmap2`](https://docs.rs/memmap2/latest/memmap2/).

**Note**: Be careful with memory mapping: it seems to cause issues on [Windows and WSL](https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/5893), and it will definitely be slower on a network-mounted disk because it issues more read calls.

```rust,ignore
{{#include ../lib.rs:book_hub_2}}
```

**Note**: This operation is **unsafe**. [See the safety notice](https://docs.rs/memmap2/latest/memmap2/struct.Mmap.html#safety). In practice, model files should never be modified, and the mmaps should be mostly READONLY anyway, so the caveat most likely does not apply, but always keep it in mind.

## Tensor Parallel Sharding

When using multiple GPUs with tensor parallelism in order to get good latency, you can load only the part of the tensor you need. For that you need to use [`safetensors`](https://crates.io/crates/safetensors) directly.

```bash
cargo add safetensors
```

```rust,ignore
{{#include ../lib.rs:book_hub_3}}
```
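As a complement to the snippets above, here is a minimal stand-in that ties the download and memory-mapping steps together: it fetches `model.safetensors` with `hf-hub`, maps it with `memmap2`, and lists the tensor names with the `safetensors` crate. It is not the book's actual `lib.rs` include (those are only referenced above), just an illustrative sketch assuming `cargo add hf-hub memmap2 safetensors`.

```rust
// Illustrative stand-in (not the book's lib.rs snippet): download the weights,
// memory-map the file, and list the tensor names via the `safetensors` crate.
use hf_hub::api::sync::Api;
use memmap2::MmapOptions;
use safetensors::SafeTensors;
use std::fs::File;

fn main() {
    let api = Api::new().unwrap();
    let repo = api.model("bert-base-uncased".to_string());
    let path = repo.get("model.safetensors").unwrap();

    let file = File::open(path).unwrap();
    // SAFETY: we assume the file is not modified while it is mapped.
    let mmap = unsafe { MmapOptions::new().map(&file).unwrap() };

    let tensors = SafeTensors::deserialize(&mmap).unwrap();
    for name in tensors.names() {
        println!("{name}");
    }
}
```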
candle/candle-book/src/inference/hub.md/0
{ "file_path": "candle/candle-book/src/inference/hub.md", "repo_id": "candle", "token_count": 1098 }
11
use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{black_box, criterion_group, Criterion, Throughput};
use std::time::Instant;

fn rand_uniform(a: &Tensor) {
    a.rand_like(-1.0, 123.0).unwrap();
}

fn rand_normal(a: &Tensor) {
    a.randn_like(100.0, 15.0).unwrap();
}

fn run_random_bench(c: &mut Criterion, device: &Device) {
    let b = 1;
    let rows = 2048;
    let cols = 2048;

    let dtype = DType::F32;
    let tensor = Tensor::zeros((b, rows, cols), dtype, device).unwrap();

    let flops = b * rows * cols * dtype.size_in_bytes();

    let mut group = c.benchmark_group(device.bench_name("random_uniform"));
    group.throughput(Throughput::Bytes(flops as u64));
    group.bench_function("iter", move |benches| {
        benches.iter_custom(|iters| {
            let start = Instant::now();
            for _i in 0..iters {
                rand_uniform(black_box(&tensor));
            }
            device.sync().unwrap();
            start.elapsed()
        })
    });
    group.finish();

    let tensor = Tensor::zeros((b, rows, cols), dtype, device).unwrap();

    let mut group = c.benchmark_group(device.bench_name("random_normal"));
    group.throughput(Throughput::Bytes(flops as u64));
    group.bench_function("iter", move |benches| {
        benches.iter_custom(|iters| {
            let start = Instant::now();
            for _i in 0..iters {
                rand_normal(black_box(&tensor));
            }
            device.sync().unwrap();
            start.elapsed()
        })
    });
    group.finish();
}

fn criterion_benchmark(c: &mut Criterion) {
    let handler = BenchDeviceHandler::new().unwrap();
    for device in handler.devices {
        run_random_bench(c, &device);
    }
}

criterion_group!(benches, criterion_benchmark);
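A note on the throughput figure: despite its name, the `flops` variable counts bytes (element count multiplied by `dtype.size_in_bytes()`), which is why it is fed into `Throughput::Bytes`. A small stand-alone check of the arithmetic, assuming the 1 x 2048 x 2048 f32 configuration used above:

```rust
// Worked check of the per-iteration byte count reported by the benchmark above.
fn main() {
    let (b, rows, cols) = (1usize, 2048usize, 2048usize);
    let bytes = b * rows * cols * std::mem::size_of::<f32>();
    // 1 * 2048 * 2048 * 4 = 16 MiB generated per benchmark iteration.
    assert_eq!(bytes, 16 * 1024 * 1024);
    println!("{bytes} bytes per iteration");
}
```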
candle/candle-core/benches/benchmarks/random.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/random.rs", "repo_id": "candle", "token_count": 812 }
12
use super::Cpu;
use core::arch::wasm32::*;

pub struct CurrentCpu {}

const STEP: usize = 16;
const EPR: usize = 4;
const ARR: usize = STEP / EPR;

impl Cpu<ARR> for CurrentCpu {
    type Unit = v128;
    type Array = [v128; ARR];

    const STEP: usize = STEP;
    const EPR: usize = EPR;

    fn n() -> usize {
        ARR
    }

    unsafe fn zero() -> Self::Unit {
        f32x4_splat(0.0)
    }

    unsafe fn zero_array() -> Self::Array {
        [Self::zero(); ARR]
    }

    unsafe fn from_f32(v: f32) -> Self::Unit {
        f32x4_splat(v)
    }

    unsafe fn load(mem_addr: *const f32) -> Self::Unit {
        v128_load(mem_addr as *mut v128)
    }

    unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit {
        f32x4_add(a, b)
    }

    unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit {
        f32x4_add(f32x4_mul(b, c), a)
    }

    unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit) {
        v128_store(mem_addr as *mut v128, a);
    }

    unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) {
        for i in 0..ARR / 2 {
            x[2 * i] = f32x4_add(x[2 * i], x[2 * i + 1]);
        }
        for i in 0..ARR / 4 {
            x[4 * i] = f32x4_add(x[4 * i], x[4 * i + 2]);
        }
        for i in 0..ARR / 8 {
            x[8 * i] = f32x4_add(x[8 * i], x[8 * i + 4]);
        }
        *y = f32x4_extract_lane::<0>(x[0])
            + f32x4_extract_lane::<1>(x[0])
            + f32x4_extract_lane::<2>(x[0])
            + f32x4_extract_lane::<3>(x[0]);
    }
}
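The `vec_reduce` method above folds the `ARR` accumulators pairwise into `x[0]` and then does a horizontal add across the four lanes. A plain-Rust sketch of the same reduction, using scalar arrays instead of wasm32 `v128` values, purely to illustrate the access pattern:

```rust
// Hedged scalar analogue of the pairwise reduction above: ARR accumulators of
// 4 lanes each are folded into accumulator 0, then summed across lanes.
fn vec_reduce_scalar(mut x: [[f32; 4]; 4]) -> f32 {
    const ARR: usize = 4;
    for i in 0..ARR / 2 {
        for lane in 0..4 {
            x[2 * i][lane] += x[2 * i + 1][lane];
        }
    }
    for i in 0..ARR / 4 {
        for lane in 0..4 {
            x[4 * i][lane] += x[4 * i + 2][lane];
        }
    }
    // ARR / 8 == 0 when ARR = 4, so the third pass is a no-op here.
    x[0].iter().sum()
}

fn main() {
    let acc = [[1.0f32; 4]; 4]; // 16 lanes, each holding 1.0
    assert_eq!(vec_reduce_scalar(acc), 16.0);
}
```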
candle/candle-core/src/cpu/simd128.rs/0
{ "file_path": "candle/candle-core/src/cpu/simd128.rs", "repo_id": "candle", "token_count": 839 }
13
#![allow(clippy::redundant_closure_call)] use crate::{CpuStorage, CudaStorage, Layout, MetalStorage, Result, Shape, Tensor}; use half::{bf16, f16}; use num_traits::float::Float; #[derive(Clone, Copy, PartialEq, Eq)] pub enum CmpOp { Eq, Ne, Le, Ge, Lt, Gt, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ReduceOp { Sum, Min, Max, ArgMin, ArgMax, } impl ReduceOp { pub(crate) fn name(&self) -> &'static str { match self { Self::ArgMax => "argmax", Self::ArgMin => "argmin", Self::Min => "min", Self::Max => "max", Self::Sum => "sum", } } } // These ops return the same type as their input type. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BinaryOp { Add, Mul, Sub, Div, Maximum, Minimum, } // Unary ops with no argument #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UnaryOp { Exp, Log, Sin, Cos, Abs, Neg, Recip, Sqr, Sqrt, Gelu, GeluErf, Erf, Relu, Tanh, Floor, Ceil, Round, } #[derive(Clone)] pub enum Op { Binary(Tensor, Tensor, BinaryOp), Unary(Tensor, UnaryOp), Cmp(Tensor, CmpOp), // The third argument is the reduced shape with `keepdim=true`. Reduce(Tensor, ReduceOp, Vec<usize>), Matmul(Tensor, Tensor), Gather(Tensor, Tensor, usize), ScatterAdd(Tensor, Tensor, Tensor, usize), IndexSelect(Tensor, Tensor, usize), IndexAdd(Tensor, Tensor, Tensor, usize), WhereCond(Tensor, Tensor, Tensor), #[allow(dead_code)] Conv1D { arg: Tensor, kernel: Tensor, padding: usize, stride: usize, dilation: usize, }, #[allow(dead_code)] ConvTranspose1D { arg: Tensor, kernel: Tensor, padding: usize, output_padding: usize, stride: usize, dilation: usize, }, #[allow(dead_code)] Conv2D { arg: Tensor, kernel: Tensor, padding: usize, stride: usize, dilation: usize, }, #[allow(dead_code)] ConvTranspose2D { arg: Tensor, kernel: Tensor, padding: usize, output_padding: usize, stride: usize, dilation: usize, }, AvgPool2D { arg: Tensor, kernel_size: (usize, usize), stride: (usize, usize), }, MaxPool2D { arg: Tensor, kernel_size: (usize, usize), stride: (usize, usize), }, UpsampleNearest1D(Tensor), UpsampleNearest2D { arg: Tensor, target_h: usize, target_w: usize, }, Cat(Vec<Tensor>, usize), #[allow(dead_code)] // add is currently unused. Affine { arg: Tensor, mul: f64, add: f64, }, ToDType(Tensor), Copy(Tensor), Broadcast(Tensor), Narrow(Tensor, usize, usize, usize), SliceScatter0(Tensor, Tensor, usize), Reshape(Tensor), ToDevice(Tensor), Transpose(Tensor, usize, usize), Permute(Tensor, Vec<usize>), Elu(Tensor, f64), Powf(Tensor, f64), CustomOp1(Tensor, std::sync::Arc<Box<dyn CustomOp1 + Send + Sync>>), CustomOp2( Tensor, Tensor, std::sync::Arc<Box<dyn CustomOp2 + Send + Sync>>, ), CustomOp3( Tensor, Tensor, Tensor, std::sync::Arc<Box<dyn CustomOp3 + Send + Sync>>, ), } /// Unary ops that can be defined in user-land. pub trait CustomOp1 { // Box<dyn> does not support const yet, so use a function to get the name. fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cuda_fwd(&self, _storage: &CudaStorage, _layout: &Layout) -> Result<(CudaStorage, Shape)> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. 
Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn metal_fwd( &self, _storage: &MetalStorage, _layout: &Layout, ) -> Result<(MetalStorage, Shape)> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } /// This function takes as argument the argument `arg` used in the forward pass, the result /// produced by the forward operation `res` and the gradient of the result `grad_res`. /// The function should return the gradient of the argument. fn bwd(&self, _arg: &Tensor, _res: &Tensor, _grad_res: &Tensor) -> Result<Option<Tensor>> { Err(crate::Error::BackwardNotSupported { op: self.name() }) } } pub trait CustomOp2 { fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, ) -> Result<(CpuStorage, Shape)>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cuda_fwd( &self, _: &CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout, ) -> Result<(CudaStorage, Shape)> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn metal_fwd( &self, _: &MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, ) -> Result<(MetalStorage, Shape)> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } fn bwd( &self, _arg1: &Tensor, _arg2: &Tensor, _res: &Tensor, _grad_res: &Tensor, ) -> Result<(Option<Tensor>, Option<Tensor>)> { Err(crate::Error::BackwardNotSupported { op: self.name() }) } } pub trait CustomOp3 { fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cuda_fwd( &self, _: &CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout, ) -> Result<(CudaStorage, Shape)> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. 
fn metal_fwd( &self, _: &MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, ) -> Result<(MetalStorage, Shape)> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } fn bwd( &self, _arg1: &Tensor, _arg2: &Tensor, _arg3: &Tensor, _res: &Tensor, _grad_res: &Tensor, ) -> Result<(Option<Tensor>, Option<Tensor>, Option<Tensor>)> { Err(crate::Error::BackwardNotSupported { op: self.name() }) } } pub trait UnaryOpT { const NAME: &'static str; const KERNEL: &'static str; const V: Self; fn bf16(v1: bf16) -> bf16; fn f16(v1: f16) -> f16; fn f32(v1: f32) -> f32; fn f64(v1: f64) -> f64; fn u8(v1: u8) -> u8; fn u32(v1: u32) -> u32; fn i64(v1: i64) -> i64; // There is no very good way to represent optional function in traits so we go for an explicit // boolean flag to mark the function as existing. const BF16_VEC: bool = false; fn bf16_vec(_xs: &[bf16], _ys: &mut [bf16]) {} const F16_VEC: bool = false; fn f16_vec(_xs: &[f16], _ys: &mut [f16]) {} const F32_VEC: bool = false; fn f32_vec(_xs: &[f32], _ys: &mut [f32]) {} const F64_VEC: bool = false; fn f64_vec(_xs: &[f64], _ys: &mut [f64]) {} } pub trait BinaryOpT { const NAME: &'static str; const KERNEL: &'static str; const V: Self; fn bf16(v1: bf16, v2: bf16) -> bf16; fn f16(v1: f16, v2: f16) -> f16; fn f32(v1: f32, v2: f32) -> f32; fn f64(v1: f64, v2: f64) -> f64; fn u8(v1: u8, v2: u8) -> u8; fn u32(v1: u32, v2: u32) -> u32; fn i64(v1: i64, v2: i64) -> i64; const BF16_VEC: bool = false; fn bf16_vec(_xs1: &[bf16], _xs2: &[bf16], _ys: &mut [bf16]) {} const F16_VEC: bool = false; fn f16_vec(_xs1: &[f16], _xs2: &[f16], _ys: &mut [f16]) {} const F32_VEC: bool = false; fn f32_vec(_xs1: &[f32], _xs2: &[f32], _ys: &mut [f32]) {} const F64_VEC: bool = false; fn f64_vec(_xs1: &[f64], _xs2: &[f64], _ys: &mut [f64]) {} const U8_VEC: bool = false; fn u8_vec(_xs1: &[u8], _xs2: &[u8], _ys: &mut [u8]) {} const U32_VEC: bool = false; fn u32_vec(_xs1: &[u32], _xs2: &[u32], _ys: &mut [u32]) {} const I64_VEC: bool = false; fn i64_vec(_xs1: &[i64], _xs2: &[i64], _ys: &mut [i64]) {} } pub(crate) struct Add; pub(crate) struct Div; pub(crate) struct Mul; pub(crate) struct Sub; pub(crate) struct Maximum; pub(crate) struct Minimum; pub(crate) struct Exp; pub(crate) struct Log; pub(crate) struct Sin; pub(crate) struct Cos; pub(crate) struct Abs; pub(crate) struct Neg; pub(crate) struct Recip; pub(crate) struct Sqr; pub(crate) struct Sqrt; pub(crate) struct Gelu; pub(crate) struct GeluErf; pub(crate) struct Erf; pub(crate) struct Relu; pub(crate) struct Tanh; pub(crate) struct Floor; pub(crate) struct Ceil; pub(crate) struct Round; macro_rules! 
bin_op { ($op:ident, $name: literal, $e: expr, $f32_vec: ident, $f64_vec: ident) => { impl BinaryOpT for $op { const NAME: &'static str = $name; const KERNEL: &'static str = concat!("b", $name); const V: Self = $op; #[inline(always)] fn bf16(v1: bf16, v2: bf16) -> bf16 { $e(v1, v2) } #[inline(always)] fn f16(v1: f16, v2: f16) -> f16 { $e(v1, v2) } #[inline(always)] fn f32(v1: f32, v2: f32) -> f32 { $e(v1, v2) } #[inline(always)] fn f64(v1: f64, v2: f64) -> f64 { $e(v1, v2) } #[inline(always)] fn u8(v1: u8, v2: u8) -> u8 { $e(v1, v2) } #[inline(always)] fn u32(v1: u32, v2: u32) -> u32 { $e(v1, v2) } #[inline(always)] fn i64(v1: i64, v2: i64) -> i64 { $e(v1, v2) } #[cfg(feature = "mkl")] const F32_VEC: bool = true; #[cfg(feature = "mkl")] const F64_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f32_vec(xs1: &[f32], xs2: &[f32], ys: &mut [f32]) { crate::mkl::$f32_vec(xs1, xs2, ys) } #[cfg(feature = "mkl")] #[inline(always)] fn f64_vec(xs1: &[f64], xs2: &[f64], ys: &mut [f64]) { crate::mkl::$f64_vec(xs1, xs2, ys) } #[cfg(feature = "accelerate")] const F32_VEC: bool = true; #[cfg(feature = "accelerate")] const F64_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f32_vec(xs1: &[f32], xs2: &[f32], ys: &mut [f32]) { crate::accelerate::$f32_vec(xs1, xs2, ys) } #[cfg(feature = "accelerate")] #[inline(always)] fn f64_vec(xs1: &[f64], xs2: &[f64], ys: &mut [f64]) { crate::accelerate::$f64_vec(xs1, xs2, ys) } } }; } bin_op!(Add, "add", |v1, v2| v1 + v2, vs_add, vd_add); bin_op!(Sub, "sub", |v1, v2| v1 - v2, vs_sub, vd_sub); bin_op!(Mul, "mul", |v1, v2| v1 * v2, vs_mul, vd_mul); bin_op!(Div, "div", |v1, v2| v1 / v2, vs_div, vd_div); bin_op!( Minimum, "minimum", |v1, v2| if v1 > v2 { v2 } else { v1 }, vs_min, vd_min ); bin_op!( Maximum, "maximum", |v1, v2| if v1 < v2 { v2 } else { v1 }, vs_max, vd_max ); #[allow(clippy::redundant_closure_call)] macro_rules! 
unary_op { ($op: ident, $name: literal, $a: ident, $e: expr) => { impl UnaryOpT for $op { const NAME: &'static str = $name; const KERNEL: &'static str = concat!("u", $name); const V: Self = $op; #[inline(always)] fn bf16($a: bf16) -> bf16 { $e } #[inline(always)] fn f16($a: f16) -> f16 { $e } #[inline(always)] fn f32($a: f32) -> f32 { $e } #[inline(always)] fn f64($a: f64) -> f64 { $e } #[inline(always)] fn u8(_: u8) -> u8 { todo!("no unary function for u8") } #[inline(always)] fn u32(_: u32) -> u32 { todo!("no unary function for u32") } #[inline(always)] fn i64(_: i64) -> i64 { todo!("no unary function for i64") } } }; ($op: ident, $name: literal, $a: ident, $e: expr, $f32_vec:ident, $f64_vec:ident) => { impl UnaryOpT for $op { const NAME: &'static str = $name; const KERNEL: &'static str = concat!("u", $name); const V: Self = $op; #[inline(always)] fn bf16($a: bf16) -> bf16 { $e } #[inline(always)] fn f16($a: f16) -> f16 { $e } #[inline(always)] fn f32($a: f32) -> f32 { $e } #[inline(always)] fn f64($a: f64) -> f64 { $e } #[inline(always)] fn u8(_: u8) -> u8 { todo!("no unary function for u8") } #[inline(always)] fn u32(_: u32) -> u32 { todo!("no unary function for u32") } #[inline(always)] fn i64(_: i64) -> i64 { todo!("no unary function for i64") } #[cfg(feature = "mkl")] const F32_VEC: bool = true; #[cfg(feature = "mkl")] const F64_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::mkl::$f32_vec(xs, ys) } #[cfg(feature = "mkl")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::mkl::$f64_vec(xs, ys) } #[cfg(feature = "accelerate")] const F32_VEC: bool = true; #[cfg(feature = "accelerate")] const F64_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::accelerate::$f32_vec(xs, ys) } #[cfg(feature = "accelerate")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::accelerate::$f64_vec(xs, ys) } } }; } unary_op!(Exp, "exp", v, v.exp(), vs_exp, vd_exp); unary_op!(Log, "log", v, v.ln(), vs_ln, vd_ln); unary_op!(Sin, "sin", v, v.sin(), vs_sin, vd_sin); unary_op!(Cos, "cos", v, v.cos(), vs_cos, vd_cos); unary_op!(Tanh, "tanh", v, v.tanh(), vs_tanh, vd_tanh); unary_op!(Neg, "neg", v, -v); unary_op!(Recip, "recip", v, v.recip()); unary_op!(Sqr, "sqr", v, v * v, vs_sqr, vd_sqr); unary_op!(Sqrt, "sqrt", v, v.sqrt(), vs_sqrt, vd_sqrt); /// Tanh based approximation of the `gelu` operation /// GeluErf is the more precise one. 
/// <https://en.wikipedia.org/wiki/Activation_function#Comparison_of_activation_functions> impl UnaryOpT for Gelu { const NAME: &'static str = "gelu"; const V: Self = Gelu; #[inline(always)] fn bf16(v: bf16) -> bf16 { bf16::from_f32_const(0.5) * v * (bf16::ONE + bf16::tanh( (bf16::from_f32_const(2.0) / bf16::PI).sqrt() * v * (bf16::ONE + bf16::from_f32_const(0.044715) * v * v), )) } #[inline(always)] fn f16(v: f16) -> f16 { f16::from_f32_const(0.5) * v * (f16::ONE + f16::tanh( (f16::from_f32_const(2.0) / f16::PI).sqrt() * v * (f16::ONE + f16::from_f32_const(0.044715) * v * v), )) } #[inline(always)] fn f32(v: f32) -> f32 { 0.5 * v * (1.0 + f32::tanh((2.0f32 / std::f32::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v))) } #[inline(always)] fn f64(v: f64) -> f64 { 0.5 * v * (1.0 + f64::tanh((2.0f64 / std::f64::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v))) } #[inline(always)] fn u8(_: u8) -> u8 { 0 } #[inline(always)] fn u32(_: u32) -> u32 { 0 } #[inline(always)] fn i64(_: i64) -> i64 { 0 } const KERNEL: &'static str = "ugelu"; #[cfg(feature = "mkl")] const F32_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::mkl::vs_gelu(xs, ys) } #[cfg(feature = "mkl")] const F64_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::mkl::vd_gelu(xs, ys) } #[cfg(feature = "accelerate")] const F32_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::accelerate::vs_gelu(xs, ys) } #[cfg(feature = "accelerate")] const F64_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::accelerate::vd_gelu(xs, ys) } } /// `erf` operation /// <https://en.wikipedia.org/wiki/Error_function> impl UnaryOpT for Erf { const NAME: &'static str = "erf"; const KERNEL: &'static str = "uerf"; const V: Self = Erf; #[inline(always)] fn bf16(v: bf16) -> bf16 { bf16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f16(v: f16) -> f16 { f16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f32(v: f32) -> f32 { Self::f64(v as f64) as f32 } #[inline(always)] fn f64(v: f64) -> f64 { crate::cpu::erf::erf(v) } #[inline(always)] fn u8(_: u8) -> u8 { 0 } #[inline(always)] fn u32(_: u32) -> u32 { 0 } #[inline(always)] fn i64(_: i64) -> i64 { 0 } } impl UnaryOpT for Abs { const NAME: &'static str = "abs"; const KERNEL: &'static str = "uabs"; const V: Self = Abs; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.abs() } #[inline(always)] fn f16(v: f16) -> f16 { v.abs() } #[inline(always)] fn f32(v: f32) -> f32 { v.abs() } #[inline(always)] fn f64(v: f64) -> f64 { v.abs() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v.abs() } } impl UnaryOpT for Ceil { const NAME: &'static str = "ceil"; const KERNEL: &'static str = "uceil"; const V: Self = Ceil; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.ceil() } #[inline(always)] fn f16(v: f16) -> f16 { v.ceil() } #[inline(always)] fn f32(v: f32) -> f32 { v.ceil() } #[inline(always)] fn f64(v: f64) -> f64 { v.ceil() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } impl UnaryOpT for Floor { const NAME: &'static str = "floor"; const KERNEL: &'static str = "ufloor"; const V: Self = Floor; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.floor() } #[inline(always)] fn f16(v: f16) -> f16 { 
v.floor() } #[inline(always)] fn f32(v: f32) -> f32 { v.floor() } #[inline(always)] fn f64(v: f64) -> f64 { v.floor() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } impl UnaryOpT for Round { const NAME: &'static str = "round"; const KERNEL: &'static str = "uround"; const V: Self = Round; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.round() } #[inline(always)] fn f16(v: f16) -> f16 { v.round() } #[inline(always)] fn f32(v: f32) -> f32 { v.round() } #[inline(always)] fn f64(v: f64) -> f64 { v.round() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } impl UnaryOpT for GeluErf { const NAME: &'static str = "gelu_erf"; const KERNEL: &'static str = "ugelu_erf"; const V: Self = GeluErf; #[inline(always)] fn bf16(v: bf16) -> bf16 { bf16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f16(v: f16) -> f16 { f16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f32(v: f32) -> f32 { Self::f64(v as f64) as f32 } #[inline(always)] fn f64(v: f64) -> f64 { (crate::cpu::erf::erf(v / 2f64.sqrt()) + 1.) * 0.5 * v } #[inline(always)] fn u8(_: u8) -> u8 { 0 } #[inline(always)] fn u32(_: u32) -> u32 { 0 } #[inline(always)] fn i64(_: i64) -> i64 { 0 } } impl UnaryOpT for Relu { const NAME: &'static str = "relu"; const KERNEL: &'static str = "urelu"; const V: Self = Relu; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.max(bf16::ZERO) } #[inline(always)] fn f16(v: f16) -> f16 { v.max(f16::ZERO) } #[inline(always)] fn f32(v: f32) -> f32 { v.max(0f32) } #[inline(always)] fn f64(v: f64) -> f64 { v.max(0f64) } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } /// `BackpropOp` is a wrapper around `Option<Op>`. The main goal is to ensure that dependencies are /// properly checked when creating a new value #[derive(Clone)] pub struct BackpropOp(Option<Op>); impl BackpropOp { pub(crate) fn none() -> Self { BackpropOp(None) } pub(crate) fn new1(arg: &Tensor, f: impl Fn(Tensor) -> Op) -> Self { let op = if arg.track_op() { Some(f(arg.clone())) } else { None }; Self(op) } pub(crate) fn new2(arg1: &Tensor, arg2: &Tensor, f: impl Fn(Tensor, Tensor) -> Op) -> Self { let op = if arg1.track_op() || arg2.track_op() { Some(f(arg1.clone(), arg2.clone())) } else { None }; Self(op) } pub(crate) fn new3( arg1: &Tensor, arg2: &Tensor, arg3: &Tensor, f: impl Fn(Tensor, Tensor, Tensor) -> Op, ) -> Self { let op = if arg1.track_op() || arg2.track_op() || arg3.track_op() { Some(f(arg1.clone(), arg2.clone(), arg3.clone())) } else { None }; Self(op) } pub(crate) fn new<A: AsRef<Tensor>>(args: &[A], f: impl Fn(Vec<Tensor>) -> Op) -> Self { let op = if args.iter().any(|arg| arg.as_ref().track_op()) { let args: Vec<Tensor> = args.iter().map(|arg| arg.as_ref().clone()).collect(); Some(f(args)) } else { None }; Self(op) } pub(crate) fn is_none(&self) -> bool { self.0.is_none() } } impl std::ops::Deref for BackpropOp { type Target = Option<Op>; fn deref(&self) -> &Self::Target { &self.0 } }
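To make the `CustomOp1` contract above concrete, here is a hedged sketch of a user-land, CPU-only op (an element-wise clip). The helper calls used here (`CpuStorage::as_slice`, `Layout::contiguous_offsets`, `candle_core::bail!`, `Tensor::apply_op1`) are recalled from the candle API rather than taken from this file, so treat them as assumptions to double-check.

```rust
// Hedged sketch of a user-land CustomOp1: element-wise clip, CPU forward only.
// API names outside the trait itself are recalled from memory of candle_core.
use candle_core::{bail, CpuStorage, CustomOp1, Device, Layout, Result, Shape, Tensor};

struct Clip {
    lo: f32,
    hi: f32,
}

impl CustomOp1 for Clip {
    fn name(&self) -> &'static str {
        "clip"
    }

    fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> {
        // Only the contiguous f32 case is handled in this sketch.
        let xs = storage.as_slice::<f32>()?;
        let (start, end) = match layout.contiguous_offsets() {
            Some(offsets) => offsets,
            None => bail!("clip requires a contiguous input"),
        };
        let ys: Vec<f32> = xs[start..end]
            .iter()
            .map(|&v| v.clamp(self.lo, self.hi))
            .collect();
        Ok((CpuStorage::F32(ys), layout.shape().clone()))
    }
}

fn main() -> Result<()> {
    let t = Tensor::new(&[-2.0f32, -0.5, 0.5, 2.0], &Device::Cpu)?;
    let clipped = t.apply_op1(Clip { lo: -1.0, hi: 1.0 })?;
    println!("{clipped}");
    Ok(())
}
```

The CUDA and Metal forward passes fall back to the trait's default "not implemented" errors, and no `bwd` is provided, so this op only supports CPU inference.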
candle/candle-core/src/op.rs/0
{ "file_path": "candle/candle-core/src/op.rs", "repo_id": "candle", "token_count": 14122 }
14
//! Tensors are N-dimensional matrixes of elements using a single data type. #![allow(clippy::redundant_closure_call)] use crate::backend::{BackendDevice, BackendStorage}; use crate::op::{ BackpropOp, BinaryOp, CmpOp, CustomOp1, CustomOp2, CustomOp3, Op, ReduceOp, UnaryOp, }; use crate::scalar::TensorOrScalar; use crate::shape::{Dim, Dims}; use crate::{bail, storage::Storage, DType, Device, Error, Layout, Result, Shape}; use std::sync::{Arc, RwLock}; /// Unique identifier for tensors. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct TensorId(usize); impl TensorId { fn new() -> Self { // https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805 use std::sync::atomic; static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1); Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed)) } } pub struct Tensor_ { id: TensorId, // As we provide inner mutability on the tensor content, the alternatives are: // - Using a mutex, this would have the highest cost when retrieving the storage but would // prevent errors when concurrent access takes place. Mutex would also be subject to // deadlocks for example using the current code if the same tensor is used twice by a single // binary op. // - Using a refcell unsafe cell would have some intermediary cost, borrow checking would be // verified dynamically, but the resulting tensors would not be send or sync. // - Using an unsafe cell would have the lowest cost but undefined behavior on concurrent // accesses. // Ideally, we would use Arc<Storage> for tensors on which we don't plan on modifying the data // and Arc<Mutex<Storage>> for tensors where the data could be modified, e.g. variables but // that's tricky to encode in the current setup. storage: Arc<RwLock<Storage>>, layout: Layout, op: BackpropOp, is_variable: bool, dtype: DType, device: Device, } impl AsRef<Tensor> for Tensor { fn as_ref(&self) -> &Tensor { self } } // Tensors are refcounted so that cloning is cheap when building the op graph. // Storages are also refcounted independently so that its possible to avoid // copying the storage for operations that only modify the shape or stride. #[derive(Clone)] /// The core struct for manipulating tensors. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// /// let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?; /// let b = Tensor::arange(0f32, 12f32, &Device::Cpu)?.reshape((3, 4))?; /// /// let c = a.matmul(&b)?; /// # Ok::<(), candle_core::Error>(()) /// ``` /// /// Tensors are reference counted with [`Arc`] so cloning them is cheap. pub struct Tensor(Arc<Tensor_>); impl std::ops::Deref for Tensor { type Target = Tensor_; fn deref(&self) -> &Self::Target { self.0.as_ref() } } macro_rules! unary_op { ($fn_name:ident, $op_name:ident) => { pub fn $fn_name(&self) -> Result<Self> { let shape = self.shape(); let storage = self .storage() .unary_impl::<crate::op::$op_name>(self.layout())?; let op = BackpropOp::new1(self, |s| Op::Unary(s, UnaryOp::$op_name)); Ok(from_storage(storage, shape.clone(), op, false)) } }; } macro_rules! binary_op { ($fn_name:ident, $op_name:ident) => { pub fn $fn_name(&self, rhs: &Self) -> Result<Self> { let shape = self.same_shape_binary_op(rhs, stringify!($fn_name))?; let storage = self.storage().binary_impl::<crate::op::$op_name>( &*rhs.storage(), self.layout(), rhs.layout(), )?; let op = BackpropOp::new2(self, rhs, |t1, t2| Op::Binary(t1, t2, BinaryOp::$op_name)); Ok(from_storage(storage, shape.clone(), op, false)) } }; } macro_rules! 
binary_op_scalar { ($fn_name:ident, $op_name:ident) => { pub fn $fn_name<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { let rhs = match rhs.to_tensor_scalar()? { crate::scalar::TensorScalar::Tensor(rhs) => rhs, crate::scalar::TensorScalar::Scalar(rhs) => rhs .to_dtype(self.dtype())? .to_device(self.device())? .broadcast_as(self.shape())?, }; let shape = self.same_shape_binary_op(&rhs, stringify!($fn_name))?; let storage = self.storage().binary_impl::<crate::op::$op_name>( &*rhs.storage(), self.layout(), rhs.layout(), )?; let op = BackpropOp::new2(self, &rhs, |t1, t2| Op::Binary(t1, t2, BinaryOp::$op_name)); Ok(from_storage(storage, shape.clone(), op, false)) } }; } macro_rules! broadcast_binary_op { ($fn_name:ident, $inner_fn_name:ident) => { pub fn $fn_name(&self, rhs: &Self) -> Result<Self> { let lhs = self; let shape = lhs .shape() .broadcast_shape_binary_op(rhs.shape(), stringify!($fn_name))?; let l_broadcast = shape != *lhs.shape(); let r_broadcast = shape != *rhs.shape(); match (l_broadcast, r_broadcast) { (true, true) => lhs .broadcast_as(&shape)? .$inner_fn_name(&rhs.broadcast_as(&shape)?), (false, true) => lhs.$inner_fn_name(&rhs.broadcast_as(&shape)?), (true, false) => lhs.broadcast_as(&shape)?.$inner_fn_name(rhs), (false, false) => lhs.$inner_fn_name(rhs), } } }; } /// Creates a fresh tensor structure based on a storage and a shape, this uses contiguous strides. pub(crate) fn from_storage<S: Into<Shape>>( storage: Storage, shape: S, op: BackpropOp, is_variable: bool, ) -> Tensor { let dtype = storage.dtype(); let device = storage.device(); let tensor_ = Tensor_ { id: TensorId::new(), storage: Arc::new(RwLock::new(storage)), layout: Layout::contiguous(shape), op, is_variable, dtype, device, }; Tensor(Arc::new(tensor_)) } impl Tensor { pub(crate) fn ones_impl<S: Into<Shape>>( shape: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let none = BackpropOp::none(); let shape = shape.into(); let storage = device.ones(&shape, dtype)?; Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor filled with ones. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::ones((2, 3), DType::F32, &Device::Cpu)?; /// let b = Tensor::from_slice(&[1.0f32, 1.0, 1.0, 1.0, 1.0, 1.0], (2, 3), &Device::Cpu)?; /// // a == b /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn ones<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> { Self::ones_impl(shape, dtype, device, false) } /// Creates a new tensor filled with ones with same shape, dtype, and device as the other tensor. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = a.ones_like()?; /// // b == a + 1 /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn ones_like(&self) -> Result<Self> { Tensor::ones(self.shape(), self.dtype(), self.device()) } // Do not expose outside of the crate, the `is_variable=true` case should only be accessed from // the variable module. pub(crate) fn zeros_impl<S: Into<Shape>>( shape: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let none = BackpropOp::none(); let shape = shape.into(); let storage = device.zeros(&shape, dtype)?; Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor filled with zeros. 
/// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = Tensor::from_slice(&[0.0f32, 0.0, 0.0, 0.0, 0.0, 0.0], (2, 3), &Device::Cpu)?; /// // a == b /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn zeros<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> { Self::zeros_impl(shape, dtype, device, false) } /// Creates a new tensor filled with ones with same shape, dtype, and device as the other /// tensor. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = a.zeros_like()?; /// // b is on CPU f32. /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn zeros_like(&self) -> Result<Self> { Tensor::zeros(self.shape(), self.dtype(), self.device()) } pub(crate) fn rand_impl<S: Into<Shape>, T: crate::FloatDType>( lo: T, up: T, s: S, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_uniform(lo, up, &s)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } pub(crate) fn rand_f64_impl<S: Into<Shape>>( lo: f64, up: f64, s: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_uniform_f64(lo, up, &s, dtype)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } /// Creates a new tensor initialized with values sampled uniformly between `lo` and `up`. pub fn rand<S: Into<Shape>, T: crate::FloatDType>( lo: T, up: T, s: S, device: &Device, ) -> Result<Self> { Self::rand_impl(lo, up, s, device, false) } pub fn rand_like(&self, lo: f64, up: f64) -> Result<Self> { Tensor::rand_f64_impl(lo, up, self.shape(), self.dtype(), self.device(), false) } pub(crate) fn randn_impl<S: Into<Shape>, T: crate::FloatDType>( mean: T, std: T, s: S, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_normal(mean, std, &s)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } pub(crate) fn randn_f64_impl<S: Into<Shape>>( mean: f64, std: f64, s: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_normal_f64(mean, std, &s, dtype)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } pub fn randn_like(&self, mean: f64, stdev: f64) -> Result<Self> { Tensor::randn_f64_impl( mean, stdev, self.shape(), self.dtype(), self.device(), false, ) } /// Creates a new tensor initialized with values sampled from a normal distribution with the /// specified `mean` and standard deviation `std`. pub fn randn<S: Into<Shape>, T: crate::FloatDType>( mean: T, std: T, s: S, device: &Device, ) -> Result<Self> { Self::randn_impl(mean, std, s, device, false) } pub(crate) fn new_impl<A: crate::device::NdArray>( array: A, shape: Shape, device: &Device, is_variable: bool, ) -> Result<Self> { let n: usize = shape.elem_count(); let buffer_size: usize = array.shape()?.elem_count(); if buffer_size != n { return Err(Error::ShapeMismatch { buffer_size, shape }.bt()); } let storage = device.storage(array)?; let none = BackpropOp::none(); Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor on the specified device using the content and shape of the input. 
pub fn new<A: crate::device::NdArray>(array: A, device: &Device) -> Result<Self> { let shape = array.shape()?; Self::new_impl(array, shape, device, false) } /// Returns a new tensor with all the elements having the same specified value. Note that /// the tensor is not contiguous so you would have to call `.contiguous()` on it if needed. pub fn full<D: crate::WithDType, S: Into<Shape>>( value: D, shape: S, device: &Device, ) -> Result<Self> { Self::from_vec_impl(vec![value], (), device, false)?.broadcast_as(shape) } /// Creates a new 1D tensor from an iterator. pub fn from_iter<D: crate::WithDType>( iter: impl IntoIterator<Item = D>, device: &Device, ) -> Result<Self> { let data = iter.into_iter().collect::<Vec<_>>(); let len = data.len(); Self::from_vec_impl(data, len, device, false) } /// Creates a new 1D tensor with values from the interval `[start, end)` taken with a common /// difference `1` from `start`. pub fn arange<D: crate::WithDType>(start: D, end: D, device: &Device) -> Result<Self> { Self::arange_step(start, end, D::one(), device) } /// Creates a new 1D tensor with values from the interval `[start, end)` taken with a common /// difference `step` from `start`. pub fn arange_step<D: crate::WithDType>( start: D, end: D, step: D, device: &Device, ) -> Result<Self> { if D::is_zero(&step) { bail!("step cannot be zero") } let mut data = vec![]; let mut current = start; if step >= D::zero() { while current < end { data.push(current); current += step; } } else { while current > end { data.push(current); current += step; } } let len = data.len(); Self::from_vec_impl(data, len, device, false) } pub(crate) fn from_vec_impl<S: Into<Shape>, D: crate::WithDType>( data: Vec<D>, shape: S, device: &Device, is_variable: bool, ) -> Result<Self> { let shape = shape.into(); let buffer_size = data.len(); if buffer_size != shape.elem_count() { return Err(Error::ShapeMismatch { buffer_size, shape }.bt()); } let storage = device.storage_owned(data)?; let none = BackpropOp::none(); Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor initialized with values from the input vector. The number of elements /// in this vector must be the same as the number of elements defined by the shape. /// If the device is cpu, no data copy is made. pub fn from_vec<S: Into<Shape>, D: crate::WithDType>( data: Vec<D>, shape: S, device: &Device, ) -> Result<Self> { Self::from_vec_impl(data, shape, device, false) } /// Creates a new tensor initialized with values from the input slice. The number of elements /// in this vector must be the same as the number of elements defined by the shape. pub fn from_slice<S: Into<Shape>, D: crate::WithDType>( array: &[D], shape: S, device: &Device, ) -> Result<Self> { Self::new_impl(array, shape.into(), device, false) } pub(crate) fn same_shape_binary_op(&self, rhs: &Self, op: &'static str) -> Result<&Shape> { let lhs = self.shape(); let rhs = rhs.shape(); if lhs != rhs { Err(Error::ShapeMismatchBinaryOp { lhs: lhs.clone(), rhs: rhs.clone(), op, } .bt()) } else { Ok(lhs) } } /// Returns true if the computation graph should track this op, that is if it is /// a variable or if it has some variable as dependencies. pub fn track_op(&self) -> bool { self.is_variable || self.op.is_some() } // TODO: Also make an inplace version or a pre-allocated? This could be tricky // if this can create cycles in the compute graph. 
binary_op!(add, Add); binary_op!(mul, Mul); binary_op!(sub, Sub); binary_op!(div, Div); binary_op_scalar!(maximum, Maximum); binary_op_scalar!(minimum, Minimum); broadcast_binary_op!(broadcast_add, add); broadcast_binary_op!(broadcast_mul, mul); broadcast_binary_op!(broadcast_sub, sub); broadcast_binary_op!(broadcast_div, div); broadcast_binary_op!(broadcast_maximum, maximum); broadcast_binary_op!(broadcast_minimum, minimum); broadcast_binary_op!(broadcast_eq, eq); broadcast_binary_op!(broadcast_ne, ne); broadcast_binary_op!(broadcast_lt, lt); broadcast_binary_op!(broadcast_le, le); broadcast_binary_op!(broadcast_gt, gt); broadcast_binary_op!(broadcast_ge, ge); unary_op!(recip, Recip); unary_op!(neg, Neg); unary_op!(exp, Exp); unary_op!(log, Log); unary_op!(sin, Sin); unary_op!(cos, Cos); unary_op!(tanh, Tanh); unary_op!(abs, Abs); unary_op!(sqr, Sqr); unary_op!(sqrt, Sqrt); unary_op!(gelu, Gelu); unary_op!(gelu_erf, GeluErf); unary_op!(erf, Erf); unary_op!(relu, Relu); unary_op!(ceil, Ceil); unary_op!(floor, Floor); unary_op!(round, Round); /// Round element of the input tensor to the nearest integer. /// /// If the number of decimals is negative, it specifies the number of positions to the left of /// the decimal point. pub fn round_to(&self, decimals: i32) -> Result<Self> { let mult = 10f64.powi(decimals); (self * mult)?.round()? * (1f64 / mult) } /// Retrieves the single scalar value hold in the tensor. If the tensor contains multiple /// dimensions, an error is returned instead. pub fn to_scalar<S: crate::WithDType>(&self) -> Result<S> { if self.rank() != 0 { Err(Error::UnexpectedNumberOfDims { expected: 0, got: self.rank(), shape: self.shape().clone(), } .bt())? } let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; Ok::<_, Error>(data[self.layout().start_offset()]) }; match &*self.storage() { Storage::Cpu(cpu_storage) => from_cpu_storage(cpu_storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// An alias for `to_scalar`. pub fn to_vec0<S: crate::WithDType>(&self) -> Result<S> { self.to_scalar::<S>() } /// Repeat this tensor along the specified dimensions. pub fn repeat<S: Into<Shape>>(&self, shape: S) -> Result<Tensor> { // Similar to PyTorch, we extend the number of dimensions of self if needed. let repeats = shape.into(); let repeats = repeats.dims(); let mut inp = if self.rank() < repeats.len() { let shape = [vec![1; repeats.len() - self.rank()], self.dims().to_vec()].concat(); self.reshape(shape)? } else { self.clone() }; for (idx, &repeat) in repeats.iter().enumerate() { if repeat > 1 { inp = Tensor::cat(&vec![&inp; repeat], idx)? } } Ok(inp) } /// Creates grids of coordinates specified by the 1D inputs. /// /// # Arguments /// /// * `args` - A slice of 1D tensors. /// * `xy_indexing` - Whether to use xy indexing or ij indexing. If xy is selected, the /// first dimension corresponds to the cardinality of the second input and the second /// dimension corresponds to the cardinality of the first input. If ij is selected, the /// dimensions are in the same order as the cardinality of the inputs. 
/// /// # Examples /// /// ```rust /// use candle_core::{Tensor, Device, Shape}; /// let x = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?; /// let y = Tensor::new(&[4f32, 5., 6.], &Device::Cpu)?; /// /// let grids_xy = Tensor::meshgrid(&[&x, &y], true)?; /// /// assert_eq!(grids_xy.len(), 2); /// assert_eq!(grids_xy[0].dims(), &[3, 3]); /// /// assert_eq!(grids_xy[0].to_vec2::<f32>()?, &[[1., 2., 3.], [1., 2., 3.], [1., 2., 3.]]); /// assert_eq!(grids_xy[1].to_vec2::<f32>()?, &[[4., 4., 4.], [5., 5., 5.], [6., 6., 6.]]); /// /// let grids_ij = Tensor::meshgrid(&[&x, &y], false)?; /// /// assert_eq!(grids_ij[0].to_vec2::<f32>()?, &[[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]]); /// assert_eq!(grids_ij[1].to_vec2::<f32>()?, &[[4., 5., 6.], [4., 5., 6.], [4., 5., 6.]]); /// # Ok::<(), candle_core::Error>(()) /// ``` /// /// # Errors /// /// * Will return `Err` if `args` contains less than 2 tensors. /// pub fn meshgrid<A: AsRef<Tensor>>(args: &[A], xy_indexing: bool) -> Result<Vec<Self>> { if args.len() <= 1 { Err(Error::OpRequiresAtLeastTwoTensors { op: "meshgrid" }.bt())? } let args: Vec<_> = if xy_indexing { args.iter().rev().collect() } else { args.iter().collect() }; let mut shape = Vec::with_capacity(args.len()); for arg in args.iter() { shape.push(arg.as_ref().dims1()?) } let mut grids = Vec::with_capacity(args.len()); for idx in 0..args.len() { let mut ones = vec![1usize; args.len()]; ones[idx] = shape[idx]; let arg = args[idx].as_ref().reshape(ones)?; let mut repeats = shape.clone(); repeats[idx] = 1; let repeated_tensor = arg.repeat(repeats)?; grids.push(repeated_tensor); } if xy_indexing { grids.reverse(); } Ok(grids) } /// This operation multiplies the input tensor by `mul` then adds `add` and return the result. /// The input values `mul` and `add` are casted to the appropriate type so some rounding might /// be performed. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?; /// let a = a.affine(4., -2.)?; /// assert_eq!(a.to_vec2::<f32>()?, &[[-2.0, 2.0], [6.0, 10.0]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn affine(&self, mul: f64, add: f64) -> Result<Self> { let storage = self.storage().affine(self.layout(), mul, add)?; let op = BackpropOp::new1(self, |arg| Op::Affine { arg, mul, add }); Ok(from_storage(storage, self.shape(), op, false)) } /// Applies the Exponential Linear Unit (ELU) function on each element of the input tensor. pub fn elu(&self, alpha: f64) -> Result<Self> { let storage = self.storage().elu(self.layout(), alpha)?; let op = BackpropOp::new1(self, |t| Op::Elu(t, alpha)); Ok(from_storage(storage, self.shape(), op, false)) } /// Raise the tensor to some float exponent `e`. pub fn powf(&self, e: f64) -> Result<Self> { let storage = self.storage().powf(self.layout(), e)?; let op = BackpropOp::new1(self, |t| Op::Powf(t, e)); Ok(from_storage(storage, self.shape(), op, false)) } fn check_dim(&self, dim: usize, op: &'static str) -> Result<()> { if dim >= self.dims().len() { Err(Error::DimOutOfRange { shape: self.shape().clone(), dim: dim as i32, op, } .bt())? } else { Ok(()) } } /// Split a tensor into the specified number of chunks, this may return less chunks than /// specified. 
pub fn chunk<D: Dim>(&self, chunks: usize, dim: D) -> Result<Vec<Self>> { let dim = dim.to_index(self.shape(), "chunk")?; let size = self.dim(dim)?; if size < chunks { (0..size).map(|i| self.narrow(dim, i, 1)).collect() } else { let chunk_size = size / chunks; let cnt_additional = size % chunks; let mut tensors = vec![]; let mut sum_chunk_size = 0; for i in 0..chunks { let chunk_size = if i < cnt_additional { chunk_size + 1 } else { chunk_size }; let tensor = self.narrow(dim, sum_chunk_size, chunk_size)?; tensors.push(tensor); sum_chunk_size += chunk_size } Ok(tensors) } } /// Returns a new tensor that is a narrowed version of the input, the dimension `dim` /// ranges from `start` to `start + len`. pub fn narrow<D: Dim>(&self, dim: D, start: usize, len: usize) -> Result<Self> { let dims = self.dims(); let dim = dim.to_index(self.shape(), "narrow")?; let err = |msg| { Err::<(), _>( Error::NarrowInvalidArgs { shape: self.shape().clone(), dim, start, len, msg, } .bt(), ) }; if start > dims[dim] { err("start > dim_len")? } if start.saturating_add(len) > dims[dim] { err("start + len > dim_len")? } if start == 0 && dims[dim] == len { Ok(self.clone()) } else { let op = BackpropOp::new1(self, |t| Op::Narrow(t, dim, start, len)); let layout = self.layout().narrow(dim, start, len)?; let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout, op, is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } } fn squeeze_dims(self, dims: &[usize]) -> Result<Self> { match dims { [] => Ok(self), [i] => self.squeeze(*i), dims => { let dims = self .dims() .iter() .enumerate() .filter_map(|(dim_idx, &v)| { if dims.contains(&dim_idx) { None } else { Some(v) } }) .collect::<Vec<_>>(); self.reshape(dims) } } } fn reduce_impl<D: Dim>(&self, dim: D, keepdim: bool, op: ReduceOp) -> Result<Self> { let dim = dim.to_index(self.shape(), op.name())?; let storage = self.storage().reduce_op(op, self.layout(), &[dim])?; let mut dims = self.dims().to_vec(); dims[dim] = 1; let op = match op { ReduceOp::Sum | ReduceOp::Min | ReduceOp::Max => { BackpropOp::new1(self, |arg| Op::Reduce(arg, op, dims.to_vec())) } ReduceOp::ArgMin | ReduceOp::ArgMax => BackpropOp::none(), }; let res = from_storage(storage, dims, op, false); if keepdim { Ok(res) } else { res.squeeze_dims(&[dim]) } } fn sum_impl<D: Dims>(&self, sum_dims: D, keepdim: bool) -> Result<Self> { let sum_dims = sum_dims.to_indexes(self.shape(), "sum")?; let storage = self .storage() .reduce_op(ReduceOp::Sum, self.layout(), &sum_dims)?; let mut dims = self.dims().to_vec(); for &sum_dim in sum_dims.iter() { dims[sum_dim] = 1 } let op = BackpropOp::new1(self, |a| Op::Reduce(a, ReduceOp::Sum, dims.to_vec())); let sum = from_storage(storage, dims, op, false); if keepdim { Ok(sum) } else { sum.squeeze_dims(&sum_dims) } } /// Returns the sum of all elements in the input tensor. The sum is performed over all the /// input dimensions. /// /// The resulting tensor has a shape that is similar to the shape of the input tensor, except /// that the number of elements for each dimension index in `sum_dims` is 1. 
/// /// ```rust /// use candle_core::{Tensor, Device}; /// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?; /// let s = a.sum_keepdim(0)?; /// assert_eq!(s.to_vec2::<f32>()?, &[[2., 4.]]); /// let s = a.sum_keepdim(1)?; /// assert_eq!(s.to_vec2::<f32>()?, &[[1.], [5.]]); /// let s = a.sum_keepdim((0, 1))?; /// assert_eq!(s.to_vec2::<f32>()?, &[[6.]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn sum_keepdim<D: Dims>(&self, sum_dims: D) -> Result<Self> { self.sum_impl(sum_dims, true) } /// Returns the sum of all elements in the input tensor. The sum is performed over all the /// input dimensions and compared to `sum_keepdim` these dimensions are squeezed rather than /// kept. pub fn sum<D: Dims>(&self, sum_dims: D) -> Result<Self> { self.sum_impl(sum_dims, false) } /// Returns the mean of all elements in the input tensor. The mean is performed over all the /// input dimensions. /// /// The resulting tensor has a shape that is similar to the shape of the input tensor, except /// that the number of elements for each dimension index in `mean_dims` is 1. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?; /// let s = a.mean_keepdim(0)?; /// assert_eq!(s.to_vec2::<f32>()?, &[[1., 2.]]); /// let s = a.mean_keepdim(1)?; /// assert_eq!(s.to_vec2::<f32>()?, &[[0.5], [2.5]]); /// let s = a.mean_keepdim((0, 1))?; /// assert_eq!(s.to_vec2::<f32>()?, &[[1.5]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn mean_keepdim<D: Dims>(&self, mean_dims: D) -> Result<Self> { let mean_dims = mean_dims.to_indexes(self.shape(), "mean-keepdim")?; let reduced_dim: usize = mean_dims.iter().map(|i| self.dims()[*i]).product(); let scale = 1f64 / (reduced_dim as f64); self.sum_impl(mean_dims, true)? * scale } /// Returns the mean of all elements in the input tensor. The mean is performed over all the /// input dimensions and compared to `mean_keepdim` these dimensions are squeezed rather than /// kept. pub fn mean<D: Dims>(&self, mean_dims: D) -> Result<Self> { let mean_dims = mean_dims.to_indexes(self.shape(), "mean")?; let reduced_dim: usize = mean_dims.iter().map(|i| self.dims()[*i]).product(); let scale = 1f64 / (reduced_dim as f64); self.sum_impl(mean_dims, false)? * scale } /// Returns the unbiased variance over the selected dimension. pub fn var_keepdim<D: Dim>(&self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "var")?; let mean = self.mean_keepdim(dim)?; let squares = self.broadcast_sub(&mean)?.sqr()?; squares.sum_impl(dim, true)? / (self.dim(dim)? - 1) as f64 } /// Returns the unbiased variance over the selected dimension. pub fn var<D: Dim>(&self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "var")?; self.var_keepdim(dim)?.squeeze(dim) } /// Gathers the maximum value across the selected dimension. The resulting shape has the same /// number of dimensions as the original tensor and the select dimension has a single element. pub fn max_keepdim<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, true, ReduceOp::Max) } /// Similar to `max_keepdim` but the target dimension is squeezed. pub fn max<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, false, ReduceOp::Max) } /// Gathers the minimum value across the selected dimension. The resulting shape has the same /// number of dimensions as the original tensor and the select dimension has a single element. 
pub fn min_keepdim<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, true, ReduceOp::Min) } /// Similar to `min_keepdim` but the target dimension is squeezed. pub fn min<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, false, ReduceOp::Min) } pub fn argmax_keepdim<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, true, ReduceOp::ArgMax) } /// Similar to `argmax_keepdim` but the target dimension is squeezed. pub fn argmax<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, false, ReduceOp::ArgMax) } pub fn argmin_keepdim<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, true, ReduceOp::ArgMin) } /// Similar to `argmin_keepdim` but the target dimension is squeezed. pub fn argmin<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, false, ReduceOp::ArgMin) } /// Element-wise comparison between two tensors, e.g. equality, greater than, ... The actual /// comparison operation is specified by the `op` argument. /// /// The returned tensor has the same shape as the original tensors and uses `u8` elements. pub fn cmp<T: TensorOrScalar>(&self, rhs: T, op: CmpOp) -> Result<Self> { let rhs = match rhs.to_tensor_scalar()? { crate::scalar::TensorScalar::Tensor(rhs) => rhs, crate::scalar::TensorScalar::Scalar(rhs) => rhs .to_dtype(self.dtype())? .to_device(self.device())? .broadcast_as(self.shape())?, }; let shape = self.same_shape_binary_op(&rhs, "cmp")?; let storage = self .storage() .cmp(op, &rhs.storage(), self.layout(), rhs.layout())?; let op = BackpropOp::new1(self, |a| Op::Cmp(a, op)); Ok(from_storage(storage, shape.dims(), op, false)) } /// Element-wise equality. pub fn eq<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Eq) } /// Element-wise non-equality. pub fn ne<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Ne) } /// Element-wise comparison with lower-than, the returned tensor uses value 1 where `self < /// rhs` and 0 otherwise. pub fn lt<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Lt) } /// Element-wise comparison with greater-than, the returned tensor uses value 1 where `self > /// rhs` and 0 otherwise. pub fn gt<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Gt) } /// Element-wise comparison with greater-equal, the returned tensor uses value 1 where `self >= /// rhs` and 0 otherwise. pub fn ge<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Ge) } /// Element-wise comparison with lower-equal, the returned tensor uses value 1 where `self <= /// rhs` and 0 otherwise. pub fn le<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Le) } /// Clamp the tensor values to be between `min` and `max`. pub fn clamp<T1: TensorOrScalar, T2: TensorOrScalar>(&self, min: T1, max: T2) -> Result<Self> { self.maximum(min)?.minimum(max) } /// Interpolate the input tensor to the `target_size` size, taking the value of the nearest element. /// /// The input tensor should have three dimensions, `(batch, channels, l)`, the returned /// tensor also has three dimensions, `(batch, channels, target_size)`. pub fn interpolate1d(&self, target_size: usize) -> Result<Self> { let (n, c, _l) = self.dims3()?; let op = BackpropOp::new1(self, Op::UpsampleNearest1D); let storage = self .storage() .upsample_nearest1d(self.layout(), target_size)?; Ok(from_storage(storage, (n, c, target_size), op, false)) } /// Alias for `interpolate1d`. 
pub fn upsample_nearest1d(&self, target_size: usize) -> Result<Self> { self.interpolate1d(target_size) } /// Interpolate the input tensor to the `(target_h, target_w)` size, taking the value of the /// nearest element. /// /// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned /// tensor also has four dimensions, `(batch, channels, target_h, target_w)`. pub fn interpolate2d(&self, target_h: usize, target_w: usize) -> Result<Self> { let (n, c, _h, _w) = self.dims4()?; let op = BackpropOp::new1(self, |arg| Op::UpsampleNearest2D { arg, target_h, target_w, }); let storage = self .storage() .upsample_nearest2d(self.layout(), target_h, target_w)?; Ok(from_storage(storage, (n, c, target_h, target_w), op, false)) } /// Alias for `interpolate2d`. pub fn upsample_nearest2d(&self, target_h: usize, target_w: usize) -> Result<Self> { self.interpolate2d(target_h, target_w) } /// 2D average pooling over an input tensor with multiple channels. /// /// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned /// tensor also has four dimensions, `(batch, channels, h', w')`. The pooling is performed on /// the two last dimensions using a kernel of size `sz`. The returned element is the average /// value over the kernel window. pub fn avg_pool2d<T: crate::ToUsize2>(&self, sz: T) -> Result<Self> { let sz = sz.to_usize2(); self.avg_pool2d_with_stride(sz, sz) } /// Same as `avg_pool2d` but with a `stride` that can be set to a value different from the /// kernel size. pub fn avg_pool2d_with_stride<T: crate::ToUsize2>( &self, kernel_size: T, stride: T, ) -> Result<Self> { let kernel_size = kernel_size.to_usize2(); let stride = stride.to_usize2(); let (n, c, h, w) = self.dims4()?; if h < kernel_size.0 || w < kernel_size.1 { bail!("kernel-size {kernel_size:?} is larger than the input size {h},{w}") } // https://pytorch.org/docs/stable/generated/torch.nn.AvgPool2d.html#torch.nn.AvgPool2d let h_out = (h - kernel_size.0) / stride.0 + 1; let w_out = (w - kernel_size.1) / stride.1 + 1; let op = BackpropOp::new1(self, |arg| Op::AvgPool2D { arg, kernel_size, stride, }); let storage = self .storage() .avg_pool2d(self.layout(), kernel_size, stride)?; Ok(from_storage(storage, (n, c, h_out, w_out), op, false)) } /// 2D max pooling over an input tensor with multiple channels. /// /// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned /// tensor also has four dimensions, `(batch, channels, h', w')`. The pooling is performed on /// the two last dimensions using a kernel of size `sz`, the returned element is the maximum /// value over the kernel window. pub fn max_pool2d<T: crate::ToUsize2>(&self, sz: T) -> Result<Self> { let sz = sz.to_usize2(); self.max_pool2d_with_stride(sz, sz) } /// Same as `max_pool2d` but with a `stride` that can be set to a value different from the /// kernel size. 
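    ///
    /// Illustrative sketch (editorial addition): a 2x2 kernel with a stride of 1 produces
    /// overlapping pooling windows.
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let xs = Tensor::arange(0f32, 16., &Device::Cpu)?.reshape((1, 1, 4, 4))?;
    /// let pooled = xs.max_pool2d_with_stride(2, 1)?;
    /// assert_eq!(pooled.dims(), &[1, 1, 3, 3]);
    /// assert_eq!(
    ///     pooled.flatten_all()?.to_vec1::<f32>()?,
    ///     &[5., 6., 7., 9., 10., 11., 13., 14., 15.]
    /// );
    /// # Ok::<(), candle_core::Error>(())
    /// ```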
pub fn max_pool2d_with_stride<T: crate::ToUsize2>( &self, kernel_size: T, stride: T, ) -> Result<Self> { let kernel_size = kernel_size.to_usize2(); let stride = stride.to_usize2(); let (n, c, h, w) = self.dims4()?; if h < kernel_size.0 || w < kernel_size.1 { bail!("kernel-size {kernel_size:?} is larger than the input size {h},{w}") } // https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html#torch.nn.MaxPool2d let h_out = (h - kernel_size.0) / stride.0 + 1; let w_out = (w - kernel_size.1) / stride.1 + 1; let op = BackpropOp::new1(self, |arg| Op::MaxPool2D { arg, kernel_size, stride, }); let storage = self .storage() .max_pool2d(self.layout(), kernel_size, stride)?; Ok(from_storage(storage, (n, c, h_out, w_out), op, false)) } /// Returns the matrix-multiplication of the input tensor with the other provided tensor. /// /// # Arguments /// /// * `self` - A tensor with dimensions `b1, b2, ..., bi, m, k`. /// * `rhs` - A tensor with dimensions `b1, b2, ..., bi, k, n`. /// /// The resulting tensor has dimensions `b1, b2, ..., bi, m, n`. pub fn matmul(&self, rhs: &Self) -> Result<Self> { let a_dims = self.shape().dims(); let b_dims = rhs.shape().dims(); let dim = a_dims.len(); if dim < 2 || b_dims.len() != dim { Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: rhs.shape().clone(), op: "matmul", } .bt())? } let m = a_dims[dim - 2]; let k = a_dims[dim - 1]; let k2 = b_dims[dim - 2]; let n = b_dims[dim - 1]; let c_shape = Shape::from(&a_dims[..dim - 2]).extend(&[m, n]); let batching: usize = a_dims[..dim - 2].iter().product(); let batching_b: usize = b_dims[..dim - 2].iter().product(); if k != k2 || batching != batching_b { Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: rhs.shape().clone(), op: "matmul", } .bt())? } let storage = self.storage().matmul( &rhs.storage(), (batching, m, n, k), self.layout(), rhs.layout(), )?; let op = BackpropOp::new2(self, rhs, Op::Matmul); Ok(from_storage(storage, c_shape, op, false)) } /// Matrix-multiplication with broadcasting support. /// /// Compared to `matmul` the two matrixes are allowed to have different dimensions as long as /// they are compatible for broadcast. E.g. if `self` has shape `(j, 1, n, k)` and `rhs` has /// shape `(l, k, m)`, the output will have shape `(j, l, n, m)`. pub fn broadcast_matmul(&self, rhs: &Self) -> Result<Self> { let lhs = self; let (l_shape, r_shape) = lhs.shape().broadcast_shape_matmul(rhs.shape())?; let l_broadcast = l_shape != *lhs.shape(); let r_broadcast = r_shape != *rhs.shape(); // TODO: Avoid concretising the broadcasted matrixes via contiguous. match (l_broadcast, r_broadcast) { (true, true) => lhs .broadcast_as(&l_shape)? .contiguous()? .matmul(&rhs.broadcast_as(&r_shape)?.contiguous()?), (false, true) => lhs.matmul(&rhs.broadcast_as(&r_shape)?.contiguous()?), (true, false) => lhs.broadcast_as(&l_shape)?.contiguous()?.matmul(rhs), (false, false) => lhs.matmul(rhs), } } /// Returns a tensor with the same shape as the input tensor, the values are taken from /// `on_true` if the input tensor value is not zero, and `on_false` at the positions where the /// input tensor is equal to zero. 
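    ///
    /// A minimal sketch (editorial addition): select between two tensors based on a mask.
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let mask = Tensor::new(&[1u8, 0, 1, 0], &Device::Cpu)?;
    /// let on_true = Tensor::new(&[1f32, 2., 3., 4.], &Device::Cpu)?;
    /// let on_false = Tensor::new(&[-1f32, -2., -3., -4.], &Device::Cpu)?;
    /// let res = mask.where_cond(&on_true, &on_false)?;
    /// assert_eq!(res.to_vec1::<f32>()?, &[1., -2., 3., -4.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```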
pub fn where_cond(&self, on_true: &Self, on_false: &Self) -> Result<Self> { let _shap = self.same_shape_binary_op(on_true, "where_cond")?; let shape = self.same_shape_binary_op(on_false, "where_cond")?; let storage = self.storage().where_cond( self.layout(), &on_true.storage(), on_true.layout(), &on_false.storage(), on_false.layout(), )?; let op = BackpropOp::new3(self, on_true, on_false, Op::WhereCond); Ok(from_storage(storage, shape, op, false)) } /// Returns a tensor with the values from the `self` tensor at the index corresponding to the /// values hold in the `ids` tensor. /// /// # Arguments /// /// * `self` - A tensor with dimensions `v, h`. /// * `ids` - A tensor with dimensions `s` and with integer values between 0 and v (exclusive). /// /// The resulting tensor has dimensions `s, h`. `s` is called the sequence length, `v` the /// vocabulary size, and `h` the hidden size. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let values = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let ids = Tensor::new(&[2u32, 1u32, 2u32], &Device::Cpu)?; /// let emb = values.embedding(&ids)?; /// assert_eq!(emb.to_vec2::<f32>()?, &[[4., 5.], [2., 3.], [4., 5.]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn embedding(&self, ids: &Self) -> Result<Self> { if self.rank() != 2 || ids.rank() != 1 { Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: ids.shape().clone(), op: "embedding", } .bt())? } self.index_select(ids, 0) } pub fn scatter_add<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "scatter-add")?; let source_dims = source.dims(); let self_dims = self.dims(); let mismatch = if source_dims.len() != self_dims.len() { true } else { let mut mismatch = false; for (i, (&d1, &d2)) in self_dims.iter().zip(source_dims.iter()).enumerate() { if i != dim && d1 != d2 { mismatch = true; break; } } mismatch }; if mismatch { Err(Error::ShapeMismatchBinaryOp { op: "scatter-add (self, src)", lhs: self.shape().clone(), rhs: source.shape().clone(), } .bt())? } if indexes.dims() != source.dims() { Err(Error::ShapeMismatchBinaryOp { op: "scatter-add (indexes, src)", lhs: indexes.shape().clone(), rhs: source.shape().clone(), } .bt())? } let storage = self.storage().scatter_add( self.layout(), &indexes.storage(), indexes.layout(), &source.storage(), source.layout(), dim, )?; let op = BackpropOp::new3(self, indexes, source, |t1, t2, t3| { Op::ScatterAdd(t1, t2, t3, dim) }); Ok(from_storage(storage, self.shape(), op, false)) } /// Embeds the values of the `src` tensor into the `self` tensor on the specified dimension. pub fn slice_scatter<D: Dim>(&self, src: &Self, dim: D, start: usize) -> Result<Self> { let dim = dim.to_index(self.shape(), "slice-scatter")?; if dim == 0 { self.slice_scatter0(src, start) } else { // TODO: Maybe we want to add a more efficient implementation at some point. self.transpose(0, dim)? .slice_scatter0(&src.transpose(0, dim)?, start)? .transpose(0, dim) } } /// Embeds the values of the `src` tensor into the `self` tensor on the first dimension. pub fn slice_scatter0(&self, src: &Self, start: usize) -> Result<Self> { if self.dtype() != src.dtype() { Err(Error::DTypeMismatchBinaryOp { lhs: self.dtype(), rhs: src.dtype(), op: "slice-scatter", } .bt())? } if self.device().location() != src.device.location() { Err(Error::DeviceMismatchBinaryOp { lhs: self.device().location(), rhs: src.device().location(), op: "slice-scatter", } .bt())? 
} if self.rank() != src.rank() { Err(Error::UnexpectedNumberOfDims { expected: self.rank(), got: src.rank(), shape: src.shape().clone(), } .bt())? } let shape_ok = self.dims() .iter() .zip(src.dims().iter()) .enumerate() .all(|(dim_idx, (&d1, &d2))| { if 0 == dim_idx { d2 + start <= d1 } else { d1 == d2 } }); if !shape_ok { Err(Error::ShapeMismatchBinaryOp { op: "slice-scatter (self, src)", lhs: self.shape().clone(), rhs: src.shape().clone(), } .bt())? } let mut storage = self.device().zeros(self.shape(), self.dtype())?; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; let offset = start * src.dims()[1..].iter().product::<usize>(); src.storage() .copy_strided_src(&mut storage, offset, src.layout())?; let op = BackpropOp::new2(self, src, |t1, t2| Op::SliceScatter0(t1, t2, start)); Ok(from_storage(storage, self.shape(), op, false)) } /// Accumulate element from `source` at indexes `indexes` and add them to `self`. pub fn index_add<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "index-add")?; let source_dims = source.dims(); let self_dims = self.dims(); let mismatch = if source_dims.len() != self_dims.len() { true } else { let mut mismatch = false; for (i, (&d1, &d2)) in self_dims.iter().zip(source_dims.iter()).enumerate() { if i != dim && d1 != d2 { mismatch = true; break; } } mismatch }; if mismatch { Err(Error::ShapeMismatchBinaryOp { op: "index-add (self, source)", lhs: self.shape().clone(), rhs: source.shape().clone(), } .bt())? } // The number of element in indexes must match the dimension on which the add is // performed on the source tensor (and the index values from `indexes` are taken from // the target tensor self) let indexes_len = indexes.dims1()?; if source_dims[dim] != indexes_len { Err(Error::ShapeMismatchBinaryOp { op: "index-add (ids, source))", lhs: indexes.shape().clone(), rhs: source.shape().clone(), } .bt())? } let storage = self.storage().index_add( self.layout(), &indexes.storage(), indexes.layout(), &source.storage(), source.layout(), dim, )?; let op = BackpropOp::new3(self, indexes, source, |t1, t2, t3| { Op::IndexAdd(t1, t2, t3, dim) }); Ok(from_storage(storage, self.shape(), op, false)) } /// Gather values across the target dimension. /// /// # Arguments /// /// * `self` - The input tensor. /// * `indexes` - The indices of elements to gather, this should have the same shape as `self` /// but can have a different number of elements on the target dimension. /// * `dim` - the target dimension. /// /// The resulting tensor has the same shape as `indexes` and use values from `self` indexed on /// dimension `dim` by the values in `indexes`. pub fn gather<D: Dim>(&self, indexes: &Self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "gather")?; let self_dims = self.dims(); let indexes_dims = indexes.dims(); let mismatch = if indexes_dims.len() != self_dims.len() { true } else { let mut mismatch = false; for (i, (&d1, &d2)) in self_dims.iter().zip(indexes_dims.iter()).enumerate() { if i != dim && d1 != d2 { mismatch = true; break; } } mismatch }; if mismatch { Err(Error::ShapeMismatchBinaryOp { op: "gather", lhs: self.shape().clone(), rhs: indexes.shape().clone(), } .bt())? 
} let storage = self.storage() .gather(self.layout(), &indexes.storage(), indexes.layout(), dim)?; let op = BackpropOp::new2(self, indexes, |t1, t2| Op::Gather(t1, t2, dim)); Ok(from_storage(storage, indexes.shape(), op, false)) } /// Select values for the input tensor at the target indexes across the specified dimension. /// /// The `indexes` is argument is an int tensor with a single dimension. /// The output has the same number of dimension as the `self` input. The target dimension of /// the output has length the length of `indexes` and the values are taken from `self` using /// the index from `indexes`. Other dimensions have the same number of elements as the input /// tensor. pub fn index_select<D: Dim>(&self, indexes: &Self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "index-select")?; let indexes_len = match indexes.dims() { [l] => *l, _ => Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: indexes.shape().clone(), op: "index-select", } .bt())?, }; let storage = self.storage().index_select( &indexes.storage(), self.layout(), indexes.layout(), dim, )?; let mut dims = self.dims().to_vec(); dims[dim] = indexes_len; let op = BackpropOp::new2(self, indexes, |t1, t2| Op::IndexSelect(t1, t2, dim)); Ok(from_storage(storage, dims, op, false)) } /// Returns an iterator over position of the elements in the storage when ranging over the /// index tuples in lexicographic order. pub fn strided_index(&self) -> crate::StridedIndex { self.layout.strided_index() } /// Similar to `strided_index` but returns the position of the start of each contiguous block /// as well as the length of the contiguous blocks. For a contiguous tensor, the index iterator /// will only return the start offset and the size would be the number of elements in the /// tensor. pub fn strided_blocks(&self) -> crate::StridedBlocks { self.layout.strided_blocks() } /// Returns the data contained in a 1D tensor as a vector of scalar values. pub fn to_vec1<S: crate::WithDType>(&self) -> Result<Vec<S>> { if self.rank() != 1 { Err(Error::UnexpectedNumberOfDims { expected: 1, got: self.rank(), shape: self.shape().clone(), } .bt())? } let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; let data = match self.layout.contiguous_offsets() { Some((o1, o2)) => data[o1..o2].to_vec(), None => self.strided_index().map(|i| data[i]).collect(), }; Ok::<Vec<_>, Error>(data) }; match &*self.storage() { Storage::Cpu(storage) => from_cpu_storage(storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// Returns the data contained in a 2D tensor as a vector of vector of scalar values. 
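    ///
    /// Illustrative sketch (editorial addition):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[[0u32, 1], [2, 3]], &Device::Cpu)?;
    /// assert_eq!(t.to_vec2::<u32>()?, &[[0, 1], [2, 3]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```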
pub fn to_vec2<S: crate::WithDType>(&self) -> Result<Vec<Vec<S>>> { let (dim1, dim2) = self.dims2()?; let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; let mut rows = vec![]; match self.layout.contiguous_offsets() { Some((o1, o2)) => { let data = &data[o1..o2]; for idx_row in 0..dim1 { rows.push(data[idx_row * dim2..(idx_row + 1) * dim2].to_vec()) } } None => { let mut src_index = self.strided_index(); for _idx_row in 0..dim1 { let row = (0..dim2).map(|_| data[src_index.next().unwrap()]).collect(); rows.push(row) } assert!(src_index.next().is_none()); } } Ok(rows) }; match &*self.storage() { Storage::Cpu(storage) => from_cpu_storage(storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// Returns the data contained in a 3D tensor. pub fn to_vec3<S: crate::WithDType>(&self) -> Result<Vec<Vec<Vec<S>>>> { let (dim1, dim2, dim3) = self.dims3()?; let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; let mut top_rows = vec![]; match self.layout.contiguous_offsets() { Some((o1, o2)) => { let data = &data[o1..o2]; let dim23 = dim2 * dim3; for idx1 in 0..dim1 { let data = &data[idx1 * dim23..(idx1 + 1) * dim23]; let mut rows = vec![]; for idx2 in 0..dim2 { rows.push(data[idx2 * dim3..(idx2 + 1) * dim3].to_vec()) } top_rows.push(rows); } } None => { let mut src_index = self.strided_index(); for _idx in 0..dim1 { let mut rows = vec![]; for _jdx in 0..dim2 { let row = (0..dim3).map(|_| data[src_index.next().unwrap()]).collect(); rows.push(row) } top_rows.push(rows); } assert!(src_index.next().is_none()); } } Ok(top_rows) }; match &*self.storage() { Storage::Cpu(storage) => from_cpu_storage(storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// The dtype for the elements stored in the input tensor. pub fn dtype(&self) -> DType { self.dtype } /// The device on which the input tensor is located. pub fn device(&self) -> &Device { &self.device } /// The tensor shape, i.e. dimension sizes on each axis. pub fn shape(&self) -> &Shape { self.layout().shape() } /// The dimension size for this tensor on each axis. pub fn dims(&self) -> &[usize] { self.shape().dims() } /// The dimension size for a specified dimension index. pub fn dim<D: Dim>(&self, dim: D) -> Result<usize> { let dim = dim.to_index(self.shape(), "dim")?; Ok(self.dims()[dim]) } /// The layout of the input tensor, this stores both the shape of the tensor as well as the /// strides and the start offset to apply to the underlying storage. pub fn layout(&self) -> &Layout { &self.layout } pub fn stride(&self) -> &[usize] { self.layout.stride() } /// The number of dimensions for this tensor, 0 for a scalar tensor, 1 for a 1D tensor, etc. pub fn rank(&self) -> usize { self.shape().rank() } /// The number of elements stored in this tensor. pub fn elem_count(&self) -> usize { self.shape().elem_count() } /// The unique identifier for this tensor. pub fn id(&self) -> TensorId { self.id } /// Whether this tensor is a variable or not. A variable is a tensor for which gradient is /// tracked and on which backpropagation can be performed. 
pub fn is_variable(&self) -> bool { self.is_variable } pub(crate) fn op(&self) -> &Option<Op> { &self.op } /// Computes the sum of all the elements in this tensor and returns a tensor holding this /// scalar with zero dimensions. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.sum_all()?; /// assert_eq!(tensor.to_scalar::<f32>()?, 15.); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn sum_all(&self) -> Result<Tensor> { let dims: Vec<_> = (0..self.rank()).collect(); self.sum(dims) } pub fn mean_all(&self) -> Result<Tensor> { self.sum_all()? / self.elem_count() as f64 } fn flatten_<D1: Dim, D2: Dim>( &self, start_dim: Option<D1>, end_dim: Option<D2>, ) -> Result<Tensor> { if self.rank() == 0 { self.reshape(1) } else { let start_dim = match start_dim { None => 0, Some(dim) => dim.to_index(self.shape(), "flatten")?, }; let end_dim = match end_dim { None => self.rank() - 1, Some(dim) => dim.to_index(self.shape(), "flatten")?, }; if start_dim < end_dim { let dims = self.dims(); let mut dst_dims = dims[..start_dim].to_vec(); dst_dims.push(dims[start_dim..end_dim + 1].iter().product::<usize>()); if end_dim + 1 < dims.len() { dst_dims.extend(&dims[end_dim + 1..]); } self.reshape(dst_dims) } else { Ok(self.clone()) } } } /// Flattens the input tensor on the dimension indexes from `start_dim` to `end_dim` (both /// inclusive). pub fn flatten<D1: Dim, D2: Dim>(&self, start_dim: D1, end_dim: D2) -> Result<Tensor> { self.flatten_(Some(start_dim), Some(end_dim)) } /// Flattens the input tensor on the dimension indexes from `0` to `end_dim` (inclusive). pub fn flatten_to<D: Dim>(&self, end_dim: D) -> Result<Tensor> { self.flatten_(None::<usize>, Some(end_dim)) } /// Flattens the input tensor on the dimension indexes from `start_dim` (inclusive) to the last /// dimension. pub fn flatten_from<D: Dim>(&self, start_dim: D) -> Result<Tensor> { self.flatten_(Some(start_dim), None::<usize>) } /// Flattens the input tensor by reshaping it into a one dimension tensor. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.flatten_all()?; /// assert_eq!(tensor.to_vec1::<f32>()?, &[0., 1., 2., 3., 4., 5.]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn flatten_all(&self) -> Result<Tensor> { self.flatten_(None::<usize>, None::<usize>) } /// Returns the sub-tensor fixing the index at `i` on the first dimension. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let t = tensor.get(0)?; /// assert_eq!(t.to_vec1::<f32>()?, &[0., 1.]); /// let t = tensor.get(1)?; /// assert_eq!(t.to_vec1::<f32>()?, &[2., 3.]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn get(&self, i: usize) -> Result<Tensor> { let dims = self.dims(); if dims.is_empty() { Ok(self.clone()) } else { self.narrow(0, i, 1)?.reshape(&dims[1..]) } } /// Returns the sub-tensor fixing the index at `index` on the dimension `dim`. 
/// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let t = tensor.get_on_dim(1, 0)?; /// assert_eq!(t.to_vec1::<f32>()?, &[0., 2., 4.]); /// let t = tensor.get_on_dim(1, 1)?; /// assert_eq!(t.to_vec1::<f32>()?, &[1., 3., 5.]); /// let t = tensor.get_on_dim(0, 1)?; /// assert_eq!(t.to_vec1::<f32>()?, &[2., 3.]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn get_on_dim<D: Dim>(&self, dim: D, index: usize) -> Result<Tensor> { let dim = dim.to_index(self.shape(), "get_on_dim")?; self.narrow(dim, index, 1)?.squeeze(dim) } /// Returns a tensor that is a transposed version of the input, the two last dimensions of the /// input are swapped. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.t()?; /// assert_eq!(tensor.to_vec2::<f32>()?, &[[0.0, 2.0, 4.0], [1.0, 3.0, 5.0]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn t(&self) -> Result<Tensor> { let rank = self.rank(); if rank < 2 { Err(Error::UnexpectedNumberOfDims { expected: 2, got: rank, shape: self.shape().clone(), } .bt())? } self.transpose(rank - 2, rank - 1) } /// Returns a tensor that is a transposed version of the input, the given dimensions are /// swapped. pub fn transpose<D1: Dim, D2: Dim>(&self, dim1: D1, dim2: D2) -> Result<Tensor> { let dim1 = dim1.to_index(self.shape(), "transpose")?; let dim2 = dim2.to_index(self.shape(), "transpose")?; if dim1 == dim2 { return Ok(self.clone()); } let op = BackpropOp::new1(self, |t| Op::Transpose(t, dim1, dim2)); let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: self.layout.transpose(dim1, dim2)?, op, is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } /// Returns a tensor with the same data as the input where the dimensions have been permuted. /// dims must be a permutation, i.e. include each dimension index exactly once. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::arange(0u32, 120u32, &Device::Cpu)?.reshape((2, 3, 4, 5))?; /// assert_eq!(tensor.dims(), &[2, 3, 4, 5]); /// let tensor = tensor.permute((2, 3, 1, 0))?; /// assert_eq!(tensor.dims(), &[4, 5, 3, 2]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn permute<D: Dims>(&self, dims: D) -> Result<Tensor> { let dims = dims.to_indexes(self.shape(), "permute")?; // O(n^2) permutation check but these arrays are small. let is_permutation = dims.len() == self.rank() && (0..dims.len()).all(|i| dims.contains(&i)); if !is_permutation { bail!( "dimension mismatch in permute, tensor {:?}, dims: {:?}", self.dims(), dims ) } let op = BackpropOp::new1(self, |t| Op::Permute(t, dims.clone())); let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: self.layout.permute(&dims)?, op, is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } /// Returns true if the data is stored in a C contiguous (aka row major) way. pub fn is_contiguous(&self) -> bool { self.layout.is_contiguous() } /// Returns true if the data is stored in a Fortran contiguous (aka column major) way. pub fn is_fortran_contiguous(&self) -> bool { self.layout.is_fortran_contiguous() } /// Compared to clone, this copies the actual storage but may fail because of running out of /// memory. 
pub fn copy(&self) -> Result<Tensor> { let op = BackpropOp::new1(self, Op::Copy); let tensor_ = Tensor_ { id: TensorId::new(), storage: Arc::new(RwLock::new(self.storage().try_clone(self.layout())?)), layout: self.layout.clone(), op, is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } /// Returns a new tensor detached from the current graph, gradient are not propagated through /// this new node. The storage of this tensor is shared with the initial tensor. /// /// If the tensor is already detached from the computation graph, the same tensor is returned. pub fn detach(&self) -> Result<Tensor> { if self.op.is_none() && !self.is_variable { Ok(self.clone()) } else { let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: self.layout.clone(), op: BackpropOp::none(), is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } } /// If the target device is the same as the tensor device, only a shallow copy is performed. pub fn to_device(&self, device: &Device) -> Result<Tensor> { if self.device().same_device(device) { Ok(self.clone()) } else { let storage = match (&*self.storage(), device) { (Storage::Cpu(storage), Device::Cuda(cuda)) => { Storage::Cuda(cuda.storage_from_cpu_storage(storage)?) } (Storage::Cpu(storage), Device::Metal(metal)) => { Storage::Metal(metal.storage_from_cpu_storage(storage)?) } (Storage::Cuda(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?), (Storage::Metal(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?), (Storage::Cuda(storage), Device::Cuda(cuda)) => { // TODO: Avoid passing through the cpu storage here, especially if the gpu ids // are the same. let cpu_storage = storage.to_cpu_storage()?; Storage::Cuda(cuda.storage_from_cpu_storage(&cpu_storage)?) } (Storage::Cpu(storage), Device::Cpu) => Storage::Cpu(storage.clone()), _ => { bail!("not implemented yet") } }; let op = BackpropOp::new1(self, Op::ToDevice); let tensor_ = Tensor_ { id: TensorId::new(), storage: Arc::new(RwLock::new(storage)), layout: self.layout.clone(), op, is_variable: false, dtype: self.dtype, device: device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } } /// Returns a new tensor duplicating data from the original tensor. New dimensions are inserted /// on the left. pub fn broadcast_left<S: Into<Shape>>(&self, left_shape: S) -> Result<Self> { let left_shape = left_shape.into(); let mut dims = left_shape.into_dims(); dims.extend(self.dims()); self.broadcast_as(dims) } /// Broadcast the input tensor to the target shape. This returns an error if the input shape is /// not compatible with the target shape. /// /// If the input shape is `i_1, i_2, ... i_k`, the target shape has to have `k` dimensions or /// more and shape `j_1, ..., j_l, t_1, t_2, ..., t_k`. The dimensions `j_1` to `j_l` can have /// any value, the dimension `t_a` must be equal to `i_a` if `i_a` is different from 1. If /// `i_a` is equal to 1, any value can be used. pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self> { let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: self.layout.broadcast_as(shape)?, op: BackpropOp::new1(self, Op::Broadcast), is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } /// An alias for broadcast_as. pub fn expand<S: Into<Shape>>(&self, shape: S) -> Result<Self> { self.broadcast_as(shape) } /// Casts the input tensor to the target `dtype`. 
/// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(3.14159265358979f64, &Device::Cpu)?; /// assert_eq!(tensor.to_scalar::<f64>()?, 3.14159265358979); /// let tensor = tensor.to_dtype(candle_core::DType::F32)?; /// assert_eq!(tensor.to_scalar::<f32>()?, 3.1415927); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn to_dtype(&self, dtype: DType) -> Result<Self> { if self.dtype() == dtype { Ok(self.clone()) } else { let shape = self.shape(); let storage = self.storage().to_dtype(self.layout(), dtype)?; let op = BackpropOp::new1(self, Op::ToDType); Ok(from_storage(storage, shape.clone(), op, false)) } } /// Returns a tensor that is in row major order. This is the same as the original tensor if it /// was already contiguous, otherwise a copy is triggered. pub fn contiguous(&self) -> Result<Tensor> { if self.is_contiguous() { Ok(self.clone()) } else { let shape = self.shape(); let mut storage = self.device().zeros(shape, self.dtype())?; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; let op = BackpropOp::new1(self, Op::Copy); Ok(from_storage(storage, shape.clone(), op, false)) } } /// Create a variable based on the values currently stored in a tensor. The storage is always /// copied. pub(crate) fn make_var(&self) -> Result<Tensor> { let shape = self.shape().clone(); let mut storage = self.device().zeros(&shape, self.dtype())?; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; Ok(from_storage(storage, shape, BackpropOp::none(), true)) } /// Reshape returns a tensor with the target shape provided that the number of elements of the /// original tensor is the same. /// If the input tensor is contiguous, this is a view on the original data. Otherwise this uses /// a new storage and copies the data over, the returned tensor is always contiguous. /// /// The shape can be specified using a tuple of `usize` and at most one `()` in which case /// the behavior is the same as when using `-1` in PyTorch: this dimension size is adjusted so /// as to match the number of elements in the tensor. /// /// ```rust /// # use candle_core::{Tensor, DType, Device, D}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// /// let c = a.reshape((1, 6))?; /// assert_eq!(c.shape().dims(), &[1, 6]); /// /// let c = a.reshape((3, 2))?; /// assert_eq!(c.shape().dims(), &[3, 2]); /// /// let c = a.reshape((2, (), 1))?; /// assert_eq!(c.shape().dims(), &[2, 3, 1]); /// /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn reshape<S: crate::shape::ShapeWithOneHole>(&self, s: S) -> Result<Tensor> { let shape = s.into_shape(self.elem_count())?; if shape.elem_count() != self.elem_count() { return Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: shape, op: "reshape", } .bt()); } let op = BackpropOp::new1(self, Op::Reshape); if self.is_contiguous() { let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: Layout::contiguous_with_offset(shape, self.layout.start_offset()), op, is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } else { let mut storage = self.device().zeros(&shape, self.dtype())?; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; Ok(from_storage(storage, shape, op, false)) } } /// Creates a new tensor with the specified dimension removed if its size was one. 
/// /// ```rust /// # use candle_core::{Tensor, DType, Device, D}; /// let a = Tensor::zeros((2, 3, 1), DType::F32, &Device::Cpu)?; /// /// let c = a.squeeze(2)?; /// assert_eq!(c.shape().dims(), &[2, 3]); /// /// let c = a.squeeze(D::Minus1)?; /// assert_eq!(c.shape().dims(), &[2, 3]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn squeeze<D: Dim>(&self, dim: D) -> Result<Self> { // The PyTorch semantics are to return the same tensor if the target dimension // does not have a size of 1. let dims = self.dims(); let dim = dim.to_index(self.shape(), "squeeze")?; if dims[dim] == 1 { let mut dims = dims.to_vec(); dims.remove(dim); self.reshape(dims) } else { Ok(self.clone()) } } /// Creates a new tensor with a dimension of size one inserted at the specified position. /// /// ```rust /// # use candle_core::{Tensor, DType, Device, D}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// /// let c = a.unsqueeze(0)?; /// assert_eq!(c.shape().dims(), &[1, 2, 3]); /// /// let c = a.unsqueeze(D::Minus1)?; /// assert_eq!(c.shape().dims(), &[2, 3, 1]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn unsqueeze<D: Dim>(&self, dim: D) -> Result<Self> { let mut dims = self.dims().to_vec(); let dim = dim.to_index_plus_one(self.shape(), "unsqueeze")?; // Cannot panic because to_index_plus_one already checks dimensions dims.insert(dim, 1); self.reshape(dims) } /// Stacks two or more tensors along a particular dimension. /// /// All tensors must have the same rank, and the output has one additional rank /// /// ```rust /// # use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// /// let c = Tensor::stack(&[&a, &b], 0)?; /// assert_eq!(c.shape().dims(), &[2, 2, 3]); /// /// let c = Tensor::stack(&[&a, &b], 2)?; /// assert_eq!(c.shape().dims(), &[2, 3, 2]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn stack<A: AsRef<Tensor>, D: Dim>(args: &[A], dim: D) -> Result<Self> { if args.is_empty() { Err(Error::OpRequiresAtLeastOneTensor { op: "stack" }.bt())? } let dim = dim.to_index_plus_one(args[0].as_ref().shape(), "stack")?; let args = args .iter() .map(|t| t.as_ref().unsqueeze(dim)) .collect::<Result<Vec<_>>>()?; Self::cat(&args, dim) } /// Concatenates two or more tensors along a particular dimension. /// /// All tensors must of the same rank, and the output will have /// the same rank /// /// ```rust /// # use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// /// let c = Tensor::cat(&[&a, &b], 0)?; /// assert_eq!(c.shape().dims(), &[4, 3]); /// /// let c = Tensor::cat(&[&a, &b], 1)?; /// assert_eq!(c.shape().dims(), &[2, 6]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn cat<A: AsRef<Tensor>, D: Dim>(args: &[A], dim: D) -> Result<Self> { if args.is_empty() { Err(Error::OpRequiresAtLeastOneTensor { op: "cat" }.bt())? } let arg0 = args[0].as_ref(); if args.len() == 1 { return Ok(arg0.clone()); } let dim = dim.to_index(arg0.shape(), "cat")?; for arg in args { arg.as_ref().check_dim(dim, "cat")?; } for (arg_idx, arg) in args.iter().enumerate() { let arg = arg.as_ref(); if arg0.rank() != arg.rank() { Err(Error::UnexpectedNumberOfDims { expected: arg0.rank(), got: arg.rank(), shape: arg.shape().clone(), } .bt())? 
} for (dim_idx, (v1, v2)) in arg0 .shape() .dims() .iter() .zip(arg.shape().dims().iter()) .enumerate() { if dim_idx != dim && v1 != v2 { Err(Error::ShapeMismatchCat { dim: dim_idx, first_shape: arg0.shape().clone(), n: arg_idx + 1, nth_shape: arg.shape().clone(), } .bt())? } } } if dim == 0 { Self::cat0(args) } else { // TODO: Avoid these transpositions and have an implementation that works // for dim != 0... let args: Vec<Tensor> = args .iter() .map(|a| a.as_ref().transpose(0, dim)) .collect::<Result<Vec<_>>>()?; let cat = Self::cat0(&args)?; cat.transpose(0, dim) } } fn cat0<A: AsRef<Tensor>>(args: &[A]) -> Result<Self> { if args.is_empty() { Err(Error::OpRequiresAtLeastOneTensor { op: "cat" }.bt())? } let arg0 = args[0].as_ref(); if args.len() == 1 { return Ok(arg0.clone()); } let rank = arg0.rank(); let device = arg0.device(); let dtype = arg0.dtype(); let first_dims = arg0.shape().dims(); let mut cat_dims = first_dims.to_vec(); cat_dims[0] = 0; let mut offsets = vec![0usize]; for (arg_idx, arg) in args.iter().enumerate() { let arg = arg.as_ref(); if arg.dtype() != dtype { Err(Error::DTypeMismatchBinaryOp { lhs: dtype, rhs: arg.dtype(), op: "cat", } .bt())? } if arg.device().location() != device.location() { Err(Error::DeviceMismatchBinaryOp { lhs: device.location(), rhs: arg.device().location(), op: "cat", } .bt())? } if rank != arg.rank() { Err(Error::UnexpectedNumberOfDims { expected: rank, got: arg.rank(), shape: arg.shape().clone(), } .bt())? } for (dim_idx, (v1, v2)) in arg0 .shape() .dims() .iter() .zip(arg.shape().dims().iter()) .enumerate() { if dim_idx == 0 { cat_dims[0] += v2; } if dim_idx != 0 && v1 != v2 { Err(Error::ShapeMismatchCat { dim: dim_idx, first_shape: arg0.shape().clone(), n: arg_idx + 1, nth_shape: arg.shape().clone(), } .bt())? } } let next_offset = offsets.last().unwrap() + arg.elem_count(); offsets.push(next_offset); } let shape = Shape::from(cat_dims); let op = BackpropOp::new(args, |args| Op::Cat(args, 0)); let mut storage = device.zeros(&shape, dtype)?; for (arg, &offset) in args.iter().zip(offsets.iter()) { let arg = arg.as_ref(); arg.storage() .copy_strided_src(&mut storage, offset, arg.layout())?; } Ok(from_storage(storage, shape, op, false)) } /// Pad the input tensor using 0s along dimension `dim`. This adds `left` elements before the /// input tensor values and `right` elements after. pub fn pad_with_zeros<D: Dim>(&self, dim: D, left: usize, right: usize) -> Result<Self> { if left == 0 && right == 0 { Ok(self.clone()) } else if left == 0 { let dim = dim.to_index(self.shape(), "pad_with_zeros")?; let mut dims = self.dims().to_vec(); dims[dim] = right; let right = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; Tensor::cat(&[self, &right], dim) } else if right == 0 { let dim = dim.to_index(self.shape(), "pad_with_zeros")?; let mut dims = self.dims().to_vec(); dims[dim] = left; let left = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; Tensor::cat(&[&left, self], dim) } else { let dim = dim.to_index(self.shape(), "pad_with_zeros")?; let mut dims = self.dims().to_vec(); dims[dim] = left; let left = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; dims[dim] = right; let right = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; Tensor::cat(&[&left, self, &right], dim) } } /// Pad the input tensor using same values along dimension `dim`. This adds `left` elements before the /// input tensor values and `right` elements after. 
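    ///
    /// Illustrative sketch (editorial addition): edge values are replicated on each side.
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[[1f32, 2., 3.]], &Device::Cpu)?;
    /// let t = t.pad_with_same(1, 1, 2)?;
    /// assert_eq!(t.to_vec2::<f32>()?, &[[1., 1., 2., 3., 3., 3.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```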
pub fn pad_with_same<D: Dim>(&self, dim: D, left: usize, right: usize) -> Result<Self> { if left == 0 && right == 0 { Ok(self.clone()) } else if self.elem_count() == 0 { bail!("cannot use pad_with_same on an empty tensor") } else if left == 0 { let dim = dim.to_index(self.shape(), "pad_with_same")?; let r = self.narrow(dim, self.dim(dim)? - 1, 1)?; let mut v = vec![self]; for _ in 0..right { v.push(&r) } Tensor::cat(&v, dim) } else if right == 0 { let dim = dim.to_index(self.shape(), "pad_with_same")?; let l = self.narrow(dim, 0, 1)?; let mut v = vec![]; for _ in 0..left { v.push(&l) } v.push(self); Tensor::cat(&v, dim) } else { let dim = dim.to_index(self.shape(), "pad_with_same")?; let l = self.narrow(dim, 0, 1)?; let r = self.narrow(dim, self.dim(dim)? - 1, 1)?; let mut v = vec![]; for _ in 0..left { v.push(&l) } v.push(self); for _ in 0..right { v.push(&r) } Tensor::cat(&v, dim) } } /// Run the `forward` method of `m` on `self`. pub fn apply<M: crate::Module>(&self, m: &M) -> Result<Self> { m.forward(self) } /// Run the `forward` method of `m` on `self`. pub fn apply_t<M: crate::ModuleT>(&self, m: &M, train: bool) -> Result<Self> { m.forward_t(self, train) } pub(crate) fn storage(&self) -> std::sync::RwLockReadGuard<'_, Storage> { self.storage.read().unwrap() } // If we extend the visibility of this function to be usable outside of this crate, we should // make it unsafe. pub(crate) fn storage_mut_and_layout( &self, ) -> (std::sync::RwLockWriteGuard<'_, Storage>, &Layout) { let storage = self.storage.write().unwrap(); (storage, &self.layout) } /// The storage used by this tensor, together with the layout to use to access it safely. pub fn storage_and_layout(&self) -> (std::sync::RwLockReadGuard<'_, Storage>, &Layout) { let storage = self.storage.read().unwrap(); (storage, &self.layout) } pub(crate) fn same_storage(&self, rhs: &Self) -> bool { let lhs: &RwLock<Storage> = self.storage.as_ref(); let rhs: &RwLock<Storage> = rhs.storage.as_ref(); std::ptr::eq(lhs, rhs) } /// Applies a unary custom op without backward support pub fn apply_op1_no_bwd<C: CustomOp1>(&self, c: &C) -> Result<Self> { let (storage, shape) = self.storage().apply_op1(self.layout(), c)?; Ok(from_storage(storage, shape, BackpropOp::none(), false)) } /// Applies a binary custom op without backward support pub fn apply_op2_no_bwd<C: CustomOp2>(&self, rhs: &Self, c: &C) -> Result<Self> { let (storage, shape) = self.storage() .apply_op2(self.layout(), &rhs.storage(), rhs.layout(), c)?; Ok(from_storage(storage, shape, BackpropOp::none(), false)) } /// Applies a ternary custom op without backward support pub fn apply_op3_no_bwd<C: CustomOp3>(&self, t2: &Self, t3: &Self, c: &C) -> Result<Self> { let (storage, shape) = self.storage().apply_op3( self.layout(), &t2.storage(), t2.layout(), &t3.storage(), t3.layout(), c, )?; Ok(from_storage(storage, shape, BackpropOp::none(), false)) } /// Applies a unary custom op. pub fn apply_op1_arc(&self, c: Arc<Box<dyn CustomOp1 + Send + Sync>>) -> Result<Self> { let (storage, shape) = self .storage() .apply_op1(self.layout(), c.as_ref().as_ref())?; let op = BackpropOp::new1(self, |s| Op::CustomOp1(s, c.clone())); Ok(from_storage(storage, shape, op, false)) } pub fn apply_op1<C: 'static + CustomOp1 + Send + Sync>(&self, c: C) -> Result<Self> { self.apply_op1_arc(Arc::new(Box::new(c))) } /// Applies a binary custom op. 
pub fn apply_op2_arc( &self, rhs: &Self, c: Arc<Box<dyn CustomOp2 + Send + Sync>>, ) -> Result<Self> { let (storage, shape) = self.storage().apply_op2( self.layout(), &rhs.storage(), rhs.layout(), c.as_ref().as_ref(), )?; let op = BackpropOp::new2(self, rhs, |t1, t2| Op::CustomOp2(t1, t2, c.clone())); Ok(from_storage(storage, shape, op, false)) } pub fn apply_op2<C: 'static + CustomOp2 + Send + Sync>(&self, r: &Self, c: C) -> Result<Self> { self.apply_op2_arc(r, Arc::new(Box::new(c))) } /// Applies a ternary custom op. pub fn apply_op3_arc( &self, t2: &Self, t3: &Self, c: Arc<Box<dyn CustomOp3 + Send + Sync>>, ) -> Result<Self> { let (storage, shape) = self.storage().apply_op3( self.layout(), &t2.storage(), t2.layout(), &t3.storage(), t3.layout(), c.as_ref().as_ref(), )?; let op = BackpropOp::new3(self, t2, t3, |t1, t2, t3| { Op::CustomOp3(t1, t2, t3, c.clone()) }); Ok(from_storage(storage, shape, op, false)) } pub fn apply_op3<C: 'static + CustomOp3 + Send + Sync>( &self, t2: &Self, t3: &Self, c: C, ) -> Result<Self> { self.apply_op3_arc(t2, t3, Arc::new(Box::new(c))) } /// Normalize a 'relative' axis value: positive values are kept, negative /// values means counting the dimensions from the back. pub fn normalize_axis(&self, axis: i64) -> Result<usize> { let rank = self.rank() as i64; if rank <= axis { bail!("axis {axis} is too large, tensor rank {rank}") } else if 0 <= axis { Ok(axis as usize) } else { let naxis = rank + axis; if naxis < 0 { bail!("axis {axis} is too small, tensor rank {rank}") } Ok(naxis as usize) } } /// Returns a lower triangular matrix of ones of size n by n. pub fn tril2(n: usize, dtype: DType, device: &Device) -> Result<Self> { let t = Tensor::arange(0u32, n as u32, device)?; let t1 = t.reshape((1, n))?.broadcast_as((n, n))?; let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?; t1.le(&t2)?.to_dtype(dtype) } /// Returns an upper triangular matrix of ones of size n by n. pub fn triu2(n: usize, dtype: DType, device: &Device) -> Result<Self> { let t = Tensor::arange(0u32, n as u32, device)?; let t1 = t.reshape((1, n))?.broadcast_as((n, n))?; let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?; t1.ge(&t2)?.to_dtype(dtype) } /// Returns a matrix with a diagonal of ones of size n by n. pub fn eye(n: usize, dtype: DType, device: &Device) -> Result<Self> { let t = Tensor::arange(0u32, n as u32, device)?; let t1 = t.reshape((1, n))?.broadcast_as((n, n))?; let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?; t1.eq(&t2)?.to_dtype(dtype) } /// Returns the cumulative sum of elements of the input tensor summed over the specified /// dimension. /// /// This operation is most efficient when dim is the last dimension of the tensor. pub fn cumsum<D: Dim>(&self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "cumsum")?; let rank = self.rank(); if rank == 0 { return Ok(self.clone()); } let n_axis = self.dim(dim)?; let triu = Tensor::triu2(n_axis, self.dtype(), self.device())?; if rank == 1 { self.unsqueeze(0)?.matmul(&triu)?.squeeze(0) } else { let last = rank - 1; let t = self.transpose(dim, last)?; let t = t.broadcast_matmul(&triu)?; t.transpose(dim, last) } } /// Returns a copy of `self` where the values within `ranges` have been replaced with the /// content of `src`. 
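    ///
    /// Illustrative sketch (editorial addition): overwrite a 1x2 block of a 2x4 tensor.
    ///
    /// ```rust
    /// use candle_core::{Tensor, DType, Device};
    /// let dst = Tensor::zeros((2, 4), DType::F32, &Device::Cpu)?;
    /// let src = Tensor::ones((1, 2), DType::F32, &Device::Cpu)?;
    /// let out = dst.slice_assign(&[0..1, 1..3], &src)?;
    /// assert_eq!(out.to_vec2::<f32>()?, &[[0., 1., 1., 0.], [0., 0., 0., 0.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```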
pub fn slice_assign<D: std::ops::RangeBounds<usize>>( &self, ranges: &[D], src: &Tensor, ) -> Result<Self> { let src_dims = src.dims(); let self_dims = self.dims(); if self_dims.len() != src_dims.len() { bail!( "slice-assign requires input with the same rank {} <> {}", self_dims.len(), src_dims.len() ) } if self_dims.len() != ranges.len() { bail!( "slice-assign requires input with the same rank as there are ranges {} <> {}", self_dims.len(), ranges.len() ) } let mut src = src.clone(); let mut mask = Self::ones(src.shape(), DType::U8, src.device())?; for (i, range) in ranges.iter().enumerate() { let start_included = match range.start_bound() { std::ops::Bound::Unbounded => 0, std::ops::Bound::Included(v) => *v, std::ops::Bound::Excluded(v) => *v + 1, }; let end_excluded = match range.end_bound() { std::ops::Bound::Unbounded => self_dims[i], std::ops::Bound::Included(v) => *v + 1, std::ops::Bound::Excluded(v) => *v, }; if end_excluded <= start_included { bail!("slice-assign: empty range for dim {i}, {start_included} {end_excluded}") } if self_dims[i] < end_excluded { bail!( "slice-assign: upper bound is out of range for dim {i}, {end_excluded} {}", self_dims[i] ) } if end_excluded - start_included != src_dims[i] { bail!( "slice-assign: the range for dim {i} ({start_included}..{end_excluded}) does not match the size of src {}", src_dims[i] ) } src = src.pad_with_zeros(i, start_included, self_dims[i] - end_excluded)?; mask = mask.pad_with_zeros(i, start_included, self_dims[i] - end_excluded)? } mask.where_cond(/* on_true= */ &src, /* on_false= */ self) } /// Returns log(sum(exp(tensor), dim)). pub fn log_sum_exp<D: Dims>(&self, sum_dims: D) -> Result<Self> { let exp = self.exp()?; let sum = exp.sum(sum_dims)?; sum.log() } /// Pointwise pow operation. pub fn pow(&self, rhs: &Tensor) -> Result<Self> { rhs.mul(&self.log()?)?.exp() } /// Broadcasting version of `pow`. pub fn broadcast_pow(&self, rhs: &Tensor) -> Result<Self> { rhs.broadcast_mul(&self.log()?)?.exp() } } macro_rules! 
bin_trait { ($trait:ident, $fn1:ident, $mul:expr, $add:expr) => { impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<B> for Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: B) -> Self::Output { Tensor::$fn1(&self, rhs.borrow()) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<B> for &Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: B) -> Self::Output { Tensor::$fn1(&self, rhs.borrow()) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Tensor> for Result<B> { type Output = Result<Tensor>; fn $fn1(self, rhs: Tensor) -> Self::Output { Tensor::$fn1(self?.borrow(), &rhs) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<&Tensor> for Result<B> { type Output = Result<Tensor>; fn $fn1(self, rhs: &Tensor) -> Self::Output { Tensor::$fn1(self?.borrow(), rhs) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Result<B>> for Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: Result<B>) -> Self::Output { Tensor::$fn1(&self, rhs?.borrow()) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Result<B>> for &Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: Result<B>) -> Self::Output { Tensor::$fn1(&self, rhs?.borrow()) } } impl std::ops::$trait<f64> for Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: f64) -> Self::Output { self.affine($mul(rhs), $add(rhs)) } } impl std::ops::$trait<f64> for &Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: f64) -> Self::Output { self.affine($mul(rhs), $add(rhs)) } } }; } bin_trait!(Add, add, |_| 1., |v| v); bin_trait!(Sub, sub, |_| 1., |v: f64| -v); bin_trait!(Mul, mul, |v| v, |_| 0.); bin_trait!(Div, div, |v| 1. / v, |_| 0.); impl std::ops::Add<Tensor> for f64 { type Output = Result<Tensor>; fn add(self, rhs: Tensor) -> Self::Output { rhs + self } } impl std::ops::Add<&Tensor> for f64 { type Output = Result<Tensor>; fn add(self, rhs: &Tensor) -> Self::Output { rhs + self } } impl std::ops::Mul<Tensor> for f64 { type Output = Result<Tensor>; fn mul(self, rhs: Tensor) -> Self::Output { rhs * self } } impl std::ops::Mul<&Tensor> for f64 { type Output = Result<Tensor>; fn mul(self, rhs: &Tensor) -> Self::Output { rhs * self } } impl std::ops::Sub<Tensor> for f64 { type Output = Result<Tensor>; fn sub(self, rhs: Tensor) -> Self::Output { rhs.affine(-1., self) } } impl std::ops::Sub<&Tensor> for f64 { type Output = Result<Tensor>; fn sub(self, rhs: &Tensor) -> Self::Output { rhs.affine(-1., self) } } impl std::ops::Div<Tensor> for f64 { type Output = Result<Tensor>; #[allow(clippy::suspicious_arithmetic_impl)] fn div(self, rhs: Tensor) -> Self::Output { rhs.recip()? * self } } impl std::ops::Div<&Tensor> for f64 { type Output = Result<Tensor>; #[allow(clippy::suspicious_arithmetic_impl)] fn div(self, rhs: &Tensor) -> Self::Output { rhs.recip()? * self } }
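#[cfg(test)]
mod scalar_op_examples {
    // Editorial sketch (not part of the original file): exercises the `bin_trait!`
    // impls defined above, mixing tensors with f64 scalars on both sides.
    use crate::{Device, Result, Tensor};

    #[test]
    fn tensor_scalar_ops() -> Result<()> {
        let t = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
        // `&t * 2.` goes through `affine(2., 0.)`, `+ 1.` through `affine(1., 1.)`.
        let u = ((&t * 2.)? + 1.)?;
        assert_eq!(u.to_vec1::<f32>()?, &[3., 5., 7.]);
        // An f64 on the left-hand side uses the dedicated impls defined above.
        let v = (1. - &t)?;
        assert_eq!(v.to_vec1::<f32>()?, &[0., -1., -2.]);
        Ok(())
    }
}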
candle/candle-core/src/tensor.rs/0
{ "file_path": "candle/candle-core/src/tensor.rs", "repo_id": "candle", "token_count": 49489 }
15
# candle-starcoder: code generation model

[StarCoder/BigCode](https://huggingface.co/bigcode/starcoderbase-1b) is an LLM
specialized in code generation. The initial model was trained on 80 programming
languages.

## Running an example

```bash
cargo run --example bigcode --release -- --prompt "fn fact(n: u64) -> u64 "

> fn fact(n: u64) -> u64 {
>     if n == 0 {
>         1
>     } else {
>         n * fact(n - 1)
>     }
> }
```
candle/candle-examples/examples/bigcode/README.md/0
{ "file_path": "candle/candle-examples/examples/bigcode/README.md", "repo_id": "candle", "token_count": 180 }
16
# candle-jina-bert

Jina-Bert is a general large language model with a context size of 8192,
[model card](https://huggingface.co/jinaai/jina-embeddings-v2-base-en). In this
example it can be used for two different tasks:

- Compute sentence embeddings for a prompt.
- Compute similarities between a set of sentences.

## Sentence embeddings

Jina-Bert is used to compute the sentence embeddings for a prompt. The model
weights are downloaded from the hub on the first run.

```bash
cargo run --example jina-bert --release -- --prompt "Here is a test sentence"

> [[[ 0.1595, -0.9885, 0.6494, ..., 0.3003, -0.6901, -1.2355],
>   [ 0.0374, -0.1798, 1.3359, ..., 0.6731, 0.2133, -1.6807],
>   [ 0.1700, -0.8534, 0.8924, ..., -0.1785, -0.0727, -1.5087],
> ...
>   [-0.3113, -1.3665, 0.2027, ..., -0.2519, 0.1711, -1.5811],
>   [ 0.0907, -1.0492, 0.5382, ..., 0.0242, -0.7077, -1.0830],
>   [ 0.0369, -0.6343, 0.6105, ..., 0.0671, 0.3778, -1.1505]]]
> Tensor[[1, 7, 768], f32]
```

## Similarities

In this example, Jina-Bert computes the sentence embeddings for a set of
sentences (hardcoded in the example). Cosine similarities are then computed for
each sentence pair and reported in decreasing order, so the first pair listed
contains the two sentences with the highest similarity score.

The sentence embeddings are computed using average pooling through all the
sentence tokens, including some potential padding.

```bash
cargo run --example jina-bert --release

> score: 0.94 'The new movie is awesome' 'The new movie is so great'
> score: 0.81 'The cat sits outside' 'The cat plays in the garden'
> score: 0.78 'I love pasta' 'Do you like pizza?'
> score: 0.68 'I love pasta' 'The new movie is awesome'
> score: 0.67 'A man is playing guitar' 'A woman watches TV'
```
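The scores above are plain cosine similarities between the pooled embeddings. As a rough
sketch of that computation with candle (editorial addition, not part of the example; the
stand-in vectors below are made up and would normally come from the model's forward pass
followed by average pooling):

```rust
use candle_core::{Device, Result, Tensor};

fn cosine_similarity(e1: &Tensor, e2: &Tensor) -> Result<f64> {
    let sum_12 = (e1 * e2)?.sum_all()?.to_scalar::<f32>()? as f64;
    let sum_11 = (e1 * e1)?.sum_all()?.to_scalar::<f32>()? as f64;
    let sum_22 = (e2 * e2)?.sum_all()?.to_scalar::<f32>()? as f64;
    Ok(sum_12 / (sum_11 * sum_22).sqrt())
}

fn main() -> Result<()> {
    // Stand-in embedding vectors, for illustration only.
    let e1 = Tensor::new(&[0.1f32, 0.2, 0.3], &Device::Cpu)?;
    let e2 = Tensor::new(&[0.2f32, 0.2, 0.2], &Device::Cpu)?;
    println!("score: {:.2}", cosine_similarity(&e1, &e2)?);
    Ok(())
}
```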
candle/candle-examples/examples/jina-bert/README.md/0
{ "file_path": "candle/candle-examples/examples/jina-bert/README.md", "repo_id": "candle", "token_count": 663 }
17
# candle-segment-anything: Segment-Anything Model

This example is based on Meta AI's
[Segment-Anything Model](https://github.com/facebookresearch/segment-anything).
This model provides a robust and fast image segmentation pipeline that can be
tweaked via prompting (requesting some points to be in the target mask,
requesting some points to be part of the background so _not_ in the target
mask, or specifying a bounding box).

The default backbone can be replaced by the smaller and faster TinyViT model
based on [MobileSAM](https://github.com/ChaoningZhang/MobileSAM).

## Running an example

```bash
cargo run --example segment-anything --release -- \
  --image candle-examples/examples/yolo-v8/assets/bike.jpg --use-tiny --point 0.6,0.6 --point 0.6,0.55
```

Running this command generates a `sam_merged.jpg` file containing the original
image with a blue overlay of the selected mask. The red dots represent the
prompt specified by `--point 0.6,0.6 --point 0.6,0.55`; this prompt is assumed
to be part of the target mask.

The values used for `--point` should be a comma-delimited pair of float values.
They are proportional to the image dimension, i.e. use 0.5 for the image center.

Original image:

![Leading group, Giro d'Italia 2021](../yolo-v8/assets/bike.jpg)

Segment results by prompting with a single point `--point 0.6,0.55`:

![Leading group, Giro d'Italia 2021](./assets/single_pt_prompt.jpg)

Segment results by prompting with multiple points `--point 0.6,0.6 --point 0.6,0.55`:

![Leading group, Giro d'Italia 2021](./assets/two_pt_prompt.jpg)

### Command-line flags

- `--use-tiny`: use the TinyViT-based MobileSAM backbone rather than the default one.
- `--point`: specifies the location of the target points.
- `--threshold`: sets the threshold value to be part of the mask; a negative value results in a larger mask and can be specified via `--threshold=-1.2`.
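For reference, converting these relative values into pixel coordinates is straightforward;
the sketch below is an editorial illustration (the helper name is made up and is not part
of the example binary):

```rust
/// Turn a relative `--point` argument such as "0.6,0.55" into pixel coordinates.
fn to_pixel_coords(point: &str, width: u32, height: u32) -> Option<(u32, u32)> {
    let (x, y) = point.split_once(',')?;
    let x: f64 = x.trim().parse().ok()?;
    let y: f64 = y.trim().parse().ok()?;
    Some(((x * width as f64) as u32, (y * height as f64) as u32))
}

fn main() {
    // 0.5,0.5 would be the image center, matching the convention described above.
    assert_eq!(to_pixel_coords("0.6,0.55", 1280, 640), Some((768, 352)));
}
```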
candle/candle-examples/examples/segment-anything/README.md/0
{ "file_path": "candle/candle-examples/examples/segment-anything/README.md", "repo_id": "candle", "token_count": 573 }
18
## VGG Model Implementation

This example demonstrates the implementation of VGG models (VGG13, VGG16,
VGG19) using the Candle library.

The VGG models are defined in `candle-transformers/src/models/vgg.rs`. The main
function in `candle-examples/examples/vgg/main.rs` loads an image, selects the
VGG model based on the provided argument, and applies the model to the loaded
image.

You can run the example with the following command:

```bash
cargo run --example vgg --release -- --image ../yolo-v8/assets/bike.jpg --which vgg13
```

In the command above, `--image` specifies the path to the image file and
`--which` specifies the VGG model to use (vgg13, vgg16, or vgg19).
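The forward pass returns unnormalized logits over the 1000 ImageNet classes. A minimal
sketch of turning those logits into a top-1 prediction (editorial addition; it assumes a
`logits` tensor of shape `(1, 1000)` and uses `candle_nn::ops::softmax`):

```rust
use candle_core::{Result, Tensor, D};

fn top1(logits: &Tensor) -> Result<(usize, f32)> {
    // Softmax over the class dimension, then pick the most likely class index.
    let probs: Vec<f32> = candle_nn::ops::softmax(logits, D::Minus1)?
        .squeeze(0)?
        .to_vec1()?;
    let (idx, p) = probs
        .iter()
        .enumerate()
        .max_by(|a, b| a.1.total_cmp(b.1))
        .unwrap();
    Ok((idx, *p))
}
```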
candle/candle-examples/examples/vgg/README.md/0
{ "file_path": "candle/candle-examples/examples/vgg/README.md", "repo_id": "candle", "token_count": 200 }
19
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle_transformers::object_detection::{non_maximum_suppression, Bbox}; mod darknet; use anyhow::Result; use candle::{DType, Device, Tensor}; use candle_nn::{Module, VarBuilder}; use clap::Parser; use image::{DynamicImage, ImageBuffer}; // Assumes x1 <= x2 and y1 <= y2 pub fn draw_rect( img: &mut ImageBuffer<image::Rgb<u8>, Vec<u8>>, x1: u32, x2: u32, y1: u32, y2: u32, ) { for x in x1..=x2 { let pixel = img.get_pixel_mut(x, y1); *pixel = image::Rgb([255, 0, 0]); let pixel = img.get_pixel_mut(x, y2); *pixel = image::Rgb([255, 0, 0]); } for y in y1..=y2 { let pixel = img.get_pixel_mut(x1, y); *pixel = image::Rgb([255, 0, 0]); let pixel = img.get_pixel_mut(x2, y); *pixel = image::Rgb([255, 0, 0]); } } pub fn report( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, ) -> Result<DynamicImage> { let pred = pred.to_device(&Device::Cpu)?; let (npreds, pred_size) = pred.dims2()?; let nclasses = pred_size - 5; // The bounding boxes grouped by (maximum) class index. let mut bboxes: Vec<Vec<Bbox<()>>> = (0..nclasses).map(|_| vec![]).collect(); // Extract the bounding boxes for which confidence is above the threshold. for index in 0..npreds { let pred = Vec::<f32>::try_from(pred.get(index)?)?; let confidence = pred[4]; if confidence > confidence_threshold { let mut class_index = 0; for i in 0..nclasses { if pred[5 + i] > pred[5 + class_index] { class_index = i } } if pred[class_index + 5] > 0. { let bbox = Bbox { xmin: pred[0] - pred[2] / 2., ymin: pred[1] - pred[3] / 2., xmax: pred[0] + pred[2] / 2., ymax: pred[1] + pred[3] / 2., confidence, data: (), }; bboxes[class_index].push(bbox) } } } non_maximum_suppression(&mut bboxes, nms_threshold); // Annotate the original image and print boxes information. let (initial_h, initial_w) = (img.height(), img.width()); let w_ratio = initial_w as f32 / w as f32; let h_ratio = initial_h as f32 / h as f32; let mut img = img.to_rgb8(); for (class_index, bboxes_for_class) in bboxes.iter().enumerate() { for b in bboxes_for_class.iter() { println!( "{}: {:?}", candle_examples::coco_classes::NAMES[class_index], b ); let xmin = ((b.xmin * w_ratio) as u32).clamp(0, initial_w - 1); let ymin = ((b.ymin * h_ratio) as u32).clamp(0, initial_h - 1); let xmax = ((b.xmax * w_ratio) as u32).clamp(0, initial_w - 1); let ymax = ((b.ymax * h_ratio) as u32).clamp(0, initial_h - 1); draw_rect(&mut img, xmin, xmax, ymin, ymax); } } Ok(DynamicImage::ImageRgb8(img)) } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Model weights, in safetensors format. #[arg(long)] model: Option<String>, #[arg(long)] config: Option<String>, images: Vec<String>, /// Threshold for the model confidence level. #[arg(long, default_value_t = 0.5)] confidence_threshold: f32, /// Threshold for non-maximum suppression. #[arg(long, default_value_t = 0.4)] nms_threshold: f32, } impl Args { fn config(&self) -> anyhow::Result<std::path::PathBuf> { let path = match &self.config { Some(config) => std::path::PathBuf::from(config), None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("lmz/candle-yolo-v3".to_string()); api.get("yolo-v3.cfg")? 
} }; Ok(path) } fn model(&self) -> anyhow::Result<std::path::PathBuf> { let path = match &self.model { Some(model) => std::path::PathBuf::from(model), None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("lmz/candle-yolo-v3".to_string()); api.get("yolo-v3.safetensors")? } }; Ok(path) } } pub fn main() -> Result<()> { let args = Args::parse(); // Create the model and load the weights from the file. let model = args.model()?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &Device::Cpu)? }; let config = args.config()?; let darknet = darknet::parse_config(config)?; let model = darknet.build_model(vb)?; for image_name in args.images.iter() { println!("processing {image_name}"); let mut image_name = std::path::PathBuf::from(image_name); // Load the image file and resize it. let net_width = darknet.width()?; let net_height = darknet.height()?; let original_image = image::io::Reader::open(&image_name)? .decode() .map_err(candle::Error::wrap)?; let image = { let data = original_image .resize_exact( net_width as u32, net_height as u32, image::imageops::FilterType::Triangle, ) .to_rgb8() .into_raw(); Tensor::from_vec(data, (net_width, net_height, 3), &Device::Cpu)?.permute((2, 0, 1))? }; let image = (image.unsqueeze(0)?.to_dtype(DType::F32)? * (1. / 255.))?; let predictions = model.forward(&image)?.squeeze(0)?; println!("generated predictions {predictions:?}"); let image = report( &predictions, original_image, net_width, net_height, args.confidence_threshold, args.nms_threshold, )?; image_name.set_extension("pp.jpg"); println!("writing {image_name:?}"); image.save(image_name)? } Ok(()) }
candle/candle-examples/examples/yolo-v3/main.rs/0
{ "file_path": "candle/candle-examples/examples/yolo-v3/main.rs", "repo_id": "candle", "token_count": 3180 }
20
#include <cmath> #include <cute/tensor.hpp> #include <cutlass/cutlass.h> #include <cutlass/array.h> #include "utils.h" namespace flash { using namespace cute; //////////////////////////////////////////////////////////////////////////////////////////////////// template <bool Is_causal, typename Engine, typename Layout> inline __device__ void apply_alibi(Tensor<Engine, Layout> &tensor, const int col_idx_offset_, const int max_seqlen_k, const int row_idx_offset, const int max_seqlen_q, const int warp_row_stride, const float alibi_slope) { // tensor has shape (ncol=(2, MMA_M), nrow=(2, MMA_N)) static_assert(Layout::rank == 2, "Only support 2D Tensor"); const int lane_id = threadIdx.x % 32; const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2; if constexpr (Is_causal) { // Simpler, we add the same bias vector to all rows #pragma unroll for (int nj = 0; nj < size<1, 1>(tensor); ++nj) { const int col_idx_base = col_idx_offset + nj * 8; #pragma unroll for (int j = 0; j < size<1, 0>(tensor); ++j) { const int col_idx = col_idx_base + j; #pragma unroll for (int mi = 0; mi < size<0>(tensor); ++mi) { tensor(mi, make_coord(j, nj)) += alibi_slope * col_idx; } } } } else { // Bias depends on both row_idx and col_idx #pragma unroll for (int mi = 0; mi < size<0, 1>(tensor); ++mi) { const int row_idx_base = row_idx_offset + mi * warp_row_stride; #pragma unroll for (int i = 0; i < size<0, 0>(tensor); ++i) { const int row_idx = row_idx_base + i * 8; #pragma unroll for (int nj = 0; nj < size<1, 1>(tensor); ++nj) { const int col_idx_base = col_idx_offset + nj * 8; #pragma unroll for (int j = 0; j < size<1, 0>(tensor); ++j) { const int col_idx = col_idx_base + j; tensor(make_coord(i, mi), make_coord(j, nj)) -= alibi_slope * abs(row_idx + max_seqlen_k - max_seqlen_q - col_idx); } } } } } } } // namespace flash
candle/candle-flash-attn/kernels/alibi.h/0
{ "file_path": "candle/candle-flash-attn/kernels/alibi.h", "repo_id": "candle", "token_count": 1367 }
21
# candle-kernels

This crate contains CUDA kernels used from candle. Some of these implementations come from the [dfdx crate](https://github.com/coreylowman/dfdx).
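As a rough illustration of how these kernels can be consumed, the sketch below loads one of the bundled PTX modules with `cudarc` and looks up a kernel function. The constant name (`AFFINE`), the module name, and the kernel name are assumptions, and the launch itself is omitted because the exact kernel parameter lists are not reproduced here.

```rust
// Illustrative only: the PTX constant, module name and kernel name are assumptions.
use cudarc::driver::CudaDevice;
use cudarc::nvrtc::Ptx;

fn load_affine_kernel() -> anyhow::Result<()> {
    // Pick the first CUDA device.
    let dev = CudaDevice::new(0)?;
    // candle-kernels is assumed to expose the compiled PTX as a `&str` constant.
    dev.load_ptx(Ptx::from_src(candle_kernels::AFFINE), "affine", &["affine_f32"])?;
    // Retrieve a handle to the kernel; launching it would require matching the kernel's
    // actual parameter list, which this sketch does not attempt.
    let _func = dev
        .get_func("affine", "affine_f32")
        .ok_or_else(|| anyhow::anyhow!("kernel not found"))?;
    Ok(())
}
```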
candle/candle-kernels/README.md/0
{ "file_path": "candle/candle-kernels/README.md", "repo_id": "candle", "token_count": 45 }
22
# candle-metal-kernels

This crate contains Metal kernels used from candle.
candle/candle-metal-kernels/README.md/0
{ "file_path": "candle/candle-metal-kernels/README.md", "repo_id": "candle", "token_count": 18 }
23
use candle_metal_kernels::{call_cast_contiguous, Kernels}; use metal::objc::rc::autoreleasepool; use metal::{Device, MTLResourceOptions}; use rand; use std::any::type_name; use std::time::Instant; fn main() { let device = Device::system_default().unwrap(); let kernels = Kernels::new(); let f32_1k = (0..1000).map(|_| rand::random::<f32>()).collect::<Vec<_>>(); let f32_10k = (0..10000) .map(|_| rand::random::<f32>()) .collect::<Vec<_>>(); let f32_100k = (0..100000) .map(|_| rand::random::<f32>()) .collect::<Vec<_>>(); let contiguous_kernels = ["cast_u32_f32"]; println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11} | {5: <11}", "dtype", "kernel", "size", "runs", "total time", "avg time" ); // f32 run_cast_bench(&device, &kernels, &f32_1k, &contiguous_kernels); run_cast_bench(&device, &kernels, &f32_10k, &contiguous_kernels); run_cast_bench(&device, &kernels, &f32_100k, &contiguous_kernels); } fn run_cast_bench<T: Clone>( device: &Device, kernels: &Kernels, v: &[T], contiguous: &[&'static str], ) { let command_queue = device.new_command_queue(); let options = MTLResourceOptions::StorageModeManaged; let iterations = 1000; let input = device.new_buffer_with_data( v.as_ptr() as *const core::ffi::c_void, core::mem::size_of_val(v) as u64, options, ); let mut output = device.new_buffer(core::mem::size_of_val(v) as u64, options); // Contiguous for kernel_name in contiguous { let total_time = autoreleasepool(|| { let command_buffer = command_queue.new_command_buffer(); let start = Instant::now(); for _ in 0..iterations { call_cast_contiguous( device, &command_buffer, kernels, kernel_name, v.len(), &input, &mut output, ) .unwrap(); } command_buffer.commit(); command_buffer.wait_until_completed(); start.elapsed() }); println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}", type_name::<T>().split("::").last().unwrap(), kernel_name.to_string(), v.len(), iterations, total_time, total_time / iterations ); } // Strided? }
candle/candle-metal-kernels/tmp/cast.rs/0
{ "file_path": "candle/candle-metal-kernels/tmp/cast.rs", "repo_id": "candle", "token_count": 1299 }
24
//! Linear layer
//!
//! This layer applies a linear transformation to the incoming data, `y = x@w.t() + b`.
//! The bias is optional. The `forward` method can be used to apply the layer, it supports input
//! with a batch dimension (so of shape `(b_sz, in_c)`) or without (of shape `(in_c,)`), the
//! output has shape `(b_sz, out_c)` and `(out_c,)` respectively.
//!
//! ```rust
//! use candle::{Tensor, Device::Cpu};
//! use candle_nn::{Linear, Module};
//! # fn main() -> candle::Result<()> {
//!
//! let w = Tensor::new(&[[1f32, 2.], [3., 4.], [5., 6.]], &Cpu)?;
//! let layer = Linear::new(w, None); // Use no bias.
//! let xs = Tensor::new(&[[10f32, 100.]], &Cpu)?;
//! let ys = layer.forward(&xs)?;
//! assert_eq!(ys.to_vec2::<f32>()?, &[[210.0, 430.0, 650.0]]);
//! # Ok(()) }
//! ```
use candle::{Result, Tensor};

#[derive(Clone, Debug)]
pub struct Linear {
    weight: Tensor,
    bias: Option<Tensor>,
}

impl Linear {
    pub fn new(weight: Tensor, bias: Option<Tensor>) -> Self {
        Self { weight, bias }
    }

    pub fn weight(&self) -> &Tensor {
        &self.weight
    }

    pub fn bias(&self) -> Option<&Tensor> {
        self.bias.as_ref()
    }
}

impl super::Module for Linear {
    fn forward(&self, x: &Tensor) -> candle::Result<Tensor> {
        let w = match *x.dims() {
            [b1, b2, _, _] => self.weight.broadcast_left((b1, b2))?.t()?,
            [bsize, _, _] => self.weight.broadcast_left(bsize)?.t()?,
            _ => self.weight.t()?,
        };
        let x = x.matmul(&w)?;
        match &self.bias {
            None => Ok(x),
            Some(bias) => x.broadcast_add(bias),
        }
    }
}

/// Create or initialize a new linear layer.
///
/// This uses some default names for weights and biases, namely `"weight"` and `"bias"`.
pub fn linear(in_dim: usize, out_dim: usize, vs: crate::VarBuilder) -> Result<Linear> {
    let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
    let ws = vs.get_with_hints((out_dim, in_dim), "weight", init_ws)?;
    let bound = 1. / (in_dim as f64).sqrt();
    let init_bs = crate::Init::Uniform {
        lo: -bound,
        up: bound,
    };
    let bs = vs.get_with_hints(out_dim, "bias", init_bs)?;
    Ok(Linear::new(ws, Some(bs)))
}

/// Create or initialize a new linear layer without biases.
pub fn linear_no_bias(in_dim: usize, out_dim: usize, vs: crate::VarBuilder) -> Result<Linear> {
    let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
    let ws = vs.get_with_hints((out_dim, in_dim), "weight", init_ws)?;
    Ok(Linear::new(ws, None))
}
candle/candle-nn/src/linear.rs/0
{ "file_path": "candle/candle-nn/src/linear.rs", "repo_id": "candle", "token_count": 1120 }
25
[package] name = "candle-onnx" version = "0.3.3" edition = "2021" description = "ONNX support for Candle" repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" [dependencies] candle = { path = "../candle-core", package = "candle-core", version = "0.3.3" } candle-nn = { path = "../candle-nn", version = "0.3.3" } prost = "0.12.1" [build-dependencies] prost-build = "0.12.1" [dev-dependencies] anyhow = { version = "1", features = ["backtrace"] } clap = { version = "4.2.4", features = ["derive"] }
candle/candle-onnx/Cargo.toml/0
{ "file_path": "candle/candle-onnx/Cargo.toml", "repo_id": "candle", "token_count": 242 }
26
# Generated content DO NOT EDIT from .. import functional avg_pool2d = functional.avg_pool2d gelu = functional.gelu max_pool2d = functional.max_pool2d relu = functional.relu silu = functional.silu softmax = functional.softmax tanh = functional.tanh
candle/candle-pyo3/py_src/candle/functional/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/functional/__init__.py", "repo_id": "candle", "token_count": 84 }
27
[project] name = 'candle-nn' requires-python = '>=3.7' authors = [ {name = 'The Candle Team'}, ] dynamic = [ 'description', 'license', 'readme', ] [project.urls] Homepage = 'https://github.com/huggingface/candle' Source = 'https://github.com/huggingface/candle' [build-system] requires = ["maturin>=1.0,<2.0"] build-backend = "maturin" [tool.maturin] python-source = "py_src" module-name = "candle.candle" bindings = 'pyo3' features = ["pyo3/extension-module"] [tool.black] line-length = 119 target-version = ['py35'] [project.optional-dependencies] testing = ["pytest", "black==22.3"] huggingface = ["transformers>=4.33.3", "huggingface-hub>=0.17.3"]
candle/candle-pyo3/pyproject.toml/0
{ "file_path": "candle/candle-pyo3/pyproject.toml", "repo_id": "candle", "token_count": 285 }
28
[package] name = "candle-transformers" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [dependencies] accelerate-src = { workspace = true, optional = true } byteorder = { workspace = true } candle = { workspace = true } candle-flash-attn = { workspace = true, optional = true } candle-nn = { workspace = true } intel-mkl-src = { workspace = true, optional = true } num-traits = { workspace = true } rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_plain = { workspace = true } tracing = { workspace = true } wav = { workspace = true } [features] default = [] accelerate = ["dep:accelerate-src", "candle/accelerate", "candle-nn/accelerate"] cuda = ["candle/cuda", "candle-nn/cuda"] flash-attn = ["cuda", "dep:candle-flash-attn"] mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl"] metal = ["candle/metal", "candle-nn/metal"]
candle/candle-transformers/Cargo.toml/0
{ "file_path": "candle/candle-transformers/Cargo.toml", "repo_id": "candle", "token_count": 368 }
29
use byteorder::{LittleEndian, ReadBytesExt}; use candle::{DType, Device, IndexOp, Result, Shape, Tensor}; use candle_nn::VarBuilder; use super::llama2_c::Config; pub struct TransformerWeights { // token embedding table token_embedding_table: Tensor, // (vocab_size, dim) // weights for rmsnorms rms_att_weight: Tensor, // (layer, dim) rmsnorm weights rms_ffn_weight: Tensor, // (layer, dim) // weights for matmuls wq: Tensor, // (layer, dim, dim) wk: Tensor, // (layer, dim, dim) wv: Tensor, // (layer, dim, dim) wo: Tensor, // (layer, dim, dim) // weights for ffn w1: Tensor, // (layer, hidden_dim, dim) w2: Tensor, // (layer, dim, hidden_dim) w3: Tensor, // (layer, hidden_dim, dim) // final rmsnorm rms_final_weight: Tensor, // (dim,) // freq_cis for RoPE relatively positional embeddings freq_cis_real: Tensor, // (seq_len, head_size/2) freq_cis_imag: Tensor, // (seq_len, head_size/2) } fn read_i32<R: std::io::Read>(r: &mut R) -> Result<i32> { let mut buf = [0u8; 4]; r.read_exact(&mut buf)?; Ok(i32::from_le_bytes(buf)) } fn read_tensor<R: std::io::Read, S: Into<Shape>>( r: &mut R, shape: S, dev: &Device, ) -> Result<Tensor> { let shape = shape.into(); let mut data_t = vec![0f32; shape.elem_count()]; r.read_f32_into::<LittleEndian>(&mut data_t)?; let tensor = Tensor::from_vec(data_t, shape, dev)?; Ok(tensor) } impl Config { pub fn from_reader<R: std::io::Read>(r: &mut R) -> Result<Self> { let dim = read_i32(r)? as usize; let hidden_dim = read_i32(r)? as usize; let n_layers = read_i32(r)? as usize; let n_heads = read_i32(r)? as usize; let n_kv_heads = read_i32(r)? as usize; let vocab_size = read_i32(r)? as usize; let seq_len = read_i32(r)? as usize; Ok(Self { dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, seq_len, norm_eps: 1e-5, }) } pub fn head_size(&self) -> usize { self.dim / self.n_heads } } impl TransformerWeights { pub fn from_reader<R: std::io::Read>(r: &mut R, c: &Config, dev: &Device) -> Result<Self> { let token_embedding_table = read_tensor(r, (c.vocab_size, c.dim), dev)?; let rms_att_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let wq = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wk = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wv = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wo = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let rms_ffn_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let w1 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let w2 = read_tensor(r, (c.n_layers, c.dim, c.hidden_dim), dev)?; let w3 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let rms_final_weight = read_tensor(r, c.dim, dev)?; let head_size = c.head_size(); let freq_cis_real = read_tensor(r, (c.seq_len, head_size / 2), dev)?; let freq_cis_imag = read_tensor(r, (c.seq_len, head_size / 2), dev)?; Ok(Self { token_embedding_table, rms_att_weight, wq, wk, wv, wo, rms_ffn_weight, w1, w2, w3, rms_final_weight, freq_cis_real, freq_cis_imag, }) } pub fn var_builder(&self, cfg: &Config, device: &Device) -> Result<VarBuilder<'static>> { // TODO: As of 2023-08-04, gemm is slower than expected when multiplying a matrix of // size (1, k) with the transpose of a matrix of size (k, n) as it ends up transposing the // second matrix back. We detect this case here and as a temporary hack make the weight // matrix column major rather than row major. This ends up speeding up text generation from // 120 token/s to 220 token/s on a Ryzen 2600X. 
let tr = device.is_cpu() && !candle::utils::has_mkl(); let tr = |x: Tensor| if tr { x.t()?.contiguous()?.t() } else { Ok(x) }; let mut ws = std::collections::HashMap::new(); let mut insert = |name: &str, t: Tensor| { ws.insert(name.to_string(), t); }; insert("rot.freq_cis_real", self.freq_cis_real.clone()); insert("rot.freq_cis_imag", self.freq_cis_imag.clone()); insert( "model.embed_tokens.weight", self.token_embedding_table.clone(), ); insert("lm_head.weight", tr(self.token_embedding_table.clone())?); insert("model.norm.weight", self.rms_final_weight.clone()); for layer in 0..cfg.n_layers { ws.insert( format!("model.layers.{layer}.self_attn.q_proj.weight"), tr(self.wq.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.self_attn.k_proj.weight"), tr(self.wk.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.self_attn.v_proj.weight"), tr(self.wv.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.self_attn.o_proj.weight"), tr(self.wo.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.mlp.gate_proj.weight"), tr(self.w1.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.mlp.down_proj.weight"), tr(self.w2.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.mlp.up_proj.weight"), tr(self.w3.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.input_layernorm.weight"), self.rms_att_weight.i(layer)?, ); ws.insert( format!("model.layers.{layer}.post_attention_layernorm.weight"), self.rms_ffn_weight.i(layer)?, ); } let vb = VarBuilder::from_tensors(ws, DType::F32, device); Ok(vb) } }
candle/candle-transformers/src/models/llama2_c_weights.rs/0
{ "file_path": "candle/candle-transformers/src/models/llama2_c_weights.rs", "repo_id": "candle", "token_count": 3322 }
30
use crate::quantized_nn::{layer_norm_no_bias, linear_no_bias, Embedding, Linear}; pub use crate::quantized_var_builder::VarBuilder; /// MPT model used by replit-code-v1_5-3b /// https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/modeling_mpt.py use candle::{IndexOp, Module, Result, Tensor, D}; use candle_nn::LayerNorm; pub use super::mpt::Config; #[derive(Debug, Clone)] struct GroupedQueryAttention { wqkv: Linear, out_proj: Linear, kv_cache: Option<(Tensor, Tensor)>, softmax_scale: f64, head_dim: usize, d_model: usize, n_heads: usize, kv_n_heads: usize, attn_bias: Tensor, span: tracing::Span, } impl GroupedQueryAttention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let head_dim = cfg.d_model / cfg.n_heads; let wqkv_size = cfg.d_model + 2 * cfg.kv_n_heads * head_dim; let wqkv = linear_no_bias(cfg.d_model, wqkv_size, vb.pp("Wqkv"))?; let softmax_scale = 1f64 / (head_dim as f64).sqrt(); let out_proj = linear_no_bias(cfg.d_model, cfg.d_model, vb.pp("out_proj"))?; let attn_bias = super::mpt::build_alibi_bias(cfg)?.to_device(vb.device())?; Ok(Self { wqkv, out_proj, kv_cache: None, softmax_scale, head_dim, d_model: cfg.d_model, n_heads: cfg.n_heads, kv_n_heads: cfg.kv_n_heads, attn_bias, span: tracing::span!(tracing::Level::TRACE, "gqa"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len, _n_embd) = xs.dims3()?; let qkv = self.wqkv.forward(xs)?; let query = qkv.narrow(2, 0, self.d_model)?; let kv_size = self.kv_n_heads * self.head_dim; let key = qkv.narrow(2, self.d_model, kv_size)?; let value = qkv.narrow(2, self.d_model + kv_size, kv_size)?; // scaled_multihead_dot_product_attention let query = query .reshape((b_size, seq_len, self.n_heads, ()))? .transpose(1, 2)?; // b,h,s,d let key = key .reshape((b_size, seq_len, self.kv_n_heads, ()))? .permute((0, 2, 3, 1))?; // b,h,d,s let value = value .reshape((b_size, seq_len, self.kv_n_heads, ()))? .transpose(1, 2)?; // b,h,s,d let (key, value) = match &self.kv_cache { None => (key, value), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &key], 3)?; let v = Tensor::cat(&[prev_v, &value], 2)?; (k, v) } }; self.kv_cache = Some((key.clone(), value.clone())); let query = query.contiguous()?; let key = super::mpt::repeat_kv(key, self.n_heads / self.kv_n_heads)?.contiguous()?; let value = super::mpt::repeat_kv(value, self.n_heads / self.kv_n_heads)?.contiguous()?; let attn_weights = (query.matmul(&key)? * self.softmax_scale)?; let attn_bias = { let s_q = query.dim(D::Minus2)?; let s_k = key.dim(D::Minus1)?; let (_, _, a_q, a_k) = self.attn_bias.dims4()?; let start_q = a_q.saturating_sub(s_q); let start_k = a_k.saturating_sub(s_k); self.attn_bias.i((.., .., start_q.., start_k..))? }; let attn_weights = attn_weights.broadcast_add(&attn_bias)?; let attn_weights = match mask { None => attn_weights, Some(mask) => super::mpt::masked_fill( &attn_weights, &mask.broadcast_as(attn_weights.shape())?, f32::NEG_INFINITY, )?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_weights .matmul(&value)? .transpose(1, 2)? 
.flatten_from(D::Minus2)?; let out = attn_output.apply(&self.out_proj)?; Ok(out) } } #[derive(Debug, Clone)] struct Ffn { up_proj: Linear, down_proj: Linear, } impl Ffn { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden = cfg.d_model * cfg.expansion_ratio; let up_proj = linear_no_bias(cfg.d_model, hidden, vb.pp("up_proj"))?; let down_proj = linear_no_bias(hidden, cfg.d_model, vb.pp("down_proj"))?; Ok(Self { up_proj, down_proj }) } } impl Module for Ffn { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.up_proj)?.gelu_erf()?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct MPTBlock { norm1: LayerNorm, // Do we need the low-precision variant? attn: GroupedQueryAttention, norm2: LayerNorm, ffn: Ffn, } impl MPTBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let norm1 = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_1"))?; let norm2 = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_2"))?; let attn = GroupedQueryAttention::new(cfg, vb.pp("attn"))?; let ffn = Ffn::new(cfg, vb.pp("ffn"))?; Ok(Self { norm1, attn, norm2, ffn, }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.norm1)?; let xs = self.attn.forward(&xs, mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.norm2)?.apply(&self.ffn)?; xs + residual } } #[derive(Debug, Clone)] pub struct Model { wte: Embedding, blocks: Vec<MPTBlock>, norm_f: LayerNorm, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let wte = Embedding::new(cfg.vocab_size, cfg.d_model, vb.pp("wte"))?; let vb_b = vb.pp("blocks"); let mut blocks = Vec::with_capacity(cfg.n_layers); for i in 0..cfg.n_layers { let block = MPTBlock::new(cfg, vb_b.pp(i))?; blocks.push(block) } let norm_f = layer_norm_no_bias(cfg.d_model, 1e-5, vb.pp("norm_f"))?; Ok(Self { wte, blocks, norm_f, }) } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let (_b_size, seq_len) = xs.dims2()?; let mut xs = xs.apply(&self.wte)?; let mask = if seq_len <= 1 { None } else { Some(super::mpt::get_mask(seq_len, xs.device())?) }; for block in self.blocks.iter_mut() { xs = block.forward(&xs, mask.as_ref())?; } let xs = xs.apply(&self.norm_f)?; let logits = xs .narrow(1, seq_len - 1, 1)? .squeeze(1)? .matmul(&self.wte.embeddings().t()?)? .squeeze(1)?; Ok(logits) } }
candle/candle-transformers/src/models/quantized_mpt.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_mpt.rs", "repo_id": "candle", "token_count": 3728 }
31
use candle::{Result, Tensor, D}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug)] pub struct TimestepEmbedding { linear_1: nn::Linear, linear_2: nn::Linear, } impl TimestepEmbedding { // act_fn: "silu" pub fn new(vs: nn::VarBuilder, channel: usize, time_embed_dim: usize) -> Result<Self> { let linear_1 = nn::linear(channel, time_embed_dim, vs.pp("linear_1"))?; let linear_2 = nn::linear(time_embed_dim, time_embed_dim, vs.pp("linear_2"))?; Ok(Self { linear_1, linear_2 }) } } impl Module for TimestepEmbedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = nn::ops::silu(&self.linear_1.forward(xs)?)?; self.linear_2.forward(&xs) } } #[derive(Debug)] pub struct Timesteps { num_channels: usize, flip_sin_to_cos: bool, downscale_freq_shift: f64, } impl Timesteps { pub fn new(num_channels: usize, flip_sin_to_cos: bool, downscale_freq_shift: f64) -> Self { Self { num_channels, flip_sin_to_cos, downscale_freq_shift, } } } impl Module for Timesteps { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let half_dim = (self.num_channels / 2) as u32; let exponent = (Tensor::arange(0, half_dim, xs.device())?.to_dtype(candle::DType::F32)? * -f64::ln(10000.))?; let exponent = (exponent / (half_dim as f64 - self.downscale_freq_shift))?; let emb = exponent.exp()?.to_dtype(xs.dtype())?; // emb = timesteps[:, None].float() * emb[None, :] let emb = xs.unsqueeze(D::Minus1)?.broadcast_mul(&emb.unsqueeze(0)?)?; let (cos, sin) = (emb.cos()?, emb.sin()?); let emb = if self.flip_sin_to_cos { Tensor::cat(&[&cos, &sin], D::Minus1)? } else { Tensor::cat(&[&sin, &cos], D::Minus1)? }; if self.num_channels % 2 == 1 { emb.pad_with_zeros(D::Minus2, 0, 1) } else { Ok(emb) } } }
candle/candle-transformers/src/models/stable_diffusion/embeddings.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/embeddings.rs", "repo_id": "candle", "token_count": 1008 }
32
use super::Config; use crate::models::with_tracing::{linear, linear_no_bias, Linear}; use candle::{Device, IndexOp, Result, Tensor, D}; use candle_nn::{embedding, Conv1d, Conv1dConfig, Embedding, LayerNorm, Module, VarBuilder}; fn conv1d( in_channels: usize, out_channels: usize, kernel_size: usize, config: Conv1dConfig, vb: VarBuilder, ) -> Result<Conv1d> { let weight = vb.get((out_channels, in_channels, kernel_size), "weight")?; let bias = vb.get(out_channels, "bias")?; Ok(Conv1d::new(weight, Some(bias), config)) } fn layer_norm(size: usize, vb: VarBuilder) -> Result<LayerNorm> { let weight = vb.get(size, "weight")?; let bias = vb.get(size, "bias")?; Ok(LayerNorm::new(weight, bias, 1e-5)) } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L62 #[derive(Debug, Clone)] struct MultiHeadAttention { query: Linear, key: Linear, value: Linear, out: Linear, n_head: usize, span: tracing::Span, softmax_span: tracing::Span, matmul_span: tracing::Span, kv_cache: Option<(Tensor, Tensor)>, } impl MultiHeadAttention { fn load(n_state: usize, n_head: usize, vb: VarBuilder) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "multi-head-attn"); let softmax_span = tracing::span!(tracing::Level::TRACE, "multi-head-attn-softmax"); let matmul_span = tracing::span!(tracing::Level::TRACE, "multi-head-attn-matmul"); let query = linear(n_state, n_state, vb.pp("q_proj"))?; let value = linear(n_state, n_state, vb.pp("v_proj"))?; let key = linear_no_bias(n_state, n_state, vb.pp("k_proj"))?; let out = linear(n_state, n_state, vb.pp("out_proj"))?; Ok(Self { query, key, value, out, n_head, span, softmax_span, matmul_span, kv_cache: None, }) } fn forward( &mut self, x: &Tensor, xa: Option<&Tensor>, mask: Option<&Tensor>, flush_cache: bool, ) -> Result<Tensor> { let _enter = self.span.enter(); let q = self.query.forward(x)?; let (k, v) = match xa { None => { let k = self.key.forward(x)?; let v = self.value.forward(x)?; (k, v) } Some(x) => { if flush_cache { self.kv_cache = None; } if let Some((k, v)) = &self.kv_cache { (k.clone(), v.clone()) } else { let k = self.key.forward(x)?; let v = self.value.forward(x)?; self.kv_cache = Some((k.clone(), v.clone())); (k, v) } } }; let wv = self.qkv_attention(&q, &k, &v, mask)?; let out = self.out.forward(&wv)?; Ok(out) } fn reshape_head(&self, x: &Tensor) -> Result<Tensor> { let (n_batch, n_ctx, n_state) = x.dims3()?; let target_dims = &[n_batch, n_ctx, self.n_head, n_state / self.n_head]; x.reshape(target_dims)?.transpose(1, 2) } fn qkv_attention( &self, q: &Tensor, k: &Tensor, v: &Tensor, mask: Option<&Tensor>, ) -> Result<Tensor> { let (_, n_ctx, n_state) = q.dims3()?; let scale = ((n_state / self.n_head) as f64).powf(-0.25); let q = (self.reshape_head(q)? * scale)?; let k = (self.reshape_head(k)?.transpose(2, 3)? * scale)?; let v = self.reshape_head(v)?.contiguous()?; let mut qk = { let _enter = self.matmul_span.enter(); q.matmul(&k)? }; if let Some(mask) = mask { let mask = mask.i((0..n_ctx, 0..n_ctx))?; qk = qk.broadcast_add(&mask)? } let w = { let _enter = self.softmax_span.enter(); candle_nn::ops::softmax_last_dim(&qk)? }; let wv = { let _enter = self.matmul_span.enter(); w.matmul(&v)? } .transpose(1, 2)? 
.flatten_from(2)?; Ok(wv) } } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L111 #[derive(Debug, Clone)] struct ResidualAttentionBlock { attn: MultiHeadAttention, attn_ln: LayerNorm, cross_attn: Option<(MultiHeadAttention, LayerNorm)>, mlp_linear1: Linear, mlp_linear2: Linear, mlp_ln: LayerNorm, span: tracing::Span, } impl ResidualAttentionBlock { fn load(n_state: usize, n_head: usize, ca: bool, vb: VarBuilder) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "residual-attn"); let attn = MultiHeadAttention::load(n_state, n_head, vb.pp("self_attn"))?; let attn_ln = layer_norm(n_state, vb.pp("self_attn_layer_norm"))?; let cross_attn = if ca { let cross_attn = MultiHeadAttention::load(n_state, n_head, vb.pp("encoder_attn"))?; let cross_attn_ln = layer_norm(n_state, vb.pp("encoder_attn_layer_norm"))?; Some((cross_attn, cross_attn_ln)) } else { None }; let n_mlp = n_state * 4; let mlp_linear1 = linear(n_state, n_mlp, vb.pp("fc1"))?; let mlp_linear2 = linear(n_mlp, n_state, vb.pp("fc2"))?; let mlp_ln = layer_norm(n_state, vb.pp("final_layer_norm"))?; Ok(Self { attn, attn_ln, cross_attn, mlp_linear1, mlp_linear2, mlp_ln, span, }) } fn forward( &mut self, x: &Tensor, xa: Option<&Tensor>, mask: Option<&Tensor>, flush_kv_cache: bool, ) -> Result<Tensor> { let _enter = self.span.enter(); let attn = self .attn .forward(&self.attn_ln.forward(x)?, None, mask, flush_kv_cache)?; let mut x = (x + attn)?; if let Some((attn, ln)) = &mut self.cross_attn { x = (&x + attn.forward(&ln.forward(&x)?, xa, None, flush_kv_cache)?)?; } let mlp = self.mlp_linear2.forward( &self .mlp_linear1 .forward(&self.mlp_ln.forward(&x)?)? .gelu()?, )?; x + mlp } } fn sinusoids(length: usize, channels: usize) -> Result<Tensor> { let max_timescale = 10000f32; let log_timescale_increment = max_timescale.ln() / (channels / 2 - 1) as f32; let inv_timescales: Vec<_> = (0..channels / 2) .map(|i| (i as f32 * (-log_timescale_increment)).exp()) .collect(); let inv_timescales = Tensor::new(inv_timescales.as_slice(), &Device::Cpu)?.unsqueeze(0)?; let arange = Tensor::arange(0, length as u32, &Device::Cpu)? .to_dtype(candle::DType::F32)? .unsqueeze(1)?; let sh = (length, channels / 2); let scaled_time = (arange.broadcast_as(sh)? 
* inv_timescales.broadcast_as(sh)?)?; let sincos = Tensor::cat(&[scaled_time.sin()?, scaled_time.cos()?], 1)?; Ok(sincos) } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L143 #[derive(Debug, Clone)] pub struct AudioEncoder { conv1: Conv1d, conv2: Conv1d, positional_embedding: Tensor, blocks: Vec<ResidualAttentionBlock>, ln_post: LayerNorm, span: tracing::Span, conv1_span: tracing::Span, conv2_span: tracing::Span, } impl AudioEncoder { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "audio-encoder"); let conv1_span = tracing::span!(tracing::Level::TRACE, "conv1"); let conv2_span = tracing::span!(tracing::Level::TRACE, "conv2"); let n_state = cfg.d_model; let n_head = cfg.encoder_attention_heads; let n_ctx = cfg.max_source_positions; let cfg1 = Conv1dConfig { padding: 1, stride: 1, groups: 1, dilation: 1, }; let cfg2 = Conv1dConfig { padding: 1, stride: 2, groups: 1, dilation: 1, }; let conv1 = conv1d(cfg.num_mel_bins, n_state, 3, cfg1, vb.pp("conv1"))?; let conv2 = conv1d(n_state, n_state, 3, cfg2, vb.pp("conv2"))?; let positional_embedding = sinusoids(n_ctx, n_state)?.to_device(vb.device())?; let blocks = (0..cfg.encoder_layers) .map(|i| { ResidualAttentionBlock::load(n_state, n_head, false, vb.pp(&format!("layers.{i}"))) }) .collect::<Result<Vec<_>>>()?; let ln_post = layer_norm(n_state, vb.pp("layer_norm"))?; Ok(Self { conv1, conv2, positional_embedding, blocks, ln_post, conv1_span, conv2_span, span, }) } pub fn forward(&mut self, x: &Tensor, flush_kv_cache: bool) -> Result<Tensor> { let _enter = self.span.enter(); let x = { let _enter = self.conv1_span.enter(); self.conv1.forward(x)?.gelu()? }; let x = { let _enter = self.conv2_span.enter(); self.conv2.forward(&x)?.gelu()? }; let x = x.transpose(1, 2)?; let (_bsize, seq_len, _hidden) = x.dims3()?; let positional_embedding = self.positional_embedding.narrow(0, 0, seq_len)?; let mut x = x.broadcast_add(&positional_embedding)?; for block in self.blocks.iter_mut() { x = block.forward(&x, None, None, flush_kv_cache)? 
} let x = self.ln_post.forward(&x)?; Ok(x) } } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L176 #[derive(Debug, Clone)] pub struct TextDecoder { token_embedding: Embedding, positional_embedding: Tensor, blocks: Vec<ResidualAttentionBlock>, ln: LayerNorm, mask: Tensor, span: tracing::Span, span_final: tracing::Span, } impl TextDecoder { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "text-decoder"); let span_final = tracing::span!(tracing::Level::TRACE, "text-decoder-final"); let n_state = cfg.d_model; let n_head = cfg.decoder_attention_heads; let n_ctx = cfg.max_target_positions; let token_embedding = embedding(cfg.vocab_size, n_state, vb.pp("embed_tokens"))?; let positional_embedding = vb.get((n_ctx, n_state), "embed_positions.weight")?; let blocks = (0..cfg.decoder_layers) .map(|i| { ResidualAttentionBlock::load(n_state, n_head, true, vb.pp(&format!("layers.{i}"))) }) .collect::<Result<Vec<_>>>()?; let ln = layer_norm(n_state, vb.pp("layer_norm"))?; let mask: Vec<_> = (0..n_ctx) .flat_map(|i| (0..n_ctx).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 })) .collect(); let mask = Tensor::from_vec(mask, (n_ctx, n_ctx), vb.device())?; Ok(Self { token_embedding, positional_embedding, blocks, ln, mask, span, span_final, }) } pub fn forward(&mut self, x: &Tensor, xa: &Tensor, flush_kv_cache: bool) -> Result<Tensor> { let _enter = self.span.enter(); let last = x.dim(D::Minus1)?; let token_embedding = self.token_embedding.forward(x)?; let positional_embedding = self.positional_embedding.narrow(0, 0, last)?; let mut x = token_embedding.broadcast_add(&positional_embedding)?; for block in self.blocks.iter_mut() { x = block.forward(&x, Some(xa), Some(&self.mask), flush_kv_cache)?; } self.ln.forward(&x) } pub fn final_linear(&self, x: &Tensor) -> Result<Tensor> { let b_size = x.dim(0)?; let w = self.token_embedding.embeddings().broadcast_left(b_size)?; let logits = { let _enter = self.span_final.enter(); x.matmul(&w.t()?)? }; Ok(logits) } } // https://github.com/openai/whisper/blob/f572f2161ba831bae131364c3bffdead7af6d210/whisper/model.py#L221 #[derive(Debug, Clone)] pub struct Whisper { pub encoder: AudioEncoder, pub decoder: TextDecoder, pub config: Config, } impl Whisper { pub fn load(vb: &VarBuilder, config: Config) -> Result<Self> { let encoder = AudioEncoder::load(vb.pp("model.encoder"), &config)?; let decoder = TextDecoder::load(vb.pp("model.decoder"), &config)?; Ok(Self { encoder, decoder, config, }) } }
candle/candle-transformers/src/models/whisper/model.rs/0
{ "file_path": "candle/candle-transformers/src/models/whisper/model.rs", "repo_id": "candle", "token_count": 6735 }
33
use candle::{Result, Tensor}; pub fn apply_repeat_penalty(logits: &Tensor, penalty: f32, context: &[u32]) -> Result<Tensor> { let device = logits.device(); let mut logits = logits.to_vec1::<f32>()?; let context: std::collections::HashSet<_> = context.iter().collect(); for (token_id, logit) in logits.iter_mut().enumerate() { if context.contains(&(token_id as u32)) { if *logit >= 0. { *logit /= penalty } else { *logit *= penalty } } } let logits_len = logits.len(); Tensor::from_vec(logits, logits_len, device) }
candle/candle-transformers/src/utils.rs/0
{ "file_path": "candle/candle-transformers/src/utils.rs", "repo_id": "candle", "token_count": 299 }
34
//load Candle Bert Module wasm module let init, ModelEncoder; async function fetchArrayBuffer(url) { const cacheName = "t5-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class Encoder { static instance = {}; static async getInstance(weightsURL, tokenizerURL, configURL, modelID) { if (modelID.includes("quantized")) { ({ default: init, ModelEncoder } = await import( "./build/m-quantized.js" )); } else { ({ default: init, ModelEncoder } = await import("./build/m.js")); } if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new ModelEncoder( weightsArrayU8, tokenizerArrayU8, configArrayU8 ); } else { self.postMessage({ status: "ready", message: "Model Already Loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { weightsURL, tokenizerURL, configURL, modelID, sentences, normalize_embeddings, } = event.data; try { self.postMessage({ status: "ready", message: "Starting T5 Encoder" }); const model = await Encoder.getInstance( weightsURL, tokenizerURL, configURL, modelID ); self.postMessage({ status: "encoding", message: "Encoding Sentences", }); const output = model.decode({ sentences: sentences, normalize_embeddings: normalize_embeddings || true, }); self.postMessage({ status: "complete", message: "complete", output: output, }); } catch (e) { self.postMessage({ error: e }); } });
candle/candle-wasm-examples/t5/T5ModelEncoderWorker.js/0
{ "file_path": "candle/candle-wasm-examples/t5/T5ModelEncoderWorker.js", "repo_id": "candle", "token_count": 873 }
35
use candle_wasm_example_whisper::worker::{Decoder as D, ModelData}; use wasm_bindgen::prelude::*; #[wasm_bindgen] pub struct Decoder { decoder: D, } #[wasm_bindgen] impl Decoder { #[wasm_bindgen(constructor)] #[allow(clippy::too_many_arguments)] pub fn new( weights: Vec<u8>, tokenizer: Vec<u8>, mel_filters: Vec<u8>, config: Vec<u8>, quantized: bool, is_multilingual: bool, timestamps: bool, task: Option<String>, language: Option<String>, ) -> Result<Decoder, JsError> { let decoder = D::load(ModelData { tokenizer, mel_filters, config, quantized, weights, is_multilingual, timestamps, task, language, }); match decoder { Ok(decoder) => Ok(Self { decoder }), Err(e) => Err(JsError::new(&e.to_string())), } } #[wasm_bindgen] pub fn decode(&mut self, wav_input: Vec<u8>) -> Result<String, JsError> { let segments = self .decoder .convert_and_run(&wav_input) .map_err(|e| JsError::new(&e.to_string()))?; let json = serde_json::to_string(&segments)?; Ok(json) } } fn main() {}
candle/candle-wasm-examples/whisper/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/bin/m.rs", "repo_id": "candle", "token_count": 694 }
36
mod app; pub mod coco_classes; pub mod model; pub mod worker; pub use app::App; pub use worker::Worker;
candle/candle-wasm-examples/yolo/src/lib.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/lib.rs", "repo_id": "candle", "token_count": 37 }
37
{ "useTabs": true, "trailingComma": "es5", "printWidth": 100, "plugins": ["prettier-plugin-svelte", "prettier-plugin-tailwindcss"], "pluginSearchDirs": ["."], "overrides": [{ "files": "*.svelte", "options": { "parser": "svelte" } }] }
chat-ui/.prettierrc/0
{ "file_path": "chat-ui/.prettierrc", "repo_id": "chat-ui", "token_count": 104 }
38
<!DOCTYPE html> <html lang="en" class="h-full"> <head> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no" /> <meta name="theme-color" content="rgb(249, 250, 251)" /> <script> if ( localStorage.theme === "dark" || (!("theme" in localStorage) && window.matchMedia("(prefers-color-scheme: dark)").matches) ) { document.documentElement.classList.add("dark"); document .querySelector('meta[name="theme-color"]') .setAttribute("content", "rgb(26, 36, 50)"); } // For some reason, Sveltekit doesn't let us load env variables from .env here, so we load it from hooks.server.ts window.gaId = "%gaId%"; </script> %sveltekit.head% </head> <body data-sveltekit-preload-data="hover" class="h-full dark:bg-gray-900"> <div id="app" class="contents h-full">%sveltekit.body%</div> <!-- Google Tag Manager --> <script> if (window.gaId) { const script = document.createElement("script"); script.src = "https://www.googletagmanager.com/gtag/js?id=" + window.gaId; script.async = true; document.head.appendChild(script); window.dataLayer = window.dataLayer || []; function gtag() { dataLayer.push(arguments); } gtag("js", new Date()); /// ^ See https://developers.google.com/tag-platform/gtagjs/install gtag("config", window.gaId); gtag("consent", "default", { ad_storage: "denied", analytics_storage: "denied" }); /// ^ See https://developers.google.com/tag-platform/gtagjs/reference#consent /// TODO: ask the user for their consent and update this with gtag('consent', 'update') } </script> </body> </html>
chat-ui/src/app.html/0
{ "file_path": "chat-ui/src/app.html", "repo_id": "chat-ui", "token_count": 677 }
39
<script lang="ts"> import { base } from "$app/paths"; import Logo from "$lib/components/icons/Logo.svelte"; import { switchTheme } from "$lib/switchTheme"; import { isAborted } from "$lib/stores/isAborted"; import { PUBLIC_APP_NAME, PUBLIC_ORIGIN } from "$env/static/public"; import NavConversationItem from "./NavConversationItem.svelte"; import type { LayoutData } from "../../routes/$types"; import type { ConvSidebar } from "$lib/types/ConvSidebar"; import { page } from "$app/stores"; export let conversations: ConvSidebar[] = []; export let canLogin: boolean; export let user: LayoutData["user"]; function handleNewChatClick() { isAborted.set(true); } const dateRanges = [ new Date().setDate(new Date().getDate() - 1), new Date().setDate(new Date().getDate() - 7), new Date().setMonth(new Date().getMonth() - 1), ]; $: groupedConversations = { today: conversations.filter(({ updatedAt }) => updatedAt.getTime() > dateRanges[0]), week: conversations.filter( ({ updatedAt }) => updatedAt.getTime() > dateRanges[1] && updatedAt.getTime() < dateRanges[0] ), month: conversations.filter( ({ updatedAt }) => updatedAt.getTime() > dateRanges[2] && updatedAt.getTime() < dateRanges[1] ), older: conversations.filter(({ updatedAt }) => updatedAt.getTime() < dateRanges[2]), }; const titles: { [key: string]: string } = { today: "Today", week: "This week", month: "This month", older: "Older", } as const; </script> <div class="sticky top-0 flex flex-none items-center justify-between px-3 py-3.5 max-sm:pt-0"> <a class="flex items-center rounded-xl text-lg font-semibold" href="{PUBLIC_ORIGIN}{base}/"> <Logo classNames="mr-1" /> {PUBLIC_APP_NAME} </a> <a href={`${base}/`} on:click={handleNewChatClick} class="flex rounded-lg border bg-white px-2 py-0.5 text-center shadow-sm hover:shadow-none dark:border-gray-600 dark:bg-gray-700" > New Chat </a> </div> <div class="scrollbar-custom flex flex-col gap-1 overflow-y-auto rounded-r-xl from-gray-50 px-3 pb-3 pt-2 max-sm:bg-gradient-to-t md:bg-gradient-to-l dark:from-gray-800/30" > {#each Object.entries(groupedConversations) as [group, convs]} {#if convs.length} <h4 class="mb-1.5 mt-4 pl-0.5 text-sm text-gray-400 first:mt-0 dark:text-gray-500"> {titles[group]} </h4> {#each convs as conv} <NavConversationItem on:editConversationTitle on:deleteConversation {conv} /> {/each} {/if} {/each} </div> <div class="mt-0.5 flex flex-col gap-1 rounded-r-xl p-3 text-sm md:bg-gradient-to-l md:from-gray-50 md:dark:from-gray-800/30" > {#if user?.username || user?.email} <form action="{base}/logout" method="post" class="group flex items-center gap-1.5 rounded-lg pl-2.5 pr-2 hover:bg-gray-100 dark:hover:bg-gray-700" > <span class="flex h-9 flex-none shrink items-center gap-1.5 truncate pr-2 text-gray-500 dark:text-gray-400" >{user?.username || user?.email}</span > <button type="submit" class="ml-auto h-6 flex-none items-center gap-1.5 rounded-md border bg-white px-2 text-gray-700 shadow-sm group-hover:flex hover:shadow-none md:hidden dark:border-gray-600 dark:bg-gray-600 dark:text-gray-400 dark:hover:text-gray-300" > Sign Out </button> </form> {/if} {#if canLogin} <form action="{base}/login" method="POST" target="_parent"> <button type="submit" class="flex h-9 w-full flex-none items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700" > Login </button> </form> {/if} <button on:click={switchTheme} type="button" class="flex h-9 flex-none items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 
dark:hover:bg-gray-700" > Theme </button> {#if $page.data.enableAssistants} <a href="{base}/assistants" class="flex h-9 flex-none items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700" > Assistants <span class="ml-auto rounded-full border border-gray-300 px-2 py-0.5 text-xs text-gray-500 dark:border-gray-500 dark:text-gray-400" >New</span > </a> {/if} <a href="{base}/settings" class="flex h-9 flex-none items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700" > Settings </a> {#if PUBLIC_APP_NAME === "HuggingChat"} <a href="{base}/privacy" class="flex h-9 flex-none items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700" > About & Privacy </a> {/if} </div>
chat-ui/src/lib/components/NavMenu.svelte/0
{ "file_path": "chat-ui/src/lib/components/NavMenu.svelte", "repo_id": "chat-ui", "token_count": 1944 }
40
<script lang="ts"> import type { Message } from "$lib/types/Message"; import { snapScrollToBottom } from "$lib/actions/snapScrollToBottom"; import ScrollToBottomBtn from "$lib/components/ScrollToBottomBtn.svelte"; import { tick } from "svelte"; import { randomUUID } from "$lib/utils/randomUuid"; import type { Model } from "$lib/types/Model"; import ChatIntroduction from "./ChatIntroduction.svelte"; import ChatMessage from "./ChatMessage.svelte"; import type { WebSearchUpdate } from "$lib/types/MessageUpdate"; import { browser } from "$app/environment"; import SystemPromptModal from "../SystemPromptModal.svelte"; import type { Assistant } from "$lib/types/Assistant"; import AssistantIntroduction from "./AssistantIntroduction.svelte"; import { page } from "$app/stores"; import { base } from "$app/paths"; export let messages: Message[]; export let loading: boolean; export let pending: boolean; export let isAuthor: boolean; export let currentModel: Model; export let assistant: Assistant | undefined; export let models: Model[]; export let preprompt: string | undefined; export let readOnly: boolean; let chatContainer: HTMLElement; export let webSearchMessages: WebSearchUpdate[] = []; async function scrollToBottom() { await tick(); chatContainer.scrollTop = chatContainer.scrollHeight; } // If last message is from user, scroll to bottom $: if (browser && messages[messages.length - 1]?.from === "user") { scrollToBottom(); } </script> <div class="scrollbar-custom mr-1 h-full overflow-y-auto" use:snapScrollToBottom={messages.length ? [...messages, ...webSearchMessages] : false} bind:this={chatContainer} > <div class="mx-auto flex h-full max-w-3xl flex-col gap-6 px-5 pt-6 sm:gap-8 xl:max-w-4xl"> {#each messages as message, i} {#if i === 0 && $page.data?.assistant} <a class="mx-auto flex items-center gap-1.5 rounded-full border border-gray-100 bg-gray-50 py-1 pl-1 pr-3 text-sm text-gray-800 hover:bg-gray-100 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-200 dark:hover:bg-gray-700" href="{base}/settings/assistants/{$page.data.assistant._id}" > {#if $page.data?.assistant.avatar} <img src="{base}/settings/assistants/{$page.data?.assistant._id.toString()}/avatar.jpg?hash=${$page .data.assistant.avatar}" alt="Avatar" class="size-5 rounded-full object-cover" /> {:else} <div class="flex size-6 items-center justify-center rounded-full bg-gray-300 font-bold uppercase text-gray-500" > {$page.data?.assistant.name[0]} </div> {/if} {$page.data.assistant.name} </a> {:else if i === 0 && preprompt && preprompt != currentModel.preprompt} <SystemPromptModal preprompt={preprompt ?? ""} /> {/if} <ChatMessage loading={loading && i === messages.length - 1} {message} {isAuthor} {readOnly} model={currentModel} webSearchMessages={i === messages.length - 1 ? webSearchMessages : []} on:retry on:vote on:continue /> {:else} {#if !assistant} <ChatIntroduction {models} {currentModel} on:message /> {:else} <AssistantIntroduction {assistant} on:message /> {/if} {/each} {#if pending && messages[messages.length - 1]?.from === "user"} <ChatMessage message={{ from: "assistant", content: "", id: randomUUID() }} model={currentModel} {webSearchMessages} /> {/if} <div class="h-44 flex-none" /> </div> <ScrollToBottomBtn class="bottom-36 right-4 max-md:hidden lg:right-10" scrollNode={chatContainer} /> </div>
chat-ui/src/lib/components/chat/ChatMessages.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ChatMessages.svelte", "repo_id": "chat-ui", "token_count": 1392 }
41
import { z } from "zod"; import type { EmbeddingEndpoint, Embedding } from "../embeddingEndpoints"; import { chunk } from "$lib/utils/chunk"; export const embeddingEndpointTeiParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("tei"), url: z.string().url(), authorization: z.string().optional(), }); const getModelInfoByUrl = async (url: string, authorization?: string) => { const { origin } = new URL(url); const response = await fetch(`${origin}/info`, { headers: { Accept: "application/json", "Content-Type": "application/json", ...(authorization ? { Authorization: authorization } : {}), }, }); const json = await response.json(); return json; }; export async function embeddingEndpointTei( input: z.input<typeof embeddingEndpointTeiParametersSchema> ): Promise<EmbeddingEndpoint> { const { url, model, authorization } = embeddingEndpointTeiParametersSchema.parse(input); const { max_client_batch_size, max_batch_tokens } = await getModelInfoByUrl(url); const maxBatchSize = Math.min( max_client_batch_size, Math.floor(max_batch_tokens / model.chunkCharLength) ); return async ({ inputs }) => { const { origin } = new URL(url); const batchesInputs = chunk(inputs, maxBatchSize); const batchesResults = await Promise.all( batchesInputs.map(async (batchInputs) => { const response = await fetch(`${origin}/embed`, { method: "POST", headers: { Accept: "application/json", "Content-Type": "application/json", ...(authorization ? { Authorization: authorization } : {}), }, body: JSON.stringify({ inputs: batchInputs, normalize: true, truncate: true }), }); const embeddings: Embedding[] = await response.json(); return embeddings; }) ); const flatAllEmbeddings = batchesResults.flat(); return flatAllEmbeddings; }; }
chat-ui/src/lib/server/embeddingEndpoints/tei/embeddingEndpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingEndpoints/tei/embeddingEndpoints.ts", "repo_id": "chat-ui", "token_count": 664 }
42
import { LLM_SUMMERIZATION } from "$env/static/private"; import { generateFromDefaultEndpoint } from "$lib/server/generateFromDefaultEndpoint"; import type { Message } from "$lib/types/Message"; export async function summarize(prompt: string) { if (!LLM_SUMMERIZATION) { return prompt.split(/\s+/g).slice(0, 5).join(" "); } const messages: Array<Omit<Message, "id">> = [ { from: "user", content: "Who is the president of Gabon?" }, { from: "assistant", content: "🇬🇦 President of Gabon" }, { from: "user", content: "Who is Julien Chaumond?" }, { from: "assistant", content: "🧑 Julien Chaumond" }, { from: "user", content: "what is 1 + 1?" }, { from: "assistant", content: "🔢 Simple math operation" }, { from: "user", content: "What are the latest news?" }, { from: "assistant", content: "📰 Latest news" }, { from: "user", content: "How to make a great cheesecake?" }, { from: "assistant", content: "🍰 Cheesecake recipe" }, { from: "user", content: "what is your favorite movie? do a short answer." }, { from: "assistant", content: "🎥 Favorite movie" }, { from: "user", content: "Explain the concept of artificial intelligence in one sentence" }, { from: "assistant", content: "🤖 AI definition" }, { from: "user", content: prompt }, ]; return await generateFromDefaultEndpoint({ messages, preprompt: `You are a summarization AI. You'll never answer a user's question directly, but instead summarize the user's request into a single short sentence of four words or less. Always start your answer with an emoji relevant to the summary.`, }) .then((summary) => { // add an emoji if none is found in the first three characters if (!/\p{Emoji}/u.test(summary.slice(0, 3))) { return "💬 " + summary; } return summary; }) .catch((e) => { console.error(e); return null; }); }
chat-ui/src/lib/server/summarize.ts/0
{ "file_path": "chat-ui/src/lib/server/summarize.ts", "repo_id": "chat-ui", "token_count": 638 }
43
export interface ConvSidebar { id: string; title: string; updatedAt: Date; model?: string; assistantId?: string; avatarHash?: string; }
chat-ui/src/lib/types/ConvSidebar.ts/0
{ "file_path": "chat-ui/src/lib/types/ConvSidebar.ts", "repo_id": "chat-ui", "token_count": 50 }
44
/** * Chunk array into arrays of length at most `chunkSize` * * @param chunkSize must be greater than or equal to 1 */ export function chunk<T extends unknown[] | string>(arr: T, chunkSize: number): T[] { if (isNaN(chunkSize) || chunkSize < 1) { throw new RangeError("Invalid chunk size: " + chunkSize); } if (!arr.length) { return []; } /// Small optimization to not chunk buffers unless needed if (arr.length <= chunkSize) { return [arr]; } return range(Math.ceil(arr.length / chunkSize)).map((i) => { return arr.slice(i * chunkSize, (i + 1) * chunkSize); }) as T[]; } function range(n: number, b?: number): number[] { return b ? Array(b - n) .fill(0) .map((_, i) => n + i) : Array(n) .fill(0) .map((_, i) => i); }
chat-ui/src/lib/utils/chunk.ts/0
{ "file_path": "chat-ui/src/lib/utils/chunk.ts", "repo_id": "chat-ui", "token_count": 295 }
45
export const timeout = <T>(prom: Promise<T>, time: number): Promise<T> => { let timer: NodeJS.Timeout; return Promise.race([prom, new Promise<T>((_r, rej) => (timer = setTimeout(rej, time)))]).finally( () => clearTimeout(timer) ); };
chat-ui/src/lib/utils/timeout.ts/0
{ "file_path": "chat-ui/src/lib/utils/timeout.ts", "repo_id": "chat-ui", "token_count": 87 }
46
import type { RequestHandler } from "./$types"; import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import { error, redirect } from "@sveltejs/kit"; import { base } from "$app/paths"; import { z } from "zod"; import type { Message } from "$lib/types/Message"; import { models, validateModel } from "$lib/server/models"; import { defaultEmbeddingModel } from "$lib/server/embeddingModels"; export const POST: RequestHandler = async ({ locals, request }) => { const body = await request.text(); let title = ""; let messages: Message[] = []; const values = z .object({ fromShare: z.string().optional(), model: validateModel(models), assistantId: z.string().optional(), preprompt: z.string().optional(), }) .parse(JSON.parse(body)); let embeddingModel: string; if (values.fromShare) { const conversation = await collections.sharedConversations.findOne({ _id: values.fromShare, }); if (!conversation) { throw error(404, "Conversation not found"); } title = conversation.title; messages = conversation.messages; values.model = conversation.model; values.preprompt = conversation.preprompt; values.assistantId = conversation.assistantId?.toString(); embeddingModel = conversation.embeddingModel; } const model = models.find((m) => m.name === values.model); if (!model) { throw error(400, "Invalid model"); } embeddingModel ??= model.embeddingModel ?? defaultEmbeddingModel.name; if (model.unlisted) { throw error(400, "Can't start a conversation with an unlisted model"); } // Use the model preprompt if there is no conversation/preprompt in the request body const preprompt = await (async () => { if (values.assistantId) { const assistant = await collections.assistants.findOne({ _id: new ObjectId(values.assistantId), }); return assistant?.preprompt; } else { return values?.preprompt ?? model?.preprompt; } })(); const res = await collections.conversations.insertOne({ _id: new ObjectId(), title: title || "New Chat", messages, model: values.model, preprompt: preprompt === model?.preprompt ? model?.preprompt : preprompt, assistantId: values.assistantId ? new ObjectId(values.assistantId) : undefined, createdAt: new Date(), updatedAt: new Date(), embeddingModel, ...(locals.user ? { userId: locals.user._id } : { sessionId: locals.sessionId }), ...(values.fromShare ? { meta: { fromShareId: values.fromShare } } : {}), }); return new Response( JSON.stringify({ conversationId: res.insertedId.toString(), }), { headers: { "Content-Type": "application/json" } } ); }; export const GET: RequestHandler = async () => { throw redirect(302, `${base}/`); };
chat-ui/src/routes/conversation/+server.ts/0
{ "file_path": "chat-ui/src/routes/conversation/+server.ts", "repo_id": "chat-ui", "token_count": 920 }
47
import { redirect } from "@sveltejs/kit"; export const load = async ({ params }) => { throw redirect(302, "../conversation/" + params.id); };
chat-ui/src/routes/r/[id]/+page.ts/0
{ "file_path": "chat-ui/src/routes/r/[id]/+page.ts", "repo_id": "chat-ui", "token_count": 46 }
48
@import "./highlight-js.css"; @tailwind base; @tailwind components; @tailwind utilities; @layer components { .btn { @apply inline-flex flex-shrink-0 cursor-pointer select-none items-center justify-center whitespace-nowrap outline-none transition-all focus:ring disabled:cursor-default; } } @layer utilities { .scrollbar-custom { @apply scrollbar-thin scrollbar-track-transparent scrollbar-thumb-black/10 scrollbar-thumb-rounded-full scrollbar-w-1 hover:scrollbar-thumb-black/20 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20; } }
chat-ui/src/styles/main.css/0
{ "file_path": "chat-ui/src/styles/main.css", "repo_id": "chat-ui", "token_count": 189 }
49
{ "extends": "./.svelte-kit/tsconfig.json", "compilerOptions": { "allowJs": true, "checkJs": true, "esModuleInterop": true, "forceConsistentCasingInFileNames": true, "resolveJsonModule": true, "skipLibCheck": true, "sourceMap": true, "strict": true, "target": "ES2018" } // Path aliases are handled by https://kit.svelte.dev/docs/configuration#alias // // If you want to overwrite includes/excludes, make sure to copy over the relevant includes/excludes // from the referenced tsconfig.json - TypeScript does not merge them in }
chat-ui/tsconfig.json/0
{ "file_path": "chat-ui/tsconfig.json", "repo_id": "chat-ui", "token_count": 197 }
50
{ "license": "Apache-2.0", "creators": [ { "affiliation": "Hugging Face", "name": "Quentin Lhoest" }, { "orcid": "0000-0003-1727-1045", "affiliation": "Hugging Face", "name": "Albert Villanova del Moral" }, { "affiliation": "Hugging Face", "name": "Patrick von Platen" }, { "affiliation": "Hugging Face", "name": "Thomas Wolf" }, { "affiliation": "Hugging Face", "name": "Mario Šaško" }, { "affiliation": "Hugging Face", "name": "Yacine Jernite" }, { "affiliation": "Hugging Face", "name": "Abhishek Thakur" }, { "affiliation": "Hugging Face", "name": "Lewis Tunstall" }, { "affiliation": "Hugging Face", "name": "Suraj Patil" }, { "affiliation": "Hugging Face", "name": "Mariama Drame" }, { "affiliation": "Hugging Face", "name": "Julien Chaumond" }, { "affiliation": "Hugging Face", "name": "Julien Plu" }, { "affiliation": "Hugging Face", "name": "Joe Davison" }, { "affiliation": "Hugging Face", "name": "Simon Brandeis" }, { "affiliation": "Hugging Face", "name": "Victor Sanh" }, { "affiliation": "Hugging Face", "name": "Teven Le Scao" }, { "affiliation": "Hugging Face", "name": "Kevin Canwen Xu" }, { "affiliation": "Hugging Face", "name": "Nicolas Patry" }, { "affiliation": "Hugging Face", "name": "Steven Liu" }, { "affiliation": "Hugging Face", "name": "Angelina McMillan-Major" }, { "affiliation": "Hugging Face", "name": "Philipp Schmid" }, { "affiliation": "Hugging Face", "name": "Sylvain Gugger" }, { "affiliation": "Hugging Face", "name": "Nathan Raw" }, { "affiliation": "Hugging Face", "name": "Sylvain Lesage" }, { "affiliation": "Hugging Face", "name": "Anton Lozhkov" }, { "affiliation": "Hugging Face", "name": "Matthew Carrigan" }, { "affiliation": "Hugging Face", "name": "Th\u00e9o Matussi\u00e8re" }, { "affiliation": "Hugging Face", "name": "Leandro von Werra" }, { "affiliation": "Hugging Face", "name": "Lysandre Debut" }, { "affiliation": "Hugging Face", "name": "Stas Bekman" }, { "affiliation": "Hugging Face", "name": "Cl\u00e9ment Delangue" } ] }
datasets/.zenodo.json/0
{ "file_path": "datasets/.zenodo.json", "repo_id": "datasets", "token_count": 1953 }
51
import json import sys def format_json_to_md(input_json_file, output_md_file): with open(input_json_file, encoding="utf-8") as f: results = json.load(f) output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "] for benchmark_name in sorted(results): benchmark_res = results[benchmark_name] benchmark_file_name = benchmark_name.split("/")[-1] output_md.append(f"### Benchmark: {benchmark_file_name}") title = "| metric |" lines = "|--------|" value = "| new / old (diff) |" for metric_name in sorted(benchmark_res): metric_vals = benchmark_res[metric_name] new_val = metric_vals["new"] old_val = metric_vals.get("old", None) dif_val = metric_vals.get("diff", None) val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None" if old_val is not None: val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None" if dif_val is not None: val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("</details>") with open(output_md_file, "w", encoding="utf-8") as f: f.writelines("\n".join(output_md)) if __name__ == "__main__": input_json_file = sys.argv[1] output_md_file = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
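This script converts a nested benchmark-results JSON file into a Markdown report. Below is a minimal sketch of driving it, assuming results shaped the way `format_json_to_md` expects (benchmark name → metric name → `new`/`old`/`diff` values); the file names are only illustrative:

```python
import json

from format import format_json_to_md  # assuming the script above is saved as format.py on the import path

# Hypothetical benchmark results in the nested layout the script walks over.
results = {
    "benchmarks/benchmark_map_filter.json": {
        "map_time_sec": {"new": 1.23, "old": 1.50, "diff": -0.27},
        "filter_time_sec": {"new": 0.80},
    }
}

with open("results.json", "w", encoding="utf-8") as f:
    json.dump(results, f)

# Equivalent to running: python format.py results.json report.md
format_json_to_md("results.json", "report.md")
```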
datasets/benchmarks/format.py/0
{ "file_path": "datasets/benchmarks/format.py", "repo_id": "datasets", "token_count": 746 }
52
# Batch mapping

Combining the utility of [`Dataset.map`] with batch mode is very powerful. It allows you to speed up processing, and freely control the size of the generated dataset.

## Need for speed

The primary objective of batch mapping is to speed up processing. Often, it is faster to work with batches of data instead of single examples. Naturally, batch mapping lends itself to tokenization. For example, the 🤗 [Tokenizers](https://huggingface.co/docs/tokenizers/python/latest/) library works faster with batches because it parallelizes the tokenization of all the examples in a batch. A minimal example of batched tokenization is sketched at the end of this page.

## Input size != output size

The ability to control the size of the generated dataset can be leveraged for many interesting use-cases. In the How-to [map](#map) section, there are examples of using batch mapping to:

- Split long sentences into shorter chunks.
- Augment a dataset with additional tokens.

It is helpful to understand how this works, so you can come up with your own ways to use batch mapping. At this point, you may be wondering how you can control the size of the generated dataset. The answer is: **the mapped function does not have to return an output batch of the same size**.

In other words, your mapped function input can be a batch of size `N` and return a batch of size `M`. The output `M` can be greater than or less than `N`. This means you can concatenate your examples, split them up, and even add more examples!

However, remember that all values in the output dictionary must contain the **same number of elements** as the other fields in the output dictionary. Otherwise, it is not possible to define the number of examples in the output returned by the mapped function. The number can vary between successive batches processed by the mapped function, but within a single batch, all values of the output dictionary must have the same length (i.e., the same number of elements).

For example, from a dataset of 1 column and 3 rows, if you use `map` to return a new column with twice as many rows, then you will have an error. In this case, you end up with one column with 3 rows, and one column with 6 rows. As you can see, the table will not be valid:

```py
>>> from datasets import Dataset
>>> dataset = Dataset.from_dict({"a": [0, 1, 2]})
>>> dataset.map(lambda batch: {"b": batch["a"] * 2}, batched=True)  # new column with 6 elements: [0, 1, 2, 0, 1, 2]
'ArrowInvalid: Column 1 named b expected length 3 but got length 6'
```

To make it valid, you have to drop one of the columns:

```py
>>> from datasets import Dataset
>>> dataset = Dataset.from_dict({"a": [0, 1, 2]})
>>> dataset_with_duplicates = dataset.map(lambda batch: {"b": batch["a"] * 2}, remove_columns=["a"], batched=True)
>>> len(dataset_with_duplicates)
6
```
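To see the speed-up from batch mode in practice, here is a minimal sketch of batched tokenization. It assumes the 🤗 Transformers library is installed, uses `bert-base-uncased` purely as an example checkpoint, and expects the dataset to have a `text` column:

```py
>>> from datasets import Dataset
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> dataset = Dataset.from_dict({"text": ["the quick brown fox"] * 1000})

>>> # With batched=True the mapped function receives a dict of lists (a whole batch),
>>> # so the fast tokenizer can encode all examples of the batch at once.
>>> tokenized = dataset.map(lambda batch: tokenizer(batch["text"], truncation=True), batched=True)
>>> tokenized.column_names
['text', 'input_ids', 'token_type_ids', 'attention_mask']
```

The same call without `batched=True` tokenizes one example at a time, which is typically noticeably slower with fast tokenizers.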
datasets/docs/source/about_map_batch.mdx/0
{ "file_path": "datasets/docs/source/about_map_batch.mdx", "repo_id": "datasets", "token_count": 722 }
53
# Metrics

<Tip warning={true}>

Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.

</Tip>

Metrics are important for evaluating a model's predictions. In the tutorial, you learned how to compute a metric over an entire evaluation set. You have also seen how to load a metric.

This guide will show you how to:

- Add predictions and references.
- Compute metrics using different methods.
- Write your own metric loading script.

## Add predictions and references

When you want to add model predictions and references to a [`Metric`] instance, you have two options:

- [`Metric.add`] adds a single `prediction` and `reference`.
- [`Metric.add_batch`] adds a batch of `predictions` and `references`.

Use [`Metric.add_batch`] by passing it your model predictions, and the references the model predictions should be evaluated against:

```py
>>> import datasets
>>> metric = datasets.load_metric('my_metric')
>>> for model_input, gold_references in evaluation_dataset:
...     model_predictions = model(model_input)
...     metric.add_batch(predictions=model_predictions, references=gold_references)
>>> final_score = metric.compute()
```

<Tip>

Metrics accept various input formats (Python lists, NumPy arrays, PyTorch tensors, etc.) and convert them to an appropriate format for storage and computation.

</Tip>

## Compute scores

The most straightforward way to calculate a metric is to call [`Metric.compute`]. But some metrics have additional arguments that allow you to modify the metric's behavior.

Let's load the [SacreBLEU](https://huggingface.co/metrics/sacrebleu) metric, and compute it with a different smoothing method.

1. Load the SacreBLEU metric:

```py
>>> import datasets
>>> metric = datasets.load_metric('sacrebleu')
```

2. Inspect the different arguments available for computing the metric:

```py
>>> print(metric.inputs_description)
Produces BLEU scores along with its sufficient statistics from a source against one or more references.

Args:
    predictions: The system stream (a sequence of segments).
    references: A list of one or more reference streams (each a sequence of segments).
    smooth_method: The smoothing method to use. (Default: 'exp').
    smooth_value: The smoothing value. Only valid for 'floor' and 'add-k'. (Defaults: floor: 0.1, add-k: 1).
    tokenize: Tokenization method to use for BLEU. If not provided, defaults to 'zh' for Chinese, 'ja-mecab' for Japanese and '13a' (mteval) otherwise.
    lowercase: Lowercase the data. If True, enables case-insensitivity. (Default: False).
    force: Insist that your tokenized input is actually detokenized.
    ...
```

3. Compute the metric with the `floor` method, and a different `smooth_value`:

```py
>>> score = metric.compute(smooth_method="floor", smooth_value=0.2)
```

<a id='metric_script'></a>

## Custom metric loading script

Write a metric loading script to use your own custom metric (or one that is not on the Hub). Then you can load it as usual with [`load_metric`].

To help you get started, open the [SQuAD metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/squad/squad.py) and follow along.

<Tip>

Get a jump start with our metric loading script [template](https://github.com/huggingface/datasets/blob/f9713d2e23813142a02f1b0e965095f528785cff/templates/new_metric_script.py)!
</Tip> ### Add metric attributes Start by adding some information about your metric in [`Metric._info`]. The most important attributes you should specify are: 1. [`MetricInfo.description`] provides a brief description about your metric. 2. [`MetricInfo.citation`] contains a BibTex citation for the metric. 3. [`MetricInfo.inputs_description`] describes the expected inputs and outputs. It may also provide an example usage of the metric. 4. [`MetricInfo.features`] defines the name and type of the predictions and references. After you've filled out all these fields in the template, it should look like the following example from the SQuAD metric script: ```py class Squad(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")}, "references": { "id": datasets.Value("string"), "answers": datasets.features.Sequence( { "text": datasets.Value("string"), "answer_start": datasets.Value("int32"), } ), }, } ), codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"], reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"], ) ``` ### Download metric files If your metric needs to download, or retrieve local files, you will need to use the [`Metric._download_and_prepare`] method. For this example, let's examine the [BLEURT metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/bleurt/bleurt.py). 1. Provide a dictionary of URLs that point to the metric files: ```py CHECKPOINT_URLS = { "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip", "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip", "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip", "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip", "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip", "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip", } ``` <Tip> If the files are stored locally, provide a dictionary of path(s) instead of URLs. </Tip> 2. [`Metric._download_and_prepare`] will take the URLs and download the metric files specified: ```py def _download_and_prepare(self, dl_manager): # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( "Using default BLEURT-Base checkpoint for sequence maximum length 128. " "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')." ) self.config_name = "bleurt-base-128" if self.config_name not in CHECKPOINT_URLS.keys(): raise KeyError( f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" ) # download the model checkpoint specified by self.config_name and set up the scorer model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[self.config_name]) self.scorer = score.BleurtScorer(os.path.join(model_path, self.config_name)) ``` ### Compute score [`DatasetBuilder._compute`] provides the actual instructions for how to compute a metric given the predictions and references. Now let's take a look at the [GLUE metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/glue/glue.py). 1. 
Provide the functions for [`DatasetBuilder._compute`] to calculate your metric: ```py def simple_accuracy(preds, labels): return (preds == labels).mean().item() def acc_and_f1(preds, labels): acc = simple_accuracy(preds, labels) f1 = f1_score(y_true=labels, y_pred=preds).item() return { "accuracy": acc, "f1": f1, } def pearson_and_spearman(preds, labels): pearson_corr = pearsonr(preds, labels)[0].item() spearman_corr = spearmanr(preds, labels)[0].item() return { "pearson": pearson_corr, "spearmanr": spearman_corr, } ``` 2. Create [`DatasetBuilder._compute`] with instructions for what metric to calculate for each configuration: ```py def _compute(self, predictions, references): if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(references, predictions)} elif self.config_name == "stsb": return pearson_and_spearman(predictions, references) elif self.config_name in ["mrpc", "qqp"]: return acc_and_f1(predictions, references) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(predictions, references)} else: raise KeyError( "You should supply a configuration name selected in " '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) ``` ### Test Once you're finished writing your metric loading script, try to load it locally: ```py >>> from datasets import load_metric >>> metric = load_metric('PATH/TO/MY/SCRIPT.py') ```
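From there you can run a quick end-to-end check. A minimal sketch, assuming your script uses the SQuAD-style features shown earlier in this guide (the id and answer below are made-up values):

```py
>>> predictions = [{"id": "001", "prediction_text": "Paris"}]
>>> references = [{"id": "001", "answers": {"text": ["Paris"], "answer_start": [0]}}]
>>> metric.add_batch(predictions=predictions, references=references)
>>> results = metric.compute()
>>> print(results)  # the SQuAD metric, for example, returns {'exact_match': 100.0, 'f1': 100.0}
```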
datasets/docs/source/how_to_metrics.mdx/0
{ "file_path": "datasets/docs/source/how_to_metrics.mdx", "repo_id": "datasets", "token_count": 3350 }
54
# Loading methods Methods for listing and loading datasets and metrics: ## Datasets [[autodoc]] datasets.list_datasets [[autodoc]] datasets.load_dataset [[autodoc]] datasets.load_from_disk [[autodoc]] datasets.load_dataset_builder [[autodoc]] datasets.get_dataset_config_names [[autodoc]] datasets.get_dataset_infos [[autodoc]] datasets.get_dataset_split_names [[autodoc]] datasets.inspect_dataset ## Metrics <Tip warning={true}> Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets. </Tip> [[autodoc]] datasets.list_metrics [[autodoc]] datasets.load_metric [[autodoc]] datasets.inspect_metric ## From files Configurations used to load data files. They are used when loading local files or a dataset repository: - local files: `load_dataset("parquet", data_dir="path/to/data/dir")` - dataset repository: `load_dataset("allenai/c4")` You can pass arguments to `load_dataset` to configure data loading. For example you can specify the `sep` parameter to define the [`~datasets.packaged_modules.csv.CsvConfig`] that is used to load the data: ```python load_dataset("csv", data_dir="path/to/data/dir", sep="\t") ``` ### Text [[autodoc]] datasets.packaged_modules.text.TextConfig [[autodoc]] datasets.packaged_modules.text.Text ### CSV [[autodoc]] datasets.packaged_modules.csv.CsvConfig [[autodoc]] datasets.packaged_modules.csv.Csv ### JSON [[autodoc]] datasets.packaged_modules.json.JsonConfig [[autodoc]] datasets.packaged_modules.json.Json ### Parquet [[autodoc]] datasets.packaged_modules.parquet.ParquetConfig [[autodoc]] datasets.packaged_modules.parquet.Parquet ### Arrow [[autodoc]] datasets.packaged_modules.arrow.ArrowConfig [[autodoc]] datasets.packaged_modules.arrow.Arrow ### SQL [[autodoc]] datasets.packaged_modules.sql.SqlConfig [[autodoc]] datasets.packaged_modules.sql.Sql ### Images [[autodoc]] datasets.packaged_modules.imagefolder.ImageFolderConfig [[autodoc]] datasets.packaged_modules.imagefolder.ImageFolder ### Audio [[autodoc]] datasets.packaged_modules.audiofolder.AudioFolderConfig [[autodoc]] datasets.packaged_modules.audiofolder.AudioFolder ### WebDataset [[autodoc]] datasets.packaged_modules.webdataset.WebDataset
datasets/docs/source/package_reference/loading_methods.mdx/0
{ "file_path": "datasets/docs/source/package_reference/loading_methods.mdx", "repo_id": "datasets", "token_count": 809 }
55
# Use with JAX

This document is a quick introduction to using `datasets` with JAX, with a particular focus on how to get `jax.Array` objects out of our datasets, and how to use them to train JAX models.

<Tip>

`jax` and `jaxlib` are required to run the code in this document, so please make sure you install them as `pip install datasets[jax]`.

</Tip>

## Dataset format

By default, datasets return regular Python objects: integers, floats, strings, lists, etc. (string and binary objects are left unchanged, since JAX only supports numbers). To get JAX arrays (numpy-like) instead, you can set the format of the dataset to `jax`:

```py
>>> from datasets import Dataset
>>> data = [[1, 2], [3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("jax")
>>> ds[0]
{'data': DeviceArray([1, 2], dtype=int32)}
>>> ds[:2]
{'data': DeviceArray([
    [1, 2],
    [3, 4]], dtype=int32)}
```

<Tip>

A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to JAX arrays.

</Tip>

Note that the exact same procedure applies to `DatasetDict` objects, so that when setting the format of a `DatasetDict` to `jax`, all the `Dataset`s there will be formatted as `jax`:

```py
>>> from datasets import DatasetDict
>>> data = {"train": {"data": [[1, 2], [3, 4]]}, "test": {"data": [[5, 6], [7, 8]]}}
>>> dds = DatasetDict.from_dict(data)
>>> dds = dds.with_format("jax")
>>> dds["train"][:2]
{'data': DeviceArray([
    [1, 2],
    [3, 4]], dtype=int32)}
```

Another thing to keep in mind is that the formatting is not applied until you actually access the data, so you only get JAX arrays when you read from the dataset.

Finally, to load the data on the device of your choice, you can specify the `device` argument, but note that `jaxlib.xla_extension.Device` is not supported as it's not serializable with either `pickle` or `dill`, so you'll need to use its string identifier instead:

```py
>>> import jax
>>> from datasets import Dataset
>>> data = [[1, 2], [3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> device = str(jax.devices()[0])  # Not casting to `str` before passing it to `with_format` will raise a `ValueError`
>>> ds = ds.with_format("jax", device=device)
>>> ds[0]
{'data': DeviceArray([1, 2], dtype=int32)}
>>> ds[0]["data"].device()
TFRT_CPU_0
>>> assert ds[0]["data"].device() == jax.devices()[0]
True
```

Note that if the `device` argument is not provided to `with_format` then it will use the default device, which is `jax.devices()[0]`.

## N-dimensional arrays

If your dataset consists of N-dimensional arrays, you will see that by default they are considered as nested lists. In particular, a JAX formatted dataset outputs a `DeviceArray` object, which is a numpy-like array, so it does not need the [`Array`] feature type to be specified, unlike the PyTorch or TensorFlow formatters.
```py >>> from datasets import Dataset >>> data = [[[1, 2],[3, 4]], [[5, 6],[7, 8]]] >>> ds = Dataset.from_dict({"data": data}) >>> ds = ds.with_format("jax") >>> ds[0] {'data': DeviceArray([[1, 2], [3, 4]], dtype=int32)} ``` ## Other feature types [`ClassLabel`] data is properly converted to arrays: ```py >>> from datasets import Dataset, Features, ClassLabel >>> labels = [0, 0, 1] >>> features = Features({"label": ClassLabel(names=["negative", "positive"])}) >>> ds = Dataset.from_dict({"label": labels}, features=features) >>> ds = ds.with_format("jax") >>> ds[:3] {'label': DeviceArray([0, 0, 1], dtype=int32)} ``` String and binary objects are unchanged, since JAX only supports numbers. The [`Image`] and [`Audio`] feature types are also supported. <Tip> To use the [`Image`] feature type, you'll need to install the `vision` extra as `pip install datasets[vision]`. </Tip> ```py >>> from datasets import Dataset, Features, Image >>> images = ["path/to/image.png"] * 10 >>> features = Features({"image": Image()}) >>> ds = Dataset.from_dict({"image": images}, features=features) >>> ds = ds.with_format("jax") >>> ds[0]["image"].shape (512, 512, 3) >>> ds[0] {'image': DeviceArray([[[ 255, 255, 255], [ 255, 255, 255], ..., [ 255, 255, 255], [ 255, 255, 255]]], dtype=uint8)} >>> ds[:2]["image"].shape (2, 512, 512, 3) >>> ds[:2] {'image': DeviceArray([[[[ 255, 255, 255], [ 255, 255, 255], ..., [ 255, 255, 255], [ 255, 255, 255]]]], dtype=uint8)} ``` <Tip> To use the [`Audio`] feature type, you'll need to install the `audio` extra as `pip install datasets[audio]`. </Tip> ```py >>> from datasets import Dataset, Features, Audio >>> audio = ["path/to/audio.wav"] * 10 >>> features = Features({"audio": Audio()}) >>> ds = Dataset.from_dict({"audio": audio}, features=features) >>> ds = ds.with_format("jax") >>> ds[0]["audio"]["array"] DeviceArray([-0.059021 , -0.03894043, -0.00735474, ..., 0.0133667 , 0.01809692, 0.00268555], dtype=float32) >>> ds[0]["audio"]["sampling_rate"] DeviceArray(44100, dtype=int32, weak_type=True) ``` ## Data loading JAX doesn't have any built-in data loading capabilities, so you'll need to use a library such as [PyTorch](https://pytorch.org/) to load your data using a `DataLoader` or [TensorFlow](https://www.tensorflow.org/) using a `tf.data.Dataset`. Citing the [JAX documentation](https://jax.readthedocs.io/en/latest/notebooks/Neural_Network_and_Data_Loading.html#data-loading-with-pytorch) on this topic: "JAX is laser-focused on program transformations and accelerator-backed NumPy, so we don’t include data loading or munging in the JAX library. There are already a lot of great data loaders out there, so let’s just use them instead of reinventing anything. We’ll grab PyTorch’s data loader, and make a tiny shim to make it work with NumPy arrays.". So that's the reason why JAX-formatting in `datasets` is so useful, because it lets you use any model from the HuggingFace Hub with JAX, without having to worry about the data loading part. ### Using `with_format('jax')` The easiest way to get JAX arrays out of a dataset is to use the `with_format('jax')` method. Lets assume that we want to train a neural network on the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) available at the HuggingFace Hub at https://huggingface.co/datasets/mnist. 
```py >>> from datasets import load_dataset >>> ds = load_dataset("mnist") >>> ds = ds.with_format("jax") >>> ds["train"][0] {'image': DeviceArray([[ 0, 0, 0, ...], [ 0, 0, 0, ...], ..., [ 0, 0, 0, ...], [ 0, 0, 0, ...]], dtype=uint8), 'label': DeviceArray(5, dtype=int32)} ``` Once the format is set we can feed the dataset to the JAX model in batches using the `Dataset.iter()` method: ```py >>> for epoch in range(epochs): ... for batch in ds["train"].iter(batch_size=32): ... x, y = batch["image"], batch["label"] ... ... ```
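To make that loop concrete, here is a minimal end-to-end sketch with a hypothetical linear classifier. The model, learning rate, and pixel scaling are illustrative choices, not part of 🤗 Datasets or JAX:

```py
import jax
import jax.numpy as jnp
from datasets import load_dataset

# Hypothetical linear classifier: 28x28 MNIST images flattened to 784 features, 10 classes.
params = {"w": jnp.zeros((784, 10)), "b": jnp.zeros((10,))}
LEARNING_RATE = 0.1

def loss_fn(params, x, y):
    logits = x @ params["w"] + params["b"]
    labels = jax.nn.one_hot(y, 10)
    return -jnp.mean(jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1))

@jax.jit
def train_step(params, x, y):
    grads = jax.grad(loss_fn)(params, x, y)
    return jax.tree_util.tree_map(lambda p, g: p - LEARNING_RATE * g, params, grads)

ds = load_dataset("mnist", split="train").with_format("jax")
for batch in ds.iter(batch_size=32):
    x = batch["image"].reshape(-1, 784) / 255.0  # flatten the images and scale pixels to [0, 1]
    y = batch["label"]
    params = train_step(params, x, y)
```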
datasets/docs/source/use_with_jax.mdx/0
{ "file_path": "datasets/docs/source/use_with_jax.mdx", "repo_id": "datasets", "token_count": 2646 }
56
# Copyright 2021 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Chrf(++) metric as available in sacrebleu. """ import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets _CITATION = """\ @inproceedings{popovic-2015-chrf, title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation", month = sep, year = "2015", address = "Lisbon, Portugal", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W15-3049", doi = "10.18653/v1/W15-3049", pages = "392--395", } @inproceedings{popovic-2017-chrf, title = "chr{F}++: words helping character n-grams", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Second Conference on Machine Translation", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W17-4770", doi = "10.18653/v1/W17-4770", pages = "612--618", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } """ _DESCRIPTION = """\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. """ _KWARGS_DESCRIPTION = """ Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. 
If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: 'score' (float): The chrF (chrF++) score, 'char_order' (int): The character n-gram order, 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++, 'beta' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... lowercase=True) >>> print(results) {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class ChrF(datasets.Metric): def _info(self): if version.parse(scb.__version__) < version.parse("1.4.12"): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" 'You can install it with `pip install "sacrebleu>=1.4.12"`.' 
) return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("string", id="sequence"), "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"), } ), codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"], reference_urls=[ "https://github.com/m-popovic/chrF", ], ) def _compute( self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False, ): references_per_prediction = len(references[0]) if any(len(refs) != references_per_prediction for refs in references): raise ValueError("Sacrebleu requires the same number of references for each prediction") transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)] sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing) output = sb_chrf.corpus_score(predictions, transformed_references) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
datasets/metrics/chrf/chrf.py/0
{ "file_path": "datasets/metrics/chrf/chrf.py", "repo_id": "datasets", "token_count": 3169 }
57
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """F1 metric.""" from sklearn.metrics import f1_score import datasets _DESCRIPTION = """ The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """ _KWARGS_DESCRIPTION = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. 
>>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. ])} """ _CITATION = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class F1(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32")), "references": datasets.Sequence(datasets.Value("int32")), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32"), "references": datasets.Value("int32"), } ), reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"], ) def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None): score = f1_score( references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight ) return {"f1": float(score) if score.size == 1 else score}
datasets/metrics/f1/f1.py/0
{ "file_path": "datasets/metrics/f1/f1.py", "repo_id": "datasets", "token_count": 2364 }
58
# coding=utf-8 # Copyright 2020 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ MAUVE metric from https://github.com/krishnap25/mauve. """ import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets _CITATION = """\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """ _DESCRIPTION = """\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """ _KWARGS_DESCRIPTION = """ Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: "c" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. 
Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class Mauve(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/krishnap25/mauve", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("string", id="sequence"), "references": datasets.Value("string", id="sequence"), } ), codebase_urls=["https://github.com/krishnap25/mauve"], reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ], ) def _compute( self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ): out = compute_mauve( p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, ) return out
datasets/metrics/mauve/mauve.py/0
{ "file_path": "datasets/metrics/mauve/mauve.py", "repo_id": "datasets", "token_count": 2588 }
59
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Accuracy metric.""" from sklearn.metrics import roc_auc_score import datasets _DESCRIPTION = """ This metric computes the area under the curve (AUC) for the Receiver Operating Characteristic Curve (ROC). The return values represent how well the model used is predicting the correct classes, based on the input data. A score of `0.5` means that the model is predicting exactly at chance, i.e. the model's predictions are correct at the same rate as if the predictions were being decided by the flip of a fair coin or the roll of a fair die. A score above `0.5` indicates that the model is doing better than chance, while a score below `0.5` indicates that the model is doing worse than chance. This metric has three separate use cases: - binary: The case in which there are only two different label classes, and each example gets only one label. This is the default implementation. - multiclass: The case in which there can be more than two different label classes, but each example still gets only one label. - multilabel: The case in which there can be more than two different label classes, and each example can have more than one label. """ _KWARGS_DESCRIPTION = """ Args: - references (array-like of shape (n_samples,) or (n_samples, n_classes)): Ground truth labels. Expects different input based on use case: - binary: expects an array-like of shape (n_samples,) - multiclass: expects an array-like of shape (n_samples,) - multilabel: expects an array-like of shape (n_samples, n_classes) - prediction_scores (array-like of shape (n_samples,) or (n_samples, n_classes)): Model predictions. Expects different inputs based on use case: - binary: expects an array-like of shape (n_samples,) - multiclass: expects an array-like of shape (n_samples, n_classes) - multilabel: expects an array-like of shape (n_samples, n_classes) - average (`str`): Type of average, and is ignored in the binary use case. Defaults to 'macro'. Options are: - `'micro'`: Calculates metrics globally by considering each element of the label indicator matrix as a label. Only works with the multilabel use case. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average, weighted by support (i.e. the number of true instances for each label). - `'samples'`: Calculate metrics for each instance, and find their average. Only works with the multilabel use case. - `None`: No average is calculated, and scores for each class are returned. Only works with the multilabels use case. - sample_weight (array-like of shape (n_samples,)): Sample weights. Defaults to None. - max_fpr (`float`): If not None, the standardized partial AUC over the range [0, `max_fpr`] is returned. Must be greater than `0` and less than or equal to `1`. Defaults to `None`. 
Note: For the multiclass use case, `max_fpr` should be either `None` or `1.0` as ROC AUC partial computation is not currently supported for `multiclass`. - multi_class (`str`): Only used for multiclass targets, where it is required. Determines the type of configuration to use. Options are: - `'ovr'`: Stands for One-vs-rest. Computes the AUC of each class against the rest. This treats the multiclass case in the same way as the multilabel case. Sensitive to class imbalance even when `average == 'macro'`, because class imbalance affects the composition of each of the 'rest' groupings. - `'ovo'`: Stands for One-vs-one. Computes the average AUC of all possible pairwise combinations of classes. Insensitive to class imbalance when `average == 'macro'`. - labels (array-like of shape (n_classes,)): Only used for multiclass targets. List of labels that index the classes in `prediction_scores`. If `None`, the numerical or lexicographical order of the labels in `prediction_scores` is used. Defaults to `None`. Returns: roc_auc (`float` or array-like of shape (n_classes,)): Returns array if in multilabel use case and `average='None'`. Otherwise, returns `float`. Examples: Example 1: >>> roc_auc_score = datasets.load_metric("roc_auc") >>> refs = [1, 0, 1, 1, 0, 0] >>> pred_scores = [0.5, 0.2, 0.99, 0.3, 0.1, 0.7] >>> results = roc_auc_score.compute(references=refs, prediction_scores=pred_scores) >>> print(round(results['roc_auc'], 2)) 0.78 Example 2: >>> roc_auc_score = datasets.load_metric("roc_auc", "multiclass") >>> refs = [1, 0, 1, 2, 2, 0] >>> pred_scores = [[0.3, 0.5, 0.2], ... [0.7, 0.2, 0.1], ... [0.005, 0.99, 0.005], ... [0.2, 0.3, 0.5], ... [0.1, 0.1, 0.8], ... [0.1, 0.7, 0.2]] >>> results = roc_auc_score.compute(references=refs, prediction_scores=pred_scores, multi_class='ovr') >>> print(round(results['roc_auc'], 2)) 0.85 Example 3: >>> roc_auc_score = datasets.load_metric("roc_auc", "multilabel") >>> refs = [[1, 1, 0], ... [1, 1, 0], ... [0, 1, 0], ... [0, 0, 1], ... [0, 1, 1], ... [1, 0, 1]] >>> pred_scores = [[0.3, 0.5, 0.2], ... [0.7, 0.2, 0.1], ... [0.005, 0.99, 0.005], ... [0.2, 0.3, 0.5], ... [0.1, 0.1, 0.8], ... [0.1, 0.7, 0.2]] >>> results = roc_auc_score.compute(references=refs, prediction_scores=pred_scores, average=None) >>> print([round(res, 2) for res in results['roc_auc']]) [0.83, 0.38, 0.94] """ _CITATION = """\ @article{doi:10.1177/0272989X8900900307, author = {Donna Katzman McClish}, title ={Analyzing a Portion of the ROC Curve}, journal = {Medical Decision Making}, volume = {9}, number = {3}, pages = {190-195}, year = {1989}, doi = {10.1177/0272989X8900900307}, note ={PMID: 2668680}, URL = {https://doi.org/10.1177/0272989X8900900307}, eprint = {https://doi.org/10.1177/0272989X8900900307} } @article{10.1023/A:1010920819831, author = {Hand, David J. and Till, Robert J.}, title = {A Simple Generalisation of the Area Under the ROC Curve for Multiple Class Classification Problems}, year = {2001}, issue_date = {November 2001}, publisher = {Kluwer Academic Publishers}, address = {USA}, volume = {45}, number = {2}, issn = {0885-6125}, url = {https://doi.org/10.1023/A:1010920819831}, doi = {10.1023/A:1010920819831}, journal = {Mach. Learn.}, month = {oct}, pages = {171–186}, numpages = {16}, keywords = {Gini index, AUC, error rate, ROC curve, receiver operating characteristic} } @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. 
and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class ROCAUC(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "prediction_scores": datasets.Sequence(datasets.Value("float")), "references": datasets.Value("int32"), } if self.config_name == "multiclass" else { "references": datasets.Sequence(datasets.Value("int32")), "prediction_scores": datasets.Sequence(datasets.Value("float")), } if self.config_name == "multilabel" else { "references": datasets.Value("int32"), "prediction_scores": datasets.Value("float"), } ), reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html"], ) def _compute( self, references, prediction_scores, average="macro", sample_weight=None, max_fpr=None, multi_class="raise", labels=None, ): return { "roc_auc": roc_auc_score( references, prediction_scores, average=average, sample_weight=sample_weight, max_fpr=max_fpr, multi_class=multi_class, labels=labels, ) }
datasets/metrics/roc_auc/roc_auc.py/0
{ "file_path": "datasets/metrics/roc_auc/roc_auc.py", "repo_id": "datasets", "token_count": 3792 }
60
# Copyright 2020 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ SQuAD v2 metric. """ import datasets from .evaluate import ( apply_no_ans_threshold, find_all_best_thresh, get_raw_scores, make_eval_dict, make_qid_to_has_ans, merge_eval, ) _CITATION = """\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } """ _DESCRIPTION = """ This metric wrap the official scoring script for version 2 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. """ _KWARGS_DESCRIPTION = """ Computes SQuAD v2 scores (F1 and EM). Args: predictions: List of triple for question-answers to score with the following elements: - the question-answer 'id' field as given in the references (see below) - the text of the answer - the probability that the question has no answer references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a list of Dict {'text': text of the answer as a string} no_answer_threshold: float Probability threshold to decide that a question has no answer. 
Returns: 'exact': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer 'total': Number of score considered 'HasAns_exact': Exact match (the normalized answer exactly match the gold answer) 'HasAns_f1': The F-score of predicted tokens versus the gold answer 'HasAns_total': Number of score considered 'NoAns_exact': Exact match (the normalized answer exactly match the gold answer) 'NoAns_f1': The F-score of predicted tokens versus the gold answer 'NoAns_total': Number of score considered 'best_exact': Best exact match (with varying threshold) 'best_exact_thresh': No-answer probability threshold associated to the best exact match 'best_f1': Best F1 (with varying threshold) 'best_f1_thresh': No-answer probability threshold associated to the best F1 Examples: >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}] >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] >>> squad_v2_metric = datasets.load_metric("squad_v2") >>> results = squad_v2_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact': 100.0, 'f1': 100.0, 'total': 1, 'HasAns_exact': 100.0, 'HasAns_f1': 100.0, 'HasAns_total': 1, 'best_exact': 100.0, 'best_exact_thresh': 0.0, 'best_f1': 100.0, 'best_f1_thresh': 0.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class SquadV2(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": { "id": datasets.Value("string"), "prediction_text": datasets.Value("string"), "no_answer_probability": datasets.Value("float32"), }, "references": { "id": datasets.Value("string"), "answers": datasets.features.Sequence( {"text": datasets.Value("string"), "answer_start": datasets.Value("int32")} ), }, } ), codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"], reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"], ) def _compute(self, predictions, references, no_answer_threshold=1.0): no_answer_probabilities = {p["id"]: p["no_answer_probability"] for p in predictions} dataset = [{"paragraphs": [{"qas": references}]}] predictions = {p["id"]: p["prediction_text"] for p in predictions} qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw = get_raw_scores(dataset, predictions) exact_thresh = apply_no_ans_threshold(exact_raw, no_answer_probabilities, qid_to_has_ans, no_answer_threshold) f1_thresh = apply_no_ans_threshold(f1_raw, no_answer_probabilities, qid_to_has_ans, no_answer_threshold) out_eval = make_eval_dict(exact_thresh, f1_thresh) if has_ans_qids: has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids) merge_eval(out_eval, has_ans_eval, "HasAns") if no_ans_qids: no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids) merge_eval(out_eval, no_ans_eval, "NoAns") find_all_best_thresh(out_eval, predictions, exact_raw, f1_raw, no_answer_probabilities, qid_to_has_ans) return dict(out_eval)
datasets/metrics/squad_v2/squad_v2.py/0
{ "file_path": "datasets/metrics/squad_v2/squad_v2.py", "repo_id": "datasets", "token_count": 2564 }
61
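The record above is the SQuAD v2 metric script. As a quick illustration of the `no_answer_threshold` argument it exposes, here is a minimal, hypothetical sketch: the question id, the texts and the 0.9 threshold are made up, and it assumes a `datasets` version where `load_metric` is still available.

```python
import datasets

# One unanswerable question: empty gold answers, an empty prediction, and a high
# no-answer probability so the threshold treats it as "no answer".
predictions = [{"prediction_text": "", "id": "q-1", "no_answer_probability": 0.95}]
references = [{"answers": {"answer_start": [], "text": []}, "id": "q-1"}]

squad_v2 = datasets.load_metric("squad_v2")
results = squad_v2.compute(predictions=predictions, references=references, no_answer_threshold=0.9)
print(results)  # includes 'exact', 'f1', 'NoAns_*' and 'best_*' entries
```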
[tool.black]
line-length = 119
target_version = ['py37']

[tool.ruff]
# Ignored rules:
# "E501" -> line length violation
# "F821" -> undefined name in type annotation (e.g. Literal["something"])
# "C901" -> `function_name` is too complex
ignore = ["E501", "F821", "C901"]
select = ["C", "E", "F", "I", "W"]
line-length = 119

[tool.ruff.isort]
lines-after-imports = 2
known-first-party = ["datasets"]

[tool.pytest.ini_options]
# Test fails if a FutureWarning is thrown by `huggingface_hub`
filterwarnings = [
    "error::FutureWarning:huggingface_hub*",
]
markers = [
    "unit: unit test",
    "integration: integration test",
]
datasets/pyproject.toml/0
{ "file_path": "datasets/pyproject.toml", "repo_id": "datasets", "token_count": 245 }
62
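Since the `[tool.pytest.ini_options]` table above declares `unit` and `integration` markers, a test module can tag its tests accordingly. A minimal, hypothetical sketch; the test names and bodies are illustrative only:

```python
import pytest


@pytest.mark.unit
def test_split_is_pure():
    # selected with: pytest -m unit
    assert "a b".split() == ["a", "b"]


@pytest.mark.integration
def test_hub_roundtrip():
    # selected with: pytest -m integration
    pytest.skip("requires network access to the Hugging Face Hub")
```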
import os import re from functools import partial from glob import has_magic from pathlib import Path, PurePath from typing import Callable, Dict, List, Optional, Set, Tuple, Union import huggingface_hub from fsspec import get_fs_token_paths from fsspec.implementations.http import HTTPFileSystem from huggingface_hub import HfFileSystem from packaging import version from tqdm.contrib.concurrent import thread_map from . import config from .download import DownloadConfig from .download.streaming_download_manager import _prepare_path_and_storage_options, xbasename, xjoin from .naming import _split_re from .splits import Split from .utils import logging from .utils import tqdm as hf_tqdm from .utils.file_utils import is_local_path, is_relative_path from .utils.py_utils import glob_pattern_to_regex, string_to_dict SANITIZED_DEFAULT_SPLIT = str(Split.TRAIN) logger = logging.get_logger(__name__) class Url(str): pass class EmptyDatasetError(FileNotFoundError): pass SPLIT_PATTERN_SHARDED = "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*" SPLIT_KEYWORDS = { Split.TRAIN: ["train", "training"], Split.VALIDATION: ["validation", "valid", "dev", "val"], Split.TEST: ["test", "testing", "eval", "evaluation"], } NON_WORDS_CHARS = "-._ 0-9" if config.FSSPEC_VERSION < version.parse("2023.9.0"): KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**[{sep}/]{keyword}[{sep}/]**"] else: KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**/*[{sep}/]{keyword}[{sep}/]**"] DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST] DEFAULT_PATTERNS_SPLIT_IN_PATH_NAME = { split: [ pattern.format(keyword=keyword, sep=NON_WORDS_CHARS) for keyword in SPLIT_KEYWORDS[split] for pattern in KEYWORDS_IN_PATH_NAME_BASE_PATTERNS ] for split in DEFAULT_SPLITS } DEFAULT_PATTERNS_ALL = { Split.TRAIN: ["**"], } ALL_SPLIT_PATTERNS = [SPLIT_PATTERN_SHARDED] ALL_DEFAULT_PATTERNS = [ DEFAULT_PATTERNS_SPLIT_IN_PATH_NAME, DEFAULT_PATTERNS_ALL, ] if config.FSSPEC_VERSION < version.parse("2023.9.0"): METADATA_PATTERNS = [ "metadata.csv", "**/metadata.csv", "metadata.jsonl", "**/metadata.jsonl", ] # metadata file for ImageFolder and AudioFolder else: METADATA_PATTERNS = [ "**/metadata.csv", "**/metadata.jsonl", ] # metadata file for ImageFolder and AudioFolder WILDCARD_CHARACTERS = "*[]" FILES_TO_IGNORE = [ "README.md", "config.json", "dataset_info.json", "dataset_infos.json", "dummy_data.zip", "dataset_dict.json", ] def contains_wildcards(pattern: str) -> bool: return any(wilcard_character in pattern for wilcard_character in WILDCARD_CHARACTERS) def sanitize_patterns(patterns: Union[Dict, List, str]) -> Dict[str, Union[List[str], "DataFilesList"]]: """ Take the data_files patterns from the user, and format them into a dictionary. Each key is the name of the split, and each value is a list of data files patterns (paths or urls). The default split is "train". 
    Returns:
        patterns: dictionary of split_name -> list of patterns
    """
    if isinstance(patterns, dict):
        return {str(key): value if isinstance(value, list) else [value] for key, value in patterns.items()}
    elif isinstance(patterns, str):
        return {SANITIZED_DEFAULT_SPLIT: [patterns]}
    elif isinstance(patterns, list):
        if any(isinstance(pattern, dict) for pattern in patterns):
            for pattern in patterns:
                if not (
                    isinstance(pattern, dict)
                    and len(pattern) == 2
                    and "split" in pattern
                    and isinstance(pattern.get("path"), (str, list))
                ):
                    raise ValueError(
                        f"Expected each split to have a 'path' key which can be a string or a list of strings, but got {pattern}"
                    )
            splits = [pattern["split"] for pattern in patterns]
            if len(set(splits)) != len(splits):
                raise ValueError(f"Some splits are duplicated in data_files: {splits}")
            return {
                str(pattern["split"]): pattern["path"] if isinstance(pattern["path"], list) else [pattern["path"]]
                for pattern in patterns
            }
        else:
            return {SANITIZED_DEFAULT_SPLIT: patterns}
    else:
        return sanitize_patterns(list(patterns))


def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool:
    """
    When a path matches a pattern, we additionally check if it's inside a special directory
    we ignore by default (if it starts with a double underscore).

    Users can still explicitly request a filepath inside such a directory if "__pycache__" is
    mentioned explicitly in the requested pattern.

    Some examples:

    base directory:

        ./
        └── __pycache__
            └── b.txt

    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "**")
    True
    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "*/b.txt")
    True
    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__pycache__/*")
    False
    >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__*/*")
    False
    """
    # We just need to check if every special directory from the path is present explicitly in the pattern.
    # Since we assume that the path matches the pattern, it's equivalent to counting that both
    # the parent path and the parent pattern have the same number of special directories.
    data_dirs_to_ignore_in_path = [part for part in PurePath(matched_rel_path).parent.parts if part.startswith("__")]
    data_dirs_to_ignore_in_pattern = [part for part in PurePath(pattern).parent.parts if part.startswith("__")]
    return len(data_dirs_to_ignore_in_path) != len(data_dirs_to_ignore_in_pattern)


def _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path: str, pattern: str) -> bool:
    """
    When a path matches a pattern, we additionally check if it's a hidden file or if it's inside
    a hidden directory we ignore by default, i.e. if the file name or a parent directory name starts with a dot.

    Users can still explicitly request a filepath that is hidden or is inside a hidden directory
    if the hidden part is mentioned explicitly in the requested pattern.
    Some examples:

    base directory:

        ./
        └── .hidden_file.txt

    >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", "**")
    True
    >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", ".*")
    False

    base directory:

        ./
        └── .hidden_dir
            └── a.txt

    >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", "**")
    True
    >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".*/*")
    False
    >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".hidden_dir/*")
    False

    base directory:

        ./
        └── .hidden_dir
            └── .hidden_file.txt

    >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", "**")
    True
    >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/*")
    True
    >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/.*")
    False
    >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/*")
    True
    >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*")
    False
    """
    # We just need to check if every hidden part from the path is present explicitly in the pattern.
    # Since we assume that the path matches the pattern, it's equivalent to counting that both
    # the path and the pattern have the same number of hidden parts.
    hidden_directories_in_path = [
        part for part in PurePath(matched_rel_path).parts if part.startswith(".") and not set(part) == {"."}
    ]
    hidden_directories_in_pattern = [
        part for part in PurePath(pattern).parts if part.startswith(".") and not set(part) == {"."}
    ]
    return len(hidden_directories_in_path) != len(hidden_directories_in_pattern)


def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Dict[str, List[str]]:
    """
    Get the default pattern from a directory or repository by testing all the supported patterns.
    The first patterns to return a non-empty list of data files are returned.

    In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
    """
    # first check the split patterns like data/{split}-00000-of-00001.parquet
    for split_pattern in ALL_SPLIT_PATTERNS:
        pattern = split_pattern.replace("{split}", "*")
        try:
            data_files = pattern_resolver(pattern)
        except FileNotFoundError:
            continue
        if len(data_files) > 0:
            splits: Set[str] = {
                string_to_dict(xbasename(p), glob_pattern_to_regex(xbasename(split_pattern)))["split"]
                for p in data_files
            }
            if any(not re.match(_split_re, split) for split in splits):
                raise ValueError(f"Split name should match '{_split_re}' but got '{splits}'.")
            sorted_splits = [str(split) for split in DEFAULT_SPLITS if split in splits] + sorted(
                splits - set(DEFAULT_SPLITS)
            )
            return {split: [split_pattern.format(split=split)] for split in sorted_splits}
    # then check the default patterns based on train/valid/test splits
    for patterns_dict in ALL_DEFAULT_PATTERNS:
        non_empty_splits = []
        for split, patterns in patterns_dict.items():
            for pattern in patterns:
                try:
                    data_files = pattern_resolver(pattern)
                except FileNotFoundError:
                    continue
                if len(data_files) > 0:
                    non_empty_splits.append(split)
                    break
        if non_empty_splits:
            return {split: patterns_dict[split] for split in non_empty_splits}
    raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")


def _get_metadata_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> List[str]:
    """
    Get the supported metadata patterns from a directory or repository.
    """
    non_empty_patterns = []
    for pattern in METADATA_PATTERNS:
        try:
            metadata_files = pattern_resolver(pattern)
            if len(metadata_files) > 0:
                non_empty_patterns.append(pattern)
        except FileNotFoundError:
            pass
    if non_empty_patterns:
        return non_empty_patterns
    raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")


def resolve_pattern(
    pattern: str,
    base_path: str,
    allowed_extensions: Optional[List[str]] = None,
    download_config: Optional[DownloadConfig] = None,
) -> List[str]:
    """
    Resolve the paths and URLs of the data files from the pattern passed by the user.

    You can use patterns to resolve multiple local files. Here are a few examples:
    - *.csv to match all the CSV files at the first level
    - **.csv to match all the CSV files at any level
    - data/* to match all the files inside "data"
    - data/** to match all the files inside "data" and its subdirectories

    The patterns are resolved using the fsspec glob. glob.glob, Path.glob, Path.match or fnmatch do not support ** with a prefix/suffix
    other than a forward slash /. For instance, this means **.json is the same as *.json. On the contrary, the fsspec glob has no limits
    regarding the ** prefix/suffix, resulting in **.json being equivalent to **/*.json.

    More generally:
    - '*' matches any character except a forward-slash (to match just the file or directory name)
    - '**' matches any character including a forward-slash /

    Hidden files and directories (i.e. whose names start with a dot) are ignored, unless they are explicitly requested.
    The same applies to special directories that start with a double underscore like "__pycache__".
    You can still include one if the pattern explicitly mentions it:
    - to include a hidden file: "*/.hidden.txt" or "*/.*"
    - to include a hidden directory: ".hidden/*" or ".*/*"
    - to include a special directory: "__special__/*" or "__*/*"

    Example::

        >>> from datasets.data_files import resolve_pattern
        >>> base_path = "."
        >>> resolve_pattern("docs/**/*.py", base_path)
        ['/Users/mariosasko/Desktop/projects/datasets/docs/source/_config.py']

    Args:
        pattern (str): Unix pattern or paths or URLs of the data files to resolve.
            The paths can be absolute or relative to base_path.
            Remote filesystems using fsspec are supported, e.g. with the hf:// protocol.
        base_path (str): Base path to use when resolving relative paths.
        allowed_extensions (Optional[list], optional): White-list of file extensions to use. Defaults to None (all extensions).
            For example: allowed_extensions=[".csv", ".json", ".txt", ".parquet"]
    Returns:
        List[str]: List of paths or URLs to the local or remote files that match the patterns.
    """
    if is_relative_path(pattern):
        pattern = xjoin(base_path, pattern)
    elif is_local_path(pattern):
        base_path = os.path.splitdrive(pattern)[0] + os.sep
    else:
        base_path = ""
    pattern, storage_options = _prepare_path_and_storage_options(pattern, download_config=download_config)
    fs, _, _ = get_fs_token_paths(pattern, storage_options=storage_options)
    fs_base_path = base_path.split("::")[0].split("://")[-1] or fs.root_marker
    fs_pattern = pattern.split("::")[0].split("://")[-1]
    files_to_ignore = set(FILES_TO_IGNORE) - {xbasename(pattern)}
    protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]
    protocol_prefix = protocol + "://" if protocol != "file" else ""
    glob_kwargs = {}
    if protocol == "hf" and config.HF_HUB_VERSION >= version.parse("0.20.0"):
        # 10 times faster glob with detail=True (ignores costly info like lastCommit)
        glob_kwargs["expand_info"] = False
    matched_paths = [
        filepath if filepath.startswith(protocol_prefix) else protocol_prefix + filepath
        for filepath, info in fs.glob(pattern, detail=True, **glob_kwargs).items()
        if info["type"] == "file"
        and (xbasename(filepath) not in files_to_ignore)
        and not _is_inside_unrequested_special_dir(
            os.path.relpath(filepath, fs_base_path), os.path.relpath(fs_pattern, fs_base_path)
        )
        and not _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(
            os.path.relpath(filepath, fs_base_path), os.path.relpath(fs_pattern, fs_base_path)
        )
    ]  # ignore .ipynb and __pycache__, but keep /../
    if allowed_extensions is not None:
        out = [
            filepath
            for filepath in matched_paths
            if any("." + suffix in allowed_extensions for suffix in xbasename(filepath).split(".")[1:])
        ]
        if len(out) < len(matched_paths):
            invalid_matched_files = list(set(matched_paths) - set(out))
            logger.info(
                f"Some files matched the pattern '{pattern}' but don't have valid data file extensions: {invalid_matched_files}"
            )
    else:
        out = matched_paths
    if not out:
        error_msg = f"Unable to find '{pattern}'"
        if allowed_extensions is not None:
            error_msg += f" with any supported extension {list(allowed_extensions)}"
        raise FileNotFoundError(error_msg)
    return out


def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig] = None) -> Dict[str, List[str]]:
    """
    Get the default pattern from a directory by testing all the supported patterns.
    The first patterns to return a non-empty list of data files are returned.
Some examples of supported patterns: Input: my_dataset_repository/ ├── README.md └── dataset.csv Output: {"train": ["**"]} Input: my_dataset_repository/ ├── README.md ├── train.csv └── test.csv my_dataset_repository/ ├── README.md └── data/ ├── train.csv └── test.csv my_dataset_repository/ ├── README.md ├── train_0.csv ├── train_1.csv ├── train_2.csv ├── train_3.csv ├── test_0.csv └── test_1.csv Output: {'train': ['train[-._ 0-9/]**', '**/*[-._ 0-9/]train[-._ 0-9/]**', 'training[-._ 0-9/]**', '**/*[-._ 0-9/]training[-._ 0-9/]**'], 'test': ['test[-._ 0-9/]**', '**/*[-._ 0-9/]test[-._ 0-9/]**', 'testing[-._ 0-9/]**', '**/*[-._ 0-9/]testing[-._ 0-9/]**', ...]} Input: my_dataset_repository/ ├── README.md └── data/ ├── train/ │ ├── shard_0.csv │ ├── shard_1.csv │ ├── shard_2.csv │ └── shard_3.csv └── test/ ├── shard_0.csv └── shard_1.csv Output: {'train': ['train[-._ 0-9/]**', '**/*[-._ 0-9/]train[-._ 0-9/]**', 'training[-._ 0-9/]**', '**/*[-._ 0-9/]training[-._ 0-9/]**'], 'test': ['test[-._ 0-9/]**', '**/*[-._ 0-9/]test[-._ 0-9/]**', 'testing[-._ 0-9/]**', '**/*[-._ 0-9/]testing[-._ 0-9/]**', ...]} Input: my_dataset_repository/ ├── README.md └── data/ ├── train-00000-of-00003.csv ├── train-00001-of-00003.csv ├── train-00002-of-00003.csv ├── test-00000-of-00001.csv ├── random-00000-of-00003.csv ├── random-00001-of-00003.csv └── random-00002-of-00003.csv Output: {'train': ['data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'], 'test': ['data/test-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'], 'random': ['data/random-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*']} In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS. """ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config) try: return _get_data_files_patterns(resolver) except FileNotFoundError: raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None def get_metadata_patterns( base_path: str, download_config: Optional[DownloadConfig] = None, ) -> List[str]: """ Get the supported metadata patterns from a local directory. 
""" resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config) try: return _get_metadata_files_patterns(resolver) except FileNotFoundError: raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None def _get_single_origin_metadata( data_file: str, download_config: Optional[DownloadConfig] = None, ) -> Tuple[str]: data_file, storage_options = _prepare_path_and_storage_options(data_file, download_config=download_config) fs, _, _ = get_fs_token_paths(data_file, storage_options=storage_options) if isinstance(fs, HfFileSystem): resolved_path = fs.resolve_path(data_file) return (resolved_path.repo_id, resolved_path.revision) elif isinstance(fs, HTTPFileSystem) and data_file.startswith(config.HF_ENDPOINT): hffs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token) data_file = "hf://" + data_file[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1) resolved_path = hffs.resolve_path(data_file) return (resolved_path.repo_id, resolved_path.revision) info = fs.info(data_file) # s3fs uses "ETag", gcsfs uses "etag", and for local we simply check mtime for key in ["ETag", "etag", "mtime"]: if key in info: return (str(info[key]),) return () def _get_origin_metadata( data_files: List[str], max_workers=64, download_config: Optional[DownloadConfig] = None, ) -> Tuple[str]: return thread_map( partial(_get_single_origin_metadata, download_config=download_config), data_files, max_workers=max_workers, tqdm_class=hf_tqdm, desc="Resolving data files", # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached disable=len(data_files) <= 16 or None, ) class DataFilesList(List[str]): """ List of data files (absolute local paths or URLs). It has two construction methods given the user's data files patterns : - ``from_hf_repo``: resolve patterns inside a dataset repository - ``from_local_or_remote``: resolve patterns from a local path Moreover DataFilesList has an additional attribute ``origin_metadata``. It can store: - the last modified time of local files - ETag of remote files - commit sha of a dataset repository Thanks to this additional attribute, it is possible to hash the list and get a different hash if and only if at least one file changed. This is useful for caching Dataset objects that are obtained from a list of data files. 
""" def __init__(self, data_files: List[str], origin_metadata: List[Tuple[str]]): super().__init__(data_files) self.origin_metadata = origin_metadata def __add__(self, other): return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata) @classmethod def from_hf_repo( cls, patterns: List[str], dataset_info: huggingface_hub.hf_api.DatasetInfo, base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesList": base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip("/") return cls.from_patterns( patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config ) @classmethod def from_local_or_remote( cls, patterns: List[str], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesList": base_path = base_path if base_path is not None else Path().resolve().as_posix() return cls.from_patterns( patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config ) @classmethod def from_patterns( cls, patterns: List[str], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesList": base_path = base_path if base_path is not None else Path().resolve().as_posix() data_files = [] for pattern in patterns: try: data_files.extend( resolve_pattern( pattern, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config, ) ) except FileNotFoundError: if not has_magic(pattern): raise origin_metadata = _get_origin_metadata(data_files, download_config=download_config) return cls(data_files, origin_metadata) def filter_extensions(self, extensions: List[str]) -> "DataFilesList": pattern = "|".join("\\" + ext for ext in extensions) pattern = re.compile(f".*({pattern})(\\..+)?$") return DataFilesList( [data_file for data_file in self if pattern.match(data_file)], origin_metadata=self.origin_metadata, ) class DataFilesDict(Dict[str, DataFilesList]): """ Dict of split_name -> list of data files (absolute local paths or URLs). It has two construction methods given the user's data files patterns : - ``from_hf_repo``: resolve patterns inside a dataset repository - ``from_local_or_remote``: resolve patterns from a local path Moreover each list is a DataFilesList. It is possible to hash the dictionary and get a different hash if and only if at least one file changed. For more info, see ``DataFilesList``. This is useful for caching Dataset objects that are obtained from a list of data files. Changing the order of the keys of this dictionary also doesn't change its hash. 
""" @classmethod def from_local_or_remote( cls, patterns: Dict[str, Union[List[str], DataFilesList]], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesDict": out = cls() for key, patterns_for_key in patterns.items(): out[key] = ( DataFilesList.from_local_or_remote( patterns_for_key, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config, ) if not isinstance(patterns_for_key, DataFilesList) else patterns_for_key ) return out @classmethod def from_hf_repo( cls, patterns: Dict[str, Union[List[str], DataFilesList]], dataset_info: huggingface_hub.hf_api.DatasetInfo, base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesDict": out = cls() for key, patterns_for_key in patterns.items(): out[key] = ( DataFilesList.from_hf_repo( patterns_for_key, dataset_info=dataset_info, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config, ) if not isinstance(patterns_for_key, DataFilesList) else patterns_for_key ) return out @classmethod def from_patterns( cls, patterns: Dict[str, Union[List[str], DataFilesList]], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesDict": out = cls() for key, patterns_for_key in patterns.items(): out[key] = ( DataFilesList.from_patterns( patterns_for_key, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config, ) if not isinstance(patterns_for_key, DataFilesList) else patterns_for_key ) return out def filter_extensions(self, extensions: List[str]) -> "DataFilesDict": out = type(self)() for key, data_files_list in self.items(): out[key] = data_files_list.filter_extensions(extensions) return out class DataFilesPatternsList(List[str]): """ List of data files patterns (absolute local paths or URLs). For each pattern there should also be a list of allowed extensions to keep, or a None ot keep all the files for the pattern. 
""" def __init__( self, patterns: List[str], allowed_extensions: List[Optional[List[str]]], ): super().__init__(patterns) self.allowed_extensions = allowed_extensions def __add__(self, other): return DataFilesList([*self, *other], self.allowed_extensions + other.allowed_extensions) @classmethod def from_patterns( cls, patterns: List[str], allowed_extensions: Optional[List[str]] = None ) -> "DataFilesPatternsDict": return cls(patterns, [allowed_extensions] * len(patterns)) def resolve( self, base_path: str, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesList": base_path = base_path if base_path is not None else Path().resolve().as_posix() data_files = [] for pattern, allowed_extensions in zip(self, self.allowed_extensions): try: data_files.extend( resolve_pattern( pattern, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config, ) ) except FileNotFoundError: if not has_magic(pattern): raise origin_metadata = _get_origin_metadata(data_files, download_config=download_config) return DataFilesList(data_files, origin_metadata) def filter_extensions(self, extensions: List[str]) -> "DataFilesList": return DataFilesPatternsList( self, [allowed_extensions + extensions for allowed_extensions in self.allowed_extensions] ) class DataFilesPatternsDict(Dict[str, DataFilesPatternsList]): """ Dict of split_name -> list of data files patterns (absolute local paths or URLs). """ @classmethod def from_patterns( cls, patterns: Dict[str, List[str]], allowed_extensions: Optional[List[str]] = None ) -> "DataFilesPatternsDict": out = cls() for key, patterns_for_key in patterns.items(): out[key] = ( DataFilesPatternsList.from_patterns( patterns_for_key, allowed_extensions=allowed_extensions, ) if not isinstance(patterns_for_key, DataFilesPatternsList) else patterns_for_key ) return out def resolve( self, base_path: str, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesDict": out = DataFilesDict() for key, data_files_patterns_list in self.items(): out[key] = data_files_patterns_list.resolve(base_path, download_config) return out def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsDict": out = type(self)() for key, data_files_patterns_list in self.items(): out[key] = data_files_patterns_list.filter_extensions(extensions) return out
datasets/src/datasets/data_files.py/0
{ "file_path": "datasets/src/datasets/data_files.py", "repo_id": "datasets", "token_count": 13354 }
63
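A minimal sketch of how the pieces in `data_files.py` fit together: infer split patterns for a local directory, normalize them, and resolve them into a `DataFilesDict`. The directory path is a placeholder and is assumed to contain at least one data file (otherwise `get_data_patterns` raises `EmptyDatasetError`).

```python
from datasets.data_files import DataFilesDict, get_data_patterns, sanitize_patterns

base_path = "path/to/my_dataset_repository"  # hypothetical local directory with data files
patterns = get_data_patterns(base_path)      # e.g. {"train": ["**"]} or sharded/split patterns
patterns = sanitize_patterns(patterns)       # normalize to split_name -> list of patterns
data_files = DataFilesDict.from_patterns(patterns, base_path=base_path)
print({split: len(files) for split, files in data_files.items()})
```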
import s3fs from ..utils.deprecation_utils import deprecated @deprecated("Use s3fs.S3FileSystem instead.") class S3FileSystem(s3fs.S3FileSystem): """ `datasets.filesystems.S3FileSystem` is a subclass of [`s3fs.S3FileSystem`](https://s3fs.readthedocs.io/en/latest/api.html). Users can use this class to access S3 as if it were a file system. It exposes a filesystem-like API (ls, cp, open, etc.) on top of S3 storage. Provide credentials either explicitly (`key=`, `secret=`) or with boto's credential methods. See botocore documentation for more information. If no credentials are available, use `anon=True`. Args: anon (`bool`, default to `False`): Whether to use anonymous connection (public buckets only). If `False`, uses the key/secret given, or boto's credential resolver (client_kwargs, environment, variables, config files, EC2 IAM server, in that order). key (`str`): If not anonymous, use this access key ID, if specified. secret (`str`): If not anonymous, use this secret access key, if specified. token (`str`): If not anonymous, use this security token, if specified. use_ssl (`bool`, defaults to `True`): Whether to use SSL in connections to S3; may be faster without, but insecure. If `use_ssl` is also set in `client_kwargs`, the value set in `client_kwargs` will take priority. s3_additional_kwargs (`dict`): Parameters that are used when calling S3 API methods. Typically used for things like ServerSideEncryption. client_kwargs (`dict`): Parameters for the botocore client. requester_pays (`bool`, defaults to `False`): Whether `RequesterPays` buckets are supported. default_block_size (`int`): If given, the default block size value used for `open()`, if no specific value is given at all time. The built-in default is 5MB. default_fill_cache (`bool`, defaults to `True`): Whether to use cache filling with open by default. Refer to `S3File.open`. default_cache_type (`str`, defaults to `bytes`): If given, the default `cache_type` value used for `open()`. Set to `none` if no caching is desired. See fsspec's documentation for other available `cache_type` values. version_aware (`bool`, defaults to `False`): Whether to support bucket versioning. If enable this will require the user to have the necessary IAM permissions for dealing with versioned objects. cache_regions (`bool`, defaults to `False`): Whether to cache bucket regions. Whenever a new bucket is used, it will first find out which region it belongs to and then use the client for that region. asynchronous (`bool`, defaults to `False`): Whether this instance is to be used from inside coroutines. config_kwargs (`dict`): Parameters passed to `botocore.client.Config`. **kwargs: Other parameters for core session. session (`aiobotocore.session.AioSession`): Session to be used for all connections. This session will be used inplace of creating a new session inside S3FileSystem. For example: `aiobotocore.session.AioSession(profile='test_user')`. skip_instance_cache (`bool`): Control reuse of instances. Passed on to `fsspec`. use_listings_cache (`bool`): Control reuse of directory listings. Passed on to `fsspec`. listings_expiry_time (`int` or `float`): Control reuse of directory listings. Passed on to `fsspec`. max_paths (`int`): Control reuse of directory listings. Passed on to `fsspec`. Examples: Listing files from public S3 bucket. 
    ```py
    >>> import datasets
    >>> s3 = datasets.filesystems.S3FileSystem(anon=True)  # doctest: +SKIP
    >>> s3.ls('public-datasets/imdb/train')  # doctest: +SKIP
    ['dataset_info.json.json','dataset.arrow','state.json']
    ```

    Listing files from private S3 bucket using `aws_access_key_id` and `aws_secret_access_key`.

    ```py
    >>> import datasets
    >>> s3 = datasets.filesystems.S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key)  # doctest: +SKIP
    >>> s3.ls('my-private-datasets/imdb/train')  # doctest: +SKIP
    ['dataset_info.json.json','dataset.arrow','state.json']
    ```

    Using `S3FileSystem` with `botocore.session.Session` and custom `aws_profile`.

    ```py
    >>> import botocore
    >>> from datasets.filesystems import S3FileSystem

    >>> s3_session = botocore.session.Session(profile_name='my_profile_name')
    >>> s3 = S3FileSystem(session=s3_session)  # doctest: +SKIP
    ```

    Loading dataset from S3 using `S3FileSystem` and [`load_from_disk`].

    ```py
    >>> from datasets import load_from_disk
    >>> from datasets.filesystems import S3FileSystem

    >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key)  # doctest: +SKIP

    >>> dataset = load_from_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options)  # doctest: +SKIP

    >>> print(len(dataset))
    25000
    ```

    Saving dataset to S3 using `S3FileSystem` and [`Dataset.save_to_disk`].

    ```py
    >>> from datasets import load_dataset
    >>> from datasets.filesystems import S3FileSystem

    >>> dataset = load_dataset("imdb")

    >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key)  # doctest: +SKIP

    >>> dataset.save_to_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options)  # doctest: +SKIP
    ```
    """
datasets/src/datasets/filesystems/s3filesystem.py/0
{ "file_path": "datasets/src/datasets/filesystems/s3filesystem.py", "repo_id": "datasets", "token_count": 2170 }
64
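Since the class above is a deprecated thin wrapper, new code can pass the same credentials to `s3fs.S3FileSystem` directly and reuse them as `storage_options`. A minimal sketch; the bucket path and credentials are placeholders, and the network call is left commented out.

```python
import s3fs
from datasets import load_from_disk

storage_options = {"key": "<aws_access_key_id>", "secret": "<aws_secret_access_key>"}
fs = s3fs.S3FileSystem(**storage_options)  # same kwargs the deprecated wrapper forwarded

# Hypothetical bucket; uncomment with real credentials and a real path:
# dataset = load_from_disk("s3://my-private-datasets/imdb/train", storage_options=storage_options)
```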
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class SparkDatasetReader(AbstractDatasetReader): """A dataset reader that reads from a Spark DataFrame. When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be provided. Streaming is not currently supported. """ def __init__( self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs, ): super().__init__( split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, ) self._load_from_cache_file = load_from_cache_file self._file_format = file_format self.builder = Spark( df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, ) def read(self): if self.streaming: return self.builder.as_streaming_dataset(split=self.split) download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=download_mode, file_format=self._file_format, ) return self.builder.as_dataset(split=self.split)
datasets/src/datasets/io/spark.py/0
{ "file_path": "datasets/src/datasets/io/spark.py", "repo_id": "datasets", "token_count": 787 }
65
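A minimal sketch of the reader above driving a local Spark session; the toy rows and the cache directory are illustrative, and `Dataset.from_spark` is the usual public entry point that wraps this class.

```python
from pyspark.sql import SparkSession

from datasets.io.spark import SparkDatasetReader

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])

# streaming=False materializes the data with Spark jobs into cache_dir,
# which must be reachable from the driver (see the class docstring above).
ds = SparkDatasetReader(df, streaming=False, cache_dir="./spark_cache").read()
print(ds)
```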
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal logger = datasets.utils.logging.get_logger(__name__) _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"] _PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"] _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"] @dataclass class CsvConfig(datasets.BuilderConfig): """BuilderConfig for CSV.""" sep: str = "," delimiter: Optional[str] = None header: Optional[Union[int, List[int], str]] = "infer" names: Optional[List[str]] = None column_names: Optional[List[str]] = None index_col: Optional[Union[int, str, List[int], List[str]]] = None usecols: Optional[Union[List[int], List[str]]] = None prefix: Optional[str] = None mangle_dupe_cols: bool = True engine: Optional[Literal["c", "python", "pyarrow"]] = None converters: Dict[Union[int, str], Callable[[Any], Any]] = None true_values: Optional[list] = None false_values: Optional[list] = None skipinitialspace: bool = False skiprows: Optional[Union[int, List[int]]] = None nrows: Optional[int] = None na_values: Optional[Union[str, List[str]]] = None keep_default_na: bool = True na_filter: bool = True verbose: bool = False skip_blank_lines: bool = True thousands: Optional[str] = None decimal: str = "." lineterminator: Optional[str] = None quotechar: str = '"' quoting: int = 0 escapechar: Optional[str] = None comment: Optional[str] = None encoding: Optional[str] = None dialect: Optional[str] = None error_bad_lines: bool = True warn_bad_lines: bool = True skipfooter: int = 0 doublequote: bool = True memory_map: bool = False float_precision: Optional[str] = None chunksize: int = 10_000 features: Optional[datasets.Features] = None encoding_errors: Optional[str] = "strict" on_bad_lines: Literal["error", "warn", "skip"] = "error" date_format: Optional[str] = None def __post_init__(self): if self.delimiter is not None: self.sep = self.delimiter if self.column_names is not None: self.names = self.column_names @property def pd_read_csv_kwargs(self): pd_read_csv_kwargs = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": 
self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class Csv(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = CsvConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table def _generate_tables(self, files): schema = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str dtype = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values()) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(files)): csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs) try: for batch_idx, df in enumerate(csv_file_reader): pa_table = pa.Table.from_pandas(df) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise
datasets/src/datasets/packaged_modules/csv/csv.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/csv/csv.py", "repo_id": "datasets", "token_count": 3790 }
66
import sys from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast if TYPE_CHECKING: import sqlite3 import sqlalchemy logger = datasets.utils.logging.get_logger(__name__) @dataclass class SqlConfig(datasets.BuilderConfig): """BuilderConfig for SQL.""" sql: Union[str, "sqlalchemy.sql.Selectable"] = None con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None index_col: Optional[Union[str, List[str]]] = None coerce_float: bool = True params: Optional[Union[List, Tuple, Dict]] = None parse_dates: Optional[Union[List, Dict]] = None columns: Optional[List[str]] = None chunksize: Optional[int] = 10_000 features: Optional[datasets.Features] = None def __post_init__(self): if self.sql is None: raise ValueError("sql must be specified") if self.con is None: raise ValueError("con must be specified") def create_config_id( self, config_kwargs: dict, custom_features: Optional[datasets.Features] = None, ) -> str: config_kwargs = config_kwargs.copy() # We need to stringify the Selectable object to make its hash deterministic # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html sql = config_kwargs["sql"] if not isinstance(sql, str): if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules: import sqlalchemy if isinstance(sql, sqlalchemy.sql.Selectable): engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://") sql_str = str(sql.compile(dialect=engine.dialect)) config_kwargs["sql"] = sql_str else: raise TypeError( f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" ) else: raise TypeError( f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" ) con = config_kwargs["con"] if not isinstance(con, str): config_kwargs["con"] = id(con) logger.info( f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead." 
) return super().create_config_id(config_kwargs, custom_features=custom_features) @property def pd_read_sql_kwargs(self): pd_read_sql_kwargs = { "index_col": self.index_col, "columns": self.columns, "params": self.params, "coerce_float": self.coerce_float, "parse_dates": self.parse_dates, } return pd_read_sql_kwargs class Sql(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = SqlConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})] def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table def _generate_tables(self): chunksize = self.config.chunksize sql_reader = pd.read_sql( self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs ) sql_reader = [sql_reader] if chunksize is None else sql_reader for chunk_idx, df in enumerate(sql_reader): pa_table = pa.Table.from_pandas(df) yield chunk_idx, self._cast_table(pa_table)
datasets/src/datasets/packaged_modules/sql/sql.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/sql/sql.py", "repo_id": "datasets", "token_count": 1963 }
67
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=True) class ImageClassification(TaskTemplate): task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"image": Image()}) label_schema: ClassVar[Features] = Features({"labels": ClassLabel}) image_column: str = "image" label_column: str = "labels" def align_with_features(self, features): if self.label_column not in features: raise ValueError(f"Column {self.label_column} is not present in features.") if not isinstance(features[self.label_column], ClassLabel): raise ValueError(f"Column {self.label_column} is not a ClassLabel.") task_template = copy.deepcopy(self) label_schema = self.label_schema.copy() label_schema["labels"] = features[self.label_column] task_template.__dict__["label_schema"] = label_schema return task_template @property def column_mapping(self) -> Dict[str, str]: return { self.image_column: "image", self.label_column: "labels", }
datasets/src/datasets/tasks/image_classification.py/0
{ "file_path": "datasets/src/datasets/tasks/image_classification.py", "repo_id": "datasets", "token_count": 487 }
68
# deprecated, please use the `filelock` package instead from filelock import ( # noqa: F401 # imported for backward compatibility TODO: remove in 3.0.0 BaseFileLock, SoftFileLock, Timeout, UnixFileLock, WindowsFileLock, ) from ._filelock import FileLock # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0
datasets/src/datasets/utils/filelock.py/0
{ "file_path": "datasets/src/datasets/utils/filelock.py", "repo_id": "datasets", "token_count": 115 }
69
# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF-specific utils import.""" import os import warnings from functools import partial from math import ceil from uuid import uuid4 import numpy as np import pyarrow as pa from multiprocess import get_context try: from multiprocess.shared_memory import SharedMemory except ImportError: SharedMemory = None # Version checks should prevent this being called on older Python versions from .. import config def minimal_tf_collate_fn(features): if isinstance(features, dict): # case batch_size=None: nothing to collate return features elif config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") first = features[0] batch = {} for k, v in first.items(): if isinstance(v, np.ndarray): batch[k] = np.stack([f[k] for f in features]) elif isinstance(v, tf.Tensor): batch[k] = tf.stack([f[k] for f in features]) else: batch[k] = np.array([f[k] for f in features]) return batch def minimal_tf_collate_fn_with_renaming(features): batch = minimal_tf_collate_fn(features) if "label" in batch: batch["labels"] = batch["label"] del batch["label"] return batch def is_numeric_pa_type(pa_type): if pa.types.is_list(pa_type): return is_numeric_pa_type(pa_type.value_type) return pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type) or pa.types.is_decimal(pa_type) def is_numeric_feature(feature): from .. 
import ClassLabel, Sequence, Value from ..features.features import _ArrayXD if isinstance(feature, Sequence): return is_numeric_feature(feature.feature) elif isinstance(feature, list): return is_numeric_feature(feature[0]) elif isinstance(feature, _ArrayXD): return is_numeric_pa_type(feature().storage_dtype) elif isinstance(feature, Value): return is_numeric_pa_type(feature()) elif isinstance(feature, ClassLabel): return True else: return False def np_get_batch( indices, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, return_dict=False ): if not isinstance(indices, np.ndarray): indices = indices.numpy() is_batched = True # Optimization - if we're loading a sequential batch, do it with slicing instead of a list of indices if isinstance(indices, np.integer): batch = dataset[indices.item()] is_batched = False elif np.all(np.diff(indices) == 1): batch = dataset[indices[0] : indices[-1] + 1] elif isinstance(indices, np.ndarray): batch = dataset[indices] else: raise RuntimeError("Unexpected type for indices: {}".format(type(indices))) if cols_to_retain is not None: batch = { key: value for key, value in batch.items() if key in cols_to_retain or key in ("label", "label_ids", "labels") } if is_batched: actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same # Our collators expect a list of dicts, not a dict of lists/arrays, so we invert batch = [{key: value[i] for key, value in batch.items()} for i in range(actual_size)] batch = collate_fn(batch, **collate_fn_args) if return_dict: out_batch = {} for col, cast_dtype in columns_to_np_types.items(): # In case the collate_fn returns something strange array = np.array(batch[col]) array = array.astype(cast_dtype) out_batch[col] = array else: out_batch = [] for col, cast_dtype in columns_to_np_types.items(): # In case the collate_fn returns something strange array = np.array(batch[col]) array = array.astype(cast_dtype) out_batch.append(array) return out_batch def dataset_to_tf( dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, output_signature, shuffle, batch_size, drop_remainder, ): """Create a tf.data.Dataset from the underlying Dataset. This is a single-process method - the multiprocess equivalent is multiprocess_dataset_to_tf. Args: dataset (`Dataset`): Dataset to wrap with tf.data.Dataset. cols_to_retain (`List[str]`): Dataset column(s) to load in the tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and that do not exist in the original dataset. collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the `collate_fn`. Can be empty. columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes. output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to `tf.TensorSpec` objects. shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for validation/evaluation. batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`. drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided, defaults to the same setting as shuffle. 
Returns: `tf.data.Dataset` """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") # TODO Matt: When our minimum Python version is 3.8 or higher, we can delete all of this and move everything # to the NumPy multiprocessing path. if hasattr(tf, "random_index_shuffle"): random_index_shuffle = tf.random_index_shuffle elif hasattr(tf.random.experimental, "index_shuffle"): random_index_shuffle = tf.random.experimental.index_shuffle else: if len(dataset) > 10_000_000: warnings.warn( "to_tf_dataset() can be memory-inefficient on versions of TensorFlow older than 2.9. " "If you are iterating over a dataset with a very large number of samples, consider " "upgrading to TF >= 2.9." ) random_index_shuffle = None getter_fn = partial( np_get_batch, dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, return_dict=False, ) # This works because dictionaries always output in the same order tout = [tf.dtypes.as_dtype(dtype) for dtype in columns_to_np_types.values()] @tf.function(input_signature=[tf.TensorSpec(None, tf.int64)]) def fetch_function(indices): output = tf.py_function( getter_fn, inp=[indices], Tout=tout, ) return {key: output[i] for i, key in enumerate(columns_to_np_types.keys())} tf_dataset = tf.data.Dataset.range(len(dataset)) if shuffle and random_index_shuffle is not None: base_seed = tf.fill((3,), value=tf.cast(-1, dtype=tf.int64)) def scan_random_index(state, index): if tf.reduce_all(state == -1): # This generates a new random seed once per epoch only, # to ensure that we iterate over each sample exactly once per epoch state = tf.random.uniform(shape=(3,), maxval=2**62, dtype=tf.int64) shuffled_index = random_index_shuffle(index=index, seed=state, max_index=len(dataset) - 1) return state, shuffled_index tf_dataset = tf_dataset.scan(base_seed, scan_random_index) elif shuffle: tf_dataset = tf_dataset.shuffle(tf_dataset.cardinality()) if batch_size is not None: tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder) tf_dataset = tf_dataset.map(fetch_function) if batch_size is not None: def ensure_shapes(input_dict): return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()} else: # Ensure shape but remove batch dimension of output_signature[key].shape def ensure_shapes(input_dict): return {key: tf.ensure_shape(val, output_signature[key].shape[1:]) for key, val in input_dict.items()} return tf_dataset.map(ensure_shapes) class SharedMemoryContext: # This is a context manager for creating shared memory that ensures cleanup happens even if a process is interrupted # The process that creates shared memory is always the one responsible for unlinking it in the end def __init__(self): self.created_shms = [] self.opened_shms = [] def get_shm(self, name, size, create): shm = SharedMemory(size=int(size), name=name, create=create) if create: # We only unlink the ones we created in this context self.created_shms.append(shm) else: # If we didn't create it, we only close it when done, we don't unlink it self.opened_shms.append(shm) return shm def get_array(self, name, shape, dtype, create): shm = self.get_shm(name=name, size=np.prod(shape) * np.dtype(dtype).itemsize, create=create) return np.ndarray(shape, dtype=dtype, buffer=shm.buf) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): for shm in self.created_shms: shm.close() 
shm.unlink() for shm in self.opened_shms: shm.close() class NumpyMultiprocessingGenerator: def __init__( self, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, output_signature, shuffle, batch_size, drop_remainder, num_workers, ): self.dataset = dataset self.cols_to_retain = cols_to_retain self.collate_fn = collate_fn self.collate_fn_args = collate_fn_args self.string_columns = [col for col, dtype in columns_to_np_types.items() if dtype in (np.unicode_, np.str_)] # Strings will be converted to arrays of single unicode chars, so that we can have a constant itemsize self.columns_to_np_types = { col: dtype if col not in self.string_columns else np.dtype("U1") for col, dtype in columns_to_np_types.items() } self.output_signature = output_signature self.shuffle = shuffle self.batch_size = batch_size self.drop_remainder = drop_remainder self.num_workers = num_workers # Because strings are converted to characters, we need to add one extra dimension to the shape self.columns_to_ranks = { col: int(spec.shape.rank) if col not in self.string_columns else int(spec.shape.rank) + 1 for col, spec in output_signature.items() } def __iter__(self): # Make sure we only spawn workers if they have work to do num_workers = min(self.num_workers, int(ceil(len(self.dataset) / self.batch_size))) # Do the shuffling in iter so that it's done at the start of each epoch per_worker_batches, final_batch, final_batch_worker = self.distribute_batches( self.dataset, self.batch_size, self.drop_remainder, num_workers, self.shuffle ) ctx = get_context("spawn") names = [] shape_arrays = [] workers = [] array_ready_events = [ctx.Event() for _ in range(num_workers)] array_loaded_events = [ctx.Event() for _ in range(num_workers)] base_args = { "dataset": self.dataset, "cols_to_retain": self.cols_to_retain, "collate_fn": self.collate_fn, "collate_fn_args": self.collate_fn_args, "columns_to_np_types": self.columns_to_np_types, "columns_to_ranks": self.columns_to_ranks, "string_columns": self.string_columns, } with SharedMemoryContext() as shm_ctx: for i in range(num_workers): worker_random_id = str(uuid4()) worker_name = f"dw_{i}_{worker_random_id}"[:10] names.append(worker_name) worker_shape_arrays = { col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=True) for col, rank in self.columns_to_ranks.items() } shape_arrays.append(worker_shape_arrays) worker_indices = per_worker_batches[i] if i == final_batch_worker and final_batch is not None: final_batch_arg = final_batch else: final_batch_arg = None worker_kwargs = { "worker_name": worker_name, "indices": worker_indices, "extra_batch": final_batch_arg, "array_ready_event": array_ready_events[i], "array_loaded_event": array_loaded_events[i], **base_args, } worker = ctx.Process(target=self.worker_loop, kwargs=worker_kwargs, daemon=True) worker.start() workers.append(worker) end_signal_received = False while not end_signal_received: for i in range(num_workers): if not array_ready_events[i].wait(timeout=60): raise TimeoutError("Data loading worker timed out!") array_ready_events[i].clear() array_shapes = shape_arrays[i] if any(np.any(shape < 0) for shape in array_shapes.values()): # Child processes send negative array shapes to indicate # that no more data is going to be sent end_signal_received = True break # Matt: Because array shapes are variable we recreate the shared memory each iteration. # I suspect repeatedly opening lots of shared memory is the bottleneck for the parent process. 
# A future optimization, at the cost of some code complexity, could be to reuse shared memory # between iterations, but this would require knowing in advance the maximum size, or having # a system to only create a new memory block when a new maximum size is seen. # Another potential optimization would be to figure out which memory copies are necessary, # or whether we can yield objects straight out of shared memory. with SharedMemoryContext() as batch_shm_ctx: # This memory context only lasts long enough to copy everything out of the batch arrays = { col: batch_shm_ctx.get_array( f"{names[i]}_{col}", shape=shape, dtype=self.columns_to_np_types[col], create=False, ) for col, shape in array_shapes.items() } # Copy everything out of shm because the memory # will be unlinked by the child process at some point arrays = {col: np.copy(arr) for col, arr in arrays.items()} # Now we convert any unicode char arrays to strings for string_col in self.string_columns: arrays[string_col] = ( arrays[string_col].view(f"U{arrays[string_col].shape[-1]}").squeeze(-1) ) yield arrays array_loaded_events[i].set() # Now we just do some cleanup # Shared memory is cleaned up by the context manager, so we just make sure workers finish for worker in workers: worker.join() def __call__(self): return self @staticmethod def worker_loop( dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, columns_to_ranks, string_columns, indices, extra_batch, worker_name, array_ready_event, array_loaded_event, ): os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") tf.config.set_visible_devices([], "GPU") # Make sure workers don't try to allocate GPU memory def send_batch_to_parent(indices): batch = np_get_batch( indices=indices, dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, return_dict=True, ) # Now begins the fun part where we start shovelling shared memory at the parent process out_arrays = {} with SharedMemoryContext() as batch_shm_ctx: # The batch shared memory context exists only as long as it takes for the parent process # to read everything, after which it cleans everything up again for col, cast_dtype in columns_to_np_types.items(): # Everything has to be np.array for this to work, even if the collate_fn is giving us tf.Tensor array = batch[col] if col in string_columns: # We can't send unicode arrays over shared memory, so we convert to single chars ("U1") # which have a fixed width of 4 bytes. The parent process will convert these back to strings. 
array = array.view("U1").reshape(array.shape + (-1,)) shape_arrays[col][:] = array.shape out_arrays[col] = batch_shm_ctx.get_array( f"{worker_name}_{col}", shape=array.shape, dtype=cast_dtype, create=True ) out_arrays[col][:] = array array_ready_event.set() array_loaded_event.wait() array_loaded_event.clear() with SharedMemoryContext() as shm_ctx: shape_arrays = { col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=False) for col, rank in columns_to_ranks.items() } for batch in indices: send_batch_to_parent(batch) if extra_batch is not None: send_batch_to_parent(extra_batch) # Now we send a batsignal to the parent process that we're done for col, array in shape_arrays.items(): array[:] = -1 array_ready_event.set() @staticmethod def distribute_batches(dataset, batch_size, drop_remainder, num_workers, shuffle): indices = np.arange(len(dataset)) if shuffle: np.random.shuffle(indices) num_samples = len(indices) # We distribute the batches so that reading from the workers in round-robin order yields the exact # order specified in indices. This is only important when shuffle is False, but we do it regardless. incomplete_batch_cutoff = num_samples - (num_samples % batch_size) indices, last_incomplete_batch = np.split(indices, [incomplete_batch_cutoff]) if drop_remainder or len(last_incomplete_batch) == 0: last_incomplete_batch = None indices = indices.reshape(-1, batch_size) num_batches = len(indices) final_batches_cutoff = num_batches - (num_batches % num_workers) indices, final_batches = np.split(indices, [final_batches_cutoff]) indices = indices.reshape(-1, num_workers, batch_size) per_worker_indices = np.split(indices, indices.shape[1], axis=1) per_worker_indices = [np.squeeze(worker_indices, 1) for worker_indices in per_worker_indices] # Distribute the final batches to the first workers for i in range(len(final_batches)): # len(final_batches) can be zero, and is always less than num_workers per_worker_indices[i] = np.concatenate([per_worker_indices[i], final_batches[i].reshape(1, -1)], axis=0) # Add the last incomplete batch to the next worker, which might be the first worker if last_incomplete_batch is not None: incomplete_batch_worker_idx = len(final_batches) else: incomplete_batch_worker_idx = None return per_worker_indices, last_incomplete_batch, incomplete_batch_worker_idx def multiprocess_dataset_to_tf( dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, output_signature, shuffle, batch_size, drop_remainder, num_workers, ): """Create a tf.data.Dataset from the underlying Dataset. This is a multi-process method - the single-process equivalent is dataset_to_tf. Args: dataset (`Dataset`): Dataset to wrap with tf.data.Dataset. cols_to_retain (`List[str]`): Dataset column(s) to load in the tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and that do not exist in the original dataset. collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the `collate_fn`. Can be empty. columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes. output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to `tf.TensorSpec` objects. shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for validation/evaluation. 
batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`. drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided, defaults to the same setting as shuffle. num_workers (`int`): Number of workers to use for loading the dataset. Should be >= 1. Returns: `tf.data.Dataset` """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") data_generator = NumpyMultiprocessingGenerator( dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, output_signature=output_signature, shuffle=shuffle, batch_size=batch_size, drop_remainder=drop_remainder, num_workers=num_workers, ) tf_dataset = tf.data.Dataset.from_generator(data_generator, output_signature=output_signature) if drop_remainder: dataset_length = int(len(dataset) // batch_size) else: dataset_length = int(ceil(len(dataset) / batch_size)) return tf_dataset.apply(tf.data.experimental.assert_cardinality(dataset_length))
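# Illustrative usage sketch (not part of the library; normally `Dataset.to_tf_dataset()` assembles these
# arguments for you). The dataset `ds`, the "input_ids" column and the `collate_fn` below are assumptions
# made purely for illustration.
#
#   import numpy as np
#   import tensorflow as tf
#
#   def collate_fn(features):
#       # hypothetical collator: stack pre-padded examples into a single numpy batch
#       return {"input_ids": np.stack([f["input_ids"] for f in features])}
#
#   tf_ds = multiprocess_dataset_to_tf(
#       dataset=ds,
#       cols_to_retain=["input_ids"],
#       collate_fn=collate_fn,
#       collate_fn_args={},
#       columns_to_np_types={"input_ids": np.int64},
#       output_signature={"input_ids": tf.TensorSpec(shape=(None, None), dtype=tf.int64)},
#       shuffle=True,
#       batch_size=8,
#       drop_remainder=True,
#       num_workers=2,
#   )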
datasets/src/datasets/utils/tf_utils.py/0
{ "file_path": "datasets/src/datasets/utils/tf_utils.py", "repo_id": "datasets", "token_count": 11176 }
70
import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node NUM_SHARDS = 4 NUM_ITEMS_PER_SHARD = 3 class FailedTestError(RuntimeError): pass def gen(shards: List[str]): for shard in shards: for i in range(NUM_ITEMS_PER_SHARD): yield {"i": i, "shard": shard} def main(): rank = int(os.environ["RANK"]) world_size = int(os.environ["WORLD_SIZE"]) parser = ArgumentParser() parser.add_argument("--streaming", type=bool) parser.add_argument("--local_rank", type=int) parser.add_argument("--num_workers", type=int, default=0) args = parser.parse_args() streaming = args.streaming num_workers = args.num_workers gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]} ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs) if not streaming: ds = Dataset.from_list(list(ds)) ds = split_dataset_by_node(ds, rank=rank, world_size=world_size) dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers) full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD expected_local_size = full_size // world_size expected_local_size += int(rank < (full_size % world_size)) local_size = sum(1 for _ in dataloader) if local_size != expected_local_size: raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}") if __name__ == "__main__": main()
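# Illustrative launch sketch (an assumption, not part of the test suite): the script reads RANK and
# WORLD_SIZE from the environment, which `torchrun` sets, e.g.
#   torchrun --nproc_per_node=2 tests/distributed_scripts/run_torch_distributed.py --streaming True --num_workers 2
# Each rank then checks that it iterates over its expected share of the NUM_SHARDS * NUM_ITEMS_PER_SHARD samples.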
datasets/tests/distributed_scripts/run_torch_distributed.py/0
{ "file_path": "datasets/tests/distributed_scripts/run_torch_distributed.py", "repo_id": "datasets", "token_count": 617 }
71
import os import time import uuid from contextlib import contextmanager from typing import Optional import pytest import requests from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__" CI_HUB_USER_FULL_NAME = "Dummy User" CI_HUB_USER_TOKEN = "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" CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co" CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}" CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}" @pytest.fixture def ci_hfh_hf_hub_url(monkeypatch): monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE ) @pytest.fixture def ci_hub_config(monkeypatch): monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL) @pytest.fixture def set_ci_hub_access_token(ci_hub_config): old_environ = dict(os.environ) os.environ["HF_TOKEN"] = CI_HUB_USER_TOKEN yield os.environ.clear() os.environ.update(old_environ) @pytest.fixture(scope="session") def hf_api(): return HfApi(endpoint=CI_HUB_ENDPOINT) @pytest.fixture(scope="session") def hf_token(): yield CI_HUB_USER_TOKEN @pytest.fixture def cleanup_repo(hf_api): def _cleanup_repo(repo_id): hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset") return _cleanup_repo @pytest.fixture def temporary_repo(cleanup_repo): @contextmanager def _temporary_repo(repo_id: Optional[str] = None): repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}" try: yield repo_id finally: try: cleanup_repo(repo_id) except RepositoryNotFoundError: pass return _temporary_repo @pytest.fixture(scope="session") def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file): repo_name = f"repo_txt_data-{int(time.time() * 10e6)}" repo_id = f"{CI_HUB_USER}/{repo_name}" hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True) hf_api.upload_file( token=hf_token, path_or_fileobj=str(text_file), path_in_repo="data/text_data.txt", repo_id=repo_id, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url): return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session") def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path): repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}" repo_id = f"{CI_HUB_USER}/{repo_name}" hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True) hf_api.upload_file( token=hf_token, path_or_fileobj=str(zip_csv_with_dir_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def hf_private_dataset_repo_zipped_txt_data( hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url ): return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session") def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path): repo_name = f"repo_zipped_img_data-{int(time.time() 
* 10e6)}" repo_id = f"{CI_HUB_USER}/{repo_name}" hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True) hf_api.upload_file( token=hf_token, path_or_fileobj=str(zip_image_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def hf_private_dataset_repo_zipped_img_data( hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url ): return hf_private_dataset_repo_zipped_img_data_
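# Illustrative usage sketch (an assumption, not part of these fixtures): inside a test, `temporary_repo`
# yields a unique dataset repo id and cleans the repo up on exit.
#
#   def test_push_to_hub(temporary_repo, hf_api, hf_token):
#       with temporary_repo() as repo_id:
#           hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
#           ...  # exercise code that reads from / writes to the repo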
datasets/tests/fixtures/hub.py/0
{ "file_path": "datasets/tests/fixtures/hub.py", "repo_id": "datasets", "token_count": 2270 }
72
import textwrap import pyarrow as pa import pytest from datasets import Features, Value from datasets.packaged_modules.json.json import Json @pytest.fixture def jsonl_file(tmp_path): filename = tmp_path / "file.jsonl" data = textwrap.dedent( """\ {"col_1": -1} {"col_1": 1, "col_2": 2} {"col_1": 10, "col_2": 20} """ ) with open(filename, "w") as f: f.write(data) return str(filename) @pytest.fixture def jsonl_file_utf16_encoded(tmp_path): filename = tmp_path / "file_utf16_encoded.jsonl" data = textwrap.dedent( """\ {"col_1": -1} {"col_1": 1, "col_2": 2} {"col_1": 10, "col_2": 20} """ ) with open(filename, "w", encoding="utf-16") as f: f.write(data) return str(filename) @pytest.fixture def json_file_with_list_of_dicts(tmp_path): filename = tmp_path / "file_with_list_of_dicts.json" data = textwrap.dedent( """\ [ {"col_1": -1}, {"col_1": 1, "col_2": 2}, {"col_1": 10, "col_2": 20} ] """ ) with open(filename, "w") as f: f.write(data) return str(filename) @pytest.fixture def json_file_with_list_of_dicts_field(tmp_path): filename = tmp_path / "file_with_list_of_dicts_field.json" data = textwrap.dedent( """\ { "field1": 1, "field2": "aabb", "field3": [ {"col_1": -1}, {"col_1": 1, "col_2": 2}, {"col_1": 10, "col_2": 20} ] } """ ) with open(filename, "w") as f: f.write(data) return str(filename) @pytest.mark.parametrize( "file_fixture, config_kwargs", [ ("jsonl_file", {}), ("jsonl_file_utf16_encoded", {"encoding": "utf-16"}), ("json_file_with_list_of_dicts", {}), ("json_file_with_list_of_dicts_field", {"field": "field3"}), ], ) def test_json_generate_tables(file_fixture, config_kwargs, request): json = Json(**config_kwargs) generator = json._generate_tables([[request.getfixturevalue(file_fixture)]]) pa_table = pa.concat_tables([table for _, table in generator]) assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20]} @pytest.mark.parametrize( "file_fixture, config_kwargs", [ ( "jsonl_file", {"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})}, ), ( "json_file_with_list_of_dicts", {"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})}, ), ( "json_file_with_list_of_dicts_field", { "field": "field3", "features": Features( {"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")} ), }, ), ], ) def test_json_generate_tables_with_missing_features(file_fixture, config_kwargs, request): json = Json(**config_kwargs) generator = json._generate_tables([[request.getfixturevalue(file_fixture)]]) pa_table = pa.concat_tables([table for _, table in generator]) assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20], "missing_col": [None, None, None]}
datasets/tests/packaged_modules/test_json.py/0
{ "file_path": "datasets/tests/packaged_modules/test_json.py", "repo_id": "datasets", "token_count": 1731 }
73
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) FILE_CONTENT = """\ Text data. Second line of data.""" FILE_PATH = "file" @pytest.fixture(scope="session") def zstd_path(tmp_path_factory): path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd") data = bytes(FILE_CONTENT, "utf-8") with zstd.open(path, "wb") as f: f.write(data) return path @pytest.fixture def tmpfs_file(tmpfs): with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f: f.write(FILE_CONTENT) return FILE_PATH @pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"]) def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file): input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} input_path = input_paths[compression_format] cache_dir = tmp_path / "cache" download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True) extracted_path = cached_path(input_path, download_config=download_config) with open(extracted_path) as f: extracted_file_content = f.read() with open(text_file) as f: expected_file_content = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted", [True, False]) @pytest.mark.parametrize("default_cache_dir", [True, False]) def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch): custom_cache_dir = "custom_cache" custom_extracted_dir = "custom_extracted_dir" custom_extracted_path = tmp_path / "custom_extracted_path" if default_extracted: expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path)) expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) filename = xz_file download_config = ( DownloadConfig(extract_compressed_file=True) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True) ) extracted_file_path = cached_path(filename, download_config=download_config) assert Path(extracted_file_path).parent.parts[-2:] == expected def test_cached_path_local(text_file): # input absolute path -> output absolute path text_file_abs = str(Path(text_file).resolve()) assert os.path.samefile(cached_path(text_file_abs), text_file_abs) # input relative path -> output absolute path text_file = __file__ text_file_abs = str(Path(text_file).resolve()) text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd()))) assert os.path.samefile(cached_path(text_file_rel), text_file_abs) def test_cached_path_missing_local(tmp_path): # absolute path missing_file = str(tmp_path.resolve() / "__missing_file__.txt") with pytest.raises(FileNotFoundError): cached_path(missing_file) # relative path missing_file = "./__missing_file__.txt" with pytest.raises(FileNotFoundError): cached_path(missing_file) def test_get_from_cache_fsspec(tmpfs_file): output_path = get_from_cache(f"tmp://{tmpfs_file}") with open(output_path) as f: output_file_content = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE", True) def 
test_cached_path_offline(): with pytest.raises(OfflineModeIsEnabled): cached_path("https://huggingface.co") @patch("datasets.config.HF_DATASETS_OFFLINE", True) def test_http_offline(tmp_path_factory): filename = tmp_path_factory.mktemp("data") / "file.html" with pytest.raises(OfflineModeIsEnabled): http_get("https://huggingface.co", temp_file=filename) with pytest.raises(OfflineModeIsEnabled): http_head("https://huggingface.co") @patch("datasets.config.HF_DATASETS_OFFLINE", True) def test_ftp_offline(tmp_path_factory): filename = tmp_path_factory.mktemp("data") / "file.html" with pytest.raises(OfflineModeIsEnabled): ftp_get("ftp://huggingface.co", temp_file=filename) with pytest.raises(OfflineModeIsEnabled): ftp_head("ftp://huggingface.co") @patch("datasets.config.HF_DATASETS_OFFLINE", True) def test_fsspec_offline(tmp_path_factory): filename = tmp_path_factory.mktemp("data") / "file.html" with pytest.raises(OfflineModeIsEnabled): fsspec_get("s3://huggingface.co", temp_file=filename) with pytest.raises(OfflineModeIsEnabled): fsspec_head("s3://huggingface.co")
datasets/tests/test_file_utils.py/0
{ "file_path": "datasets/tests/test_file_utils.py", "repo_id": "datasets", "token_count": 2016 }
74
import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def add_one(i): # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def test_parallel_backend_input(): with parallel_backend("spark"): assert ParallelBackendConfig.backend_name == "spark" lst = [1, 2, 3] with pytest.raises(ValueError): with parallel_backend("unsupported backend"): map_nested(add_one, lst, num_proc=2) with pytest.raises(ValueError): with parallel_backend("unsupported backend"): map_nested(add_one, lst, num_proc=-1) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc", [2, -1]) def test_parallel_backend_map_nested(num_proc): s1 = [1, 2] s2 = {"a": 1, "b": 2} s3 = {"a": [1, 2], "b": [3, 4]} s4 = {"a": {"1": 1}, "b": 2} s5 = {"a": 1, "b": 2, "c": 3, "d": 4} expected_map_nested_s1 = [2, 3] expected_map_nested_s2 = {"a": 2, "b": 3} expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]} expected_map_nested_s4 = {"a": {"1": 2}, "b": 3} expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark"): assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1 assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2 assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3 assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4 assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
datasets/tests/test_parallel.py/0
{ "file_path": "datasets/tests/test_parallel.py", "repo_id": "datasets", "token_count": 825 }
75
<jupyter_start><jupyter_text>Unit 8 Part 2: Advanced Deep Reinforcement Learning. Using Sample Factory to play Doom from pixelsIn this notebook, we will learn how to train a Deep Neural Network to collect objects in a 3D environment based on the game of Doom, a video of the resulting policy is shown below. We train this policy using [Sample Factory](https://www.samplefactory.dev/), an asynchronous implementation of the PPO algorithm.Please note the following points:* [Sample Factory](https://www.samplefactory.dev/) is an advanced RL framework and **only functions on Linux and Mac** (not Windows).* The framework performs best on a **GPU machine with many CPU cores**, where it can achieve speeds of 100k interactions per second. The resources available on a standard Colab notebook **limit the performance of this library**. So the speed in this setting **does not reflect the real-world performance**.* Benchmarks for Sample Factory are available in a number of settings, check out the [examples](https://github.com/alex-petrenko/sample-factory/tree/master/sf_examples) if you want to find out more.<jupyter_code>from IPython.display import HTML HTML('''<video width="640" height="480" controls> <source src="https://huggingface.co/edbeeching/doom_health_gathering_supreme_3333/resolve/main/replay.mp4" type="video/mp4">Your browser does not support the video tag.</video>''' )<jupyter_output><empty_output><jupyter_text>To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process), you need to push one model:- `doom_health_gathering_supreme` get a result of >= 5.To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model, **the result = mean_reward - std of reward**If you don't find your model, **go to the bottom of the page and click on the refresh button**For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process Set the GPU 💪- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` - `Hardware Accelerator > GPU` Before starting to train our agent, let's **study the library and environments we're going to use**. Sample Factory[Sample Factory](https://www.samplefactory.dev/) is one of the **fastest RL libraries focused on very efficient synchronous and asynchronous implementations of policy gradients (PPO)**.Sample Factory is thoroughly **tested, used by many researchers and practitioners**, and is actively maintained. Our implementation is known to **reach SOTA performance in a variety of domains while minimizing RL experiment training time and hardware requirements**. 
Key features- Highly optimized algorithm [architecture](https://www.samplefactory.dev/06-architecture/overview/) for maximum learning throughput- [Synchronous and asynchronous](https://www.samplefactory.dev/07-advanced-topics/sync-async/) training regimes- [Serial (single-process) mode](https://www.samplefactory.dev/07-advanced-topics/serial-mode/) for easy debugging- Optimal performance in both CPU-based and [GPU-accelerated environments](https://www.samplefactory.dev/09-environment-integrations/isaacgym/)- Single- & multi-agent training, self-play, supports [training multiple policies](https://www.samplefactory.dev/07-advanced-topics/multi-policy-training/) at once on one or many GPUs- Population-Based Training ([PBT](https://www.samplefactory.dev/07-advanced-topics/pbt/))- Discrete, continuous, hybrid action spaces- Vector-based, image-based, dictionary observation spaces- Automatically creates a model architecture by parsing action/observation space specification. Supports [custom model architectures](https://www.samplefactory.dev/03-customization/custom-models/)- Designed to be imported into other projects, [custom environments](https://www.samplefactory.dev/03-customization/custom-environments/) are first-class citizens- Detailed [WandB and Tensorboard summaries](https://www.samplefactory.dev/05-monitoring/metrics-reference/), [custom metrics](https://www.samplefactory.dev/05-monitoring/custom-metrics/)- [HuggingFace 🤗 integration](https://www.samplefactory.dev/10-huggingface/huggingface/) (upload trained models and metrics to the Hub)- [Multiple](https://www.samplefactory.dev/09-environment-integrations/mujoco/) [example](https://www.samplefactory.dev/09-environment-integrations/atari/) [environment](https://www.samplefactory.dev/09-environment-integrations/vizdoom/) [integrations](https://www.samplefactory.dev/09-environment-integrations/dmlab/) with tuned parameters and trained modelsAll of the above policies are available on the 🤗 hub. Search for the tag [sample-factory](https://huggingface.co/models?library=sample-factory&sort=downloads) How sample-factory worksSample-factory is one of the **most highly optimized RL implementations available to the community**.It works by **spawning multiple processes that run rollout workers, inference workers and a learner worker**.The *workers* **communicate through shared memory, which lowers the communication cost between processes**.The *rollout workers* interact with the environment and send observations to the *inference workers*.The *inference workers* query a fixed version of the policy and **send actions back to the rollout worker**.After *k* steps the rollout workers send a trajectory of experience to the learner worker, **which it uses to update the agent’s policy network**. Actor Critic models in Sample-factoryActor Critic models in Sample Factory are composed of three components:- **Encoder** - Process input observations (images, vectors) and map them to a vector. This is the part of the model you will most likely want to customize.- **Core** - Integrate vectors from one or more encoders, can optionally include a single- or multi-layer LSTM/GRU in a memory-based agent.- **Decoder** - Apply additional layers to the output of the model core before computing the policy and value outputs.The library has been designed to automatically support any observation and action spaces. Users can easily add their custom models.
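To make the decomposition concrete, here is a minimal conceptual sketch in plain PyTorch. It only illustrates the Encoder -> Core -> Decoder data flow; it does not use Sample Factory's actual base classes or registration API, and the layer sizes are arbitrary assumptions.<jupyter_code>import torch
import torch.nn as nn


class TinyActorCritic(nn.Module):
    """Conceptual sketch of the Encoder -> Core -> Decoder decomposition (not Sample Factory code)."""

    def __init__(self, obs_dim: int, action_dim: int, hidden: int = 64):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(obs_dim, hidden), nn.ReLU())  # observations -> vector
        self.core = nn.GRU(hidden, hidden, batch_first=True)                 # optional memory (GRU/LSTM)
        self.policy_head = nn.Linear(hidden, action_dim)                     # decoder: action logits
        self.value_head = nn.Linear(hidden, 1)                               # decoder: state value

    def forward(self, obs, rnn_state=None):
        x = self.encoder(obs)                                # (batch, obs_dim) -> (batch, hidden)
        x, rnn_state = self.core(x.unsqueeze(1), rnn_state)  # add a length-1 time dimension for the GRU
        x = x.squeeze(1)
        return self.policy_head(x), self.value_head(x), rnn_state<jupyter_output><empty_output><jupyter_text>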
You can find out more in the [documentation](https://www.samplefactory.dev/03-customization/custom-models/actor-critic-models-in-sample-factory). ViZDoom[ViZDoom](https://vizdoom.cs.put.edu.pl/) is an **open-source Python interface for the Doom Engine**.The library was created in 2016 by Marek Wydmuch and Michal Kempka at the Institute of Computing Science, Poznan University of Technology, Poland.The library enables the **training of agents directly from the screen pixels in a number of scenarios**, including team deathmatch, shown in the video below. Because the ViZDoom environment is based on a game that was created in the 90s, it can be run on modern hardware at accelerated speeds, **allowing us to learn complex AI behaviors fairly quickly**.The library includes features such as:- Multi-platform (Linux, macOS, Windows),- API for Python and C++,- [OpenAI Gym](https://www.gymlibrary.dev/) environment wrappers,- Easy-to-create custom scenarios (visual editors, scripting language, and examples available),- Async and sync single-player and multiplayer modes,- Lightweight (few MBs) and fast (up to 7000 fps in sync mode, single-threaded),- Customizable resolution and rendering parameters,- Access to the depth buffer (3D vision),- Automatic labeling of game objects visible in the frame,- Access to the audio buffer,- Access to the list of actors/objects and map geometry,- Off-screen rendering and episode recording,- Time scaling in async mode. We first need to install some dependencies that are required for the ViZDoom environmentNow that our Colab runtime is set up, we can start by installing the dependencies required to run ViZDoom on Linux.If you are following along on your own Mac machine, you will want to follow the installation instructions on the [github page](https://github.com/Farama-Foundation/ViZDoom/blob/master/doc/Quickstart.md-quickstart-for-macos-and-anaconda3-python-36).<jupyter_code>%%capture %%bash # Install ViZDoom deps from # https://github.com/mwydmuch/ViZDoom/blob/master/doc/Building.md#-linux apt-get install build-essential zlib1g-dev libsdl2-dev libjpeg-dev \ nasm tar libbz2-dev libgtk2.0-dev cmake git libfluidsynth-dev libgme-dev \ libopenal-dev timidity libwildmidi-dev unzip ffmpeg # Boost libraries apt-get install libboost-all-dev # Lua binding dependencies apt-get install liblua5.1-dev<jupyter_output><empty_output><jupyter_text>Then we can install Sample Factory and ViZDoom- This can take 7min<jupyter_code># install python libraries # thanks toinsson !pip install faster-fifo==1.4.2 !pip install vizdoom !pip install sample-factory==2.0.2<jupyter_output><empty_output><jupyter_text>Setting up the Doom Environment in sample-factory<jupyter_code>import functools from sample_factory.algo.utils.context import global_model_factory from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args from sample_factory.envs.env_utils import register_env from sample_factory.train import run_rl from sf_examples.vizdoom.doom.doom_model import make_vizdoom_encoder from sf_examples.vizdoom.doom.doom_params import add_doom_env_args, doom_override_defaults from sf_examples.vizdoom.doom.doom_utils import DOOM_ENVS, make_doom_env_from_spec # Registers all the ViZDoom environments def register_vizdoom_envs(): for env_spec in DOOM_ENVS: make_env_func = functools.partial(make_doom_env_from_spec, env_spec) register_env(env_spec.name, make_env_func) # Sample Factory allows the registration of a custom Neural Network architecture # See
https://github.com/alex-petrenko/sample-factory/blob/master/sf_examples/vizdoom/doom/doom_model.py for more details def register_vizdoom_models(): global_model_factory().register_encoder_factory(make_vizdoom_encoder) def register_vizdoom_components(): register_vizdoom_envs() register_vizdoom_models() # parse the command line args and create a config def parse_vizdoom_cfg(argv=None, evaluation=False): parser, _ = parse_sf_args(argv=argv, evaluation=evaluation) # parameters specific to Doom envs add_doom_env_args(parser) # override Doom default values for algo parameters doom_override_defaults(parser) # second parsing pass yields the final configuration final_cfg = parse_full_cfg(parser, argv) return final_cfg<jupyter_output><empty_output><jupyter_text>Now that the setup is complete, we can train the agent. We have chosen here to learn a ViZDoom task called `Health Gathering Supreme`. The scenario: Health Gathering SupremeThe objective of this scenario is to **teach the agent how to survive without knowing what makes it survive**. The agent knows only that **life is precious** and death is bad so **it must learn what prolongs its existence and that its health is connected with it**.The map is a rectangle containing walls and with a green, acidic floor which **hurts the player periodically**. Initially there are some medkits spread uniformly over the map. A new medkit falls from the skies every now and then. **Medkits heal some portions of the player's health** - to survive, the agent needs to pick them up. The episode finishes after the player's death or on timeout.Further configuration:- Living_reward = 1- 3 available buttons: turn left, turn right, move forward- 1 available game variable: HEALTH- death penalty = 100You can find out more about the scenarios available in ViZDoom [here](https://github.com/Farama-Foundation/ViZDoom/tree/master/scenarios).There are also a number of more complex scenarios that have been created for ViZDoom, such as the ones detailed on [this github page](https://github.com/edbeeching/3d_control_deep_rl). Training the agent- We're going to train the agent for 4,000,000 steps; this will take approximately 20 minutes<jupyter_code>## Start the training, this should take around 15 minutes register_vizdoom_components() # The scenario we train on today is health gathering # other scenarios include "doom_basic", "doom_two_colors_easy", "doom_dm", "doom_dwango5", "doom_my_way_home", "doom_deadly_corridor", "doom_defend_the_center", "doom_defend_the_line" env = "doom_health_gathering_supreme" cfg = parse_vizdoom_cfg(argv=[f"--env={env}", "--num_workers=8", "--num_envs_per_worker=4", "--train_for_env_steps=4000000"]) status = run_rl(cfg)<jupyter_output><empty_output><jupyter_text>Let's take a look at the performance of the trained policy and output a video of the agent.<jupyter_code>from sample_factory.enjoy import enjoy cfg = parse_vizdoom_cfg(argv=[f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=10"], evaluation=True) status = enjoy(cfg)<jupyter_output><empty_output><jupyter_text>Now let's visualize the performance of the agent<jupyter_code>from base64 import b64encode from IPython.display import HTML mp4 = open('/content/train_dir/default_experiment/replay.mp4','rb').read() data_url = "data:video/mp4;base64," + b64encode(mp4).decode() HTML(""" <video width=640 controls> <source src="%s" type="video/mp4"> </video> """ % data_url)<jupyter_output><empty_output><jupyter_text>The agent has learned something, but its performance could be better.
We would clearly need to train for longer. But let's upload this model to the Hub. Now let's upload your checkpoint and video to the Hugging Face Hub To be able to share your model with the community there are three more steps to follow:1️⃣ (If it's not already done) create an account on HF ➡ https://huggingface.co/join2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role**- Copy the token- Run the cell below and paste the token If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login`<jupyter_code>from huggingface_hub import notebook_login notebook_login() !git config --global credential.helper store from sample_factory.enjoy import enjoy hf_username = "ThomasSimonini" # insert your HuggingFace username here cfg = parse_vizdoom_cfg(argv=[f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=10", "--max_num_frames=100000", "--push_to_hub", f"--hf_repository={hf_username}/rl_course_vizdoom_health_gathering_supreme"], evaluation=True) status = enjoy(cfg)<jupyter_output><empty_output><jupyter_text>Let's load another model This agent's performance was good, but we can do better! Let's download and visualize an agent trained for 10B timesteps from the hub.<jupyter_code>#download the agent from the hub !python -m sample_factory.huggingface.load_from_hub -r edbeeching/doom_health_gathering_supreme_2222 -d ./train_dir !ls train_dir/doom_health_gathering_supreme_2222 env = "doom_health_gathering_supreme" cfg = parse_vizdoom_cfg(argv=[f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=10", "--experiment=doom_health_gathering_supreme_2222", "--train_dir=train_dir"], evaluation=True) status = enjoy(cfg) mp4 = open('/content/train_dir/doom_health_gathering_supreme_2222/replay.mp4','rb').read() data_url = "data:video/mp4;base64," + b64encode(mp4).decode() HTML(""" <video width=640 controls> <source src="%s" type="video/mp4"> </video> """ % data_url)<jupyter_output><empty_output><jupyter_text>Some additional challenges 🏆: Doom DeathmatchTraining an agent to play a Doom deathmatch **takes many hours on a beefier machine than is available in Colab**.Fortunately, we have **already trained an agent in this scenario and it is available in the 🤗 Hub!** Let's download the model and visualize the agent's performance.<jupyter_code># Download the agent from the hub !python -m sample_factory.huggingface.load_from_hub -r edbeeching/doom_deathmatch_bots_2222 -d ./train_dir<jupyter_output><empty_output><jupyter_text>Given that the agent plays for a long time, the video generation can take **10 minutes**.<jupyter_code>from sample_factory.enjoy import enjoy register_vizdoom_components() env = "doom_deathmatch_bots" cfg = parse_vizdoom_cfg(argv=[f"--env={env}", "--num_workers=1", "--save_video", "--no_render", "--max_num_episodes=1", "--experiment=doom_deathmatch_bots_2222", "--train_dir=train_dir"], evaluation=True) status = enjoy(cfg) mp4 = open('/content/train_dir/doom_deathmatch_bots_2222/replay.mp4','rb').read() data_url = "data:video/mp4;base64," + b64encode(mp4).decode() HTML(""" <video width=640 controls> <source src="%s" type="video/mp4"> </video> """ % data_url)<jupyter_output><empty_output>
deep-rl-class/notebooks/unit8/unit8_part2.ipynb/0
{ "file_path": "deep-rl-class/notebooks/unit8/unit8_part2.ipynb", "repo_id": "deep-rl-class", "token_count": 4950 }
76
# The Reinforcement Learning Framework [[the-reinforcement-learning-framework]] ## The RL Process [[the-rl-process]] <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process.jpg" alt="The RL process" width="100%"> <figcaption>The RL Process: a loop of state, action, reward and next state</figcaption> <figcaption>Source: <a href="http://incompleteideas.net/book/RLbook2020.pdf">Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto</a></figcaption> </figure> To understand the RL process, let’s imagine an agent learning to play a platform game: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process_game.jpg" alt="The RL process" width="100%"> - Our Agent receives **state \\(S_0\\)** from the **Environment** — we receive the first frame of our game (Environment). - Based on that **state \\(S_0\\),** the Agent takes **action \\(A_0\\)** — our Agent will move to the right. - The environment goes to a **new** **state \\(S_1\\)** — new frame. - The environment gives some **reward \\(R_1\\)** to the Agent — we’re not dead *(Positive Reward +1)*. This RL loop outputs a sequence of **state, action, reward and next state.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/sars.jpg" alt="State, Action, Reward, Next State" width="100%"> The agent's goal is to _maximize_ its cumulative reward, **called the expected return.** ## The reward hypothesis: the central idea of Reinforcement Learning [[reward-hypothesis]] ⇒ Why is the goal of the agent to maximize the expected return? Because RL is based on the **reward hypothesis**, which is that all goals can be described as the **maximization of the expected return** (expected cumulative reward). That’s why in Reinforcement Learning, **to have the best behavior,** we aim to learn to take actions that **maximize the expected cumulative reward.** ## Markov Property [[markov-property]] In papers, you’ll see that the RL process is called a **Markov Decision Process** (MDP). We’ll talk again about the Markov Property in the following units. But if you need to remember something today about it, it's this: the Markov Property implies that our agent needs **only the current state to decide** what action to take and **not the history of all the states and actions** they took before. ## Observations/States Space [[obs-space]] Observations/States are the **information our agent gets from the environment.** In the case of a video game, it can be a frame (a screenshot). In the case of the trading agent, it can be the value of a certain stock, etc. There is a differentiation to make between *observation* and *state*, however: - *State s*: is **a complete description of the state of the world** (there is no hidden information). In a fully observed environment. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/chess.jpg" alt="Chess"> <figcaption>In chess game, we receive a state from the environment since we have access to the whole check board information.</figcaption> </figure> In a chess game, we have access to the whole board information, so we receive a state from the environment. In other words, the environment is fully observed. - *Observation o*: is a **partial description of the state.** In a partially observed environment. 
<figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/mario.jpg" alt="Mario"> <figcaption>In Super Mario Bros, we only see the part of the level close to the player, so we receive an observation.</figcaption> </figure> In Super Mario Bros, we only see the part of the level close to the player, so we receive an observation. In Super Mario Bros, we are in a partially observed environment. We receive an observation **since we only see a part of the level.** <Tip> In this course, we use the term "state" to denote both state and observation, but we will make the distinction in implementations. </Tip> To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/obs_space_recap.jpg" alt="Obs space recap" width="100%"> ## Action Space [[action-space]] The Action space is the set of **all possible actions in an environment.** The actions can come from a *discrete* or *continuous space*: - *Discrete space*: the number of possible actions is **finite**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/mario.jpg" alt="Mario"> <figcaption>In Super Mario Bros, we have only 4 possible actions: left, right, up (jumping) and down (crouching).</figcaption> </figure> Again, in Super Mario Bros, we have a finite set of actions since we have only 4 directions. - *Continuous space*: the number of possible actions is **infinite**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/self_driving_car.jpg" alt="Self Driving Car"> <figcaption>A Self Driving Car agent has an infinite number of possible actions since it can turn left 20°, 21,1°, 21,2°, honk, turn right 20°… </figcaption> </figure> To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/action_space.jpg" alt="Action space recap" width="100%"> Taking this information into consideration is crucial because it will **have importance when choosing the RL algorithm in the future.** ## Rewards and the discounting [[rewards]] The reward is fundamental in RL because it’s **the only feedback** for the agent. Thanks to it, our agent knows **if the action taken was good or not.** The cumulative reward at each time step **t** can be written as: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_1.jpg" alt="Rewards"> <figcaption>The cumulative reward equals the sum of all rewards in the sequence. </figcaption> </figure> Which is equivalent to: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_2.jpg" alt="Rewards"> <figcaption>The cumulative reward = rt+1 (rt+k+1 = rt+0+1 = rt+1)+ rt+2 (rt+k+1 = rt+1+1 = rt+2) + ... </figcaption> </figure> However, in reality, **we can’t just add them like that.** The rewards that come sooner (at the beginning of the game) **are more likely to happen** since they are more predictable than the long-term future reward. Let’s say your agent is this tiny mouse that can move one tile each time step, and your opponent is the cat (that can move too). 
The mouse's goal is **to eat the maximum amount of cheese before being eaten by the cat.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_3.jpg" alt="Rewards" width="100%"> As we can see in the diagram, **it’s more probable to eat the cheese near us than the cheese close to the cat** (the closer we are to the cat, the more dangerous it is). Consequently, **the reward near the cat, even if it is bigger (more cheese), will be more discounted** since we’re not really sure we’ll be able to eat it. To discount the rewards, we proceed like this: 1. We define a discount rate called gamma. **It must be between 0 and 1.** Most of the time between **0.95 and 0.99**. - The larger the gamma, the smaller the discount. This means our agent **cares more about the long-term reward.** - On the other hand, the smaller the gamma, the bigger the discount. This means our **agent cares more about the short term reward (the nearest cheese).** 2. Then, each reward will be discounted by gamma to the exponent of the time step. As the time step increases, the cat gets closer to us, **so the future reward is less and less likely to happen.** Our discounted expected cumulative reward is: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/rewards_4.jpg" alt="Rewards" width="100%">
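To make the discounting above concrete, here is a small illustrative snippet (not part of the course notebooks) that computes the discounted return for a list of rewards:

```python
# Sketch of the discounted return: G_t = R_{t+1} + gamma * R_{t+2} + gamma^2 * R_{t+3} + ...
def discounted_return(rewards, gamma=0.95):
    return sum(gamma**k * r for k, r in enumerate(rewards))

# With gamma = 0.95, three rewards of 1 are worth 1 + 0.95 + 0.9025 = 2.8525 in total
print(discounted_return([1.0, 1.0, 1.0], gamma=0.95))
```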
deep-rl-class/units/en/unit1/rl-framework.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/rl-framework.mdx", "repo_id": "deep-rl-class", "token_count": 2504 }
77
# Introducing Q-Learning [[q-learning]] ## What is Q-Learning? [[what-is-q-learning]] Q-Learning is an **off-policy value-based method that uses a TD approach to train its action-value function:** - *Off-policy*: we'll talk about that at the end of this unit. - *Value-based method*: finds the optimal policy indirectly by training a value or action-value function that will tell us **the value of each state or each state-action pair.** - *TD approach:* **updates its action-value function at each step instead of at the end of the episode.** **Q-Learning is the algorithm we use to train our Q-function**, an **action-value function** that determines the value of being at a particular state and taking a specific action at that state. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function.jpg" alt="Q-function"/> <figcaption>Given a state and action, our Q Function outputs a state-action value (also called Q-value)</figcaption> </figure> The **Q comes from "the Quality" (the value) of that action at that state.** Let's recap the difference between value and reward: - The *value of a state*, or a *state-action pair* is the expected cumulative reward our agent gets if it starts at this state (or state-action pair) and then acts accordingly to its policy. - The *reward* is the **feedback I get from the environment** after performing an action at a state. Internally, our Q-function is encoded by **a Q-table, a table where each cell corresponds to a state-action pair value.** Think of this Q-table as **the memory or cheat sheet of our Q-function.** Let's go through an example of a maze. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-1.jpg" alt="Maze example"/> The Q-table is initialized. That's why all values are = 0. This table **contains, for each state and action, the corresponding state-action values.** For this simple example, the state is only defined by the position of the mouse. Therefore, we have 2*3 rows in our Q-table, one row for each possible position of the mouse. In more complex scenarios, the state could contain more information than the position of the actor. 
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-2.jpg" alt="Maze example"/> Here we see that the **state-action value of the initial state and going up is 0:** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-3.jpg" alt="Maze example"/> So: the Q-function uses a Q-table **that has the value of each state-action pair.** Given a state and action, **our Q-function will search inside its Q-table to output the value.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function-2.jpg" alt="Q-function"/> </figure> If we recap, *Q-Learning* **is the RL algorithm that:** - Trains a *Q-function* (an **action-value function**), which internally is a **Q-table that contains all the state-action pair values.** - Given a state and action, our Q-function **will search its Q-table for the corresponding value.** - When the training is done, **we have an optimal Q-function, which means we have optimal Q-table.** - And if we **have an optimal Q-function**, we **have an optimal policy** since we **know the best action to take at each state.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link value policy"/> In the beginning, **our Q-table is useless since it gives arbitrary values for each state-action pair** (most of the time, we initialize the Q-table to 0). As the agent **explores the environment and we update the Q-table, it will give us a better and better approximation** to the optimal policy. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-1.jpg" alt="Q-learning"/> <figcaption>We see here that with the training, our Q-table is better since, thanks to it, we can know the value of each state-action pair.</figcaption> </figure> Now that we understand what Q-Learning, Q-functions, and Q-tables are, **let's dive deeper into the Q-Learning algorithm**. ## The Q-Learning algorithm [[q-learning-algo]] This is the Q-Learning pseudocode; let's study each part and **see how it works with a simple example before implementing it.** Don't be intimidated by it, it's simpler than it looks! We'll go over each step. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-2.jpg" alt="Q-learning"/> ### Step 1: We initialize the Q-table [[step1]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-3.jpg" alt="Q-learning"/> We need to initialize the Q-table for each state-action pair. **Most of the time, we initialize with values of 0.** ### Step 2: Choose an action using the epsilon-greedy strategy [[step2]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-4.jpg" alt="Q-learning"/> The epsilon-greedy strategy is a policy that handles the exploration/exploitation trade-off. The idea is that, with an initial value of ɛ = 1.0: - *With probability 1 — ɛ* : we do **exploitation** (aka our agent selects the action with the highest state-action pair value). - With probability ɛ: **we do exploration** (trying random action). 
At the beginning of the training, **the probability of doing exploration will be huge since ɛ is very high, so most of the time, we'll explore.** But as the training goes on, and consequently our **Q-table gets better and better in its estimations, we progressively reduce the epsilon value** since we will need less and less exploration and more exploitation. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-5.jpg" alt="Q-learning"/> ### Step 3: Perform action At, get reward Rt+1 and next state St+1 [[step3]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-6.jpg" alt="Q-learning"/> ### Step 4: Update Q(St, At) [[step4]] Remember that in TD Learning, we update our policy or value function (depending on the RL method we choose) **after one step of the interaction.** To produce our TD target, **we used the immediate reward \\(R_{t+1}\\) plus the discounted value of the next state**, computed by finding the action that maximizes the current Q-function at the next state. (We call that bootstrap). <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-7.jpg" alt="Q-learning"/> Therefore, our \\(Q(S_t, A_t)\\) **update formula goes like this:** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-8.jpg" alt="Q-learning"/> This means that to update our \\(Q(S_t, A_t)\\): - We need \\(S_t, A_t, R_{t+1}, S_{t+1}\\). - To update our Q-value at a given state-action pair, we use the TD target. How do we form the TD target? 1. We obtain the reward \\(R_{t+1}\\) after taking the action \\(A_t\\). 2. To get the **best state-action pair value** for the next state, we use a greedy policy to select the next best action. Note that this is not an epsilon-greedy policy, this will always take the action with the highest state-action value. 
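Putting these steps together, the update can be sketched as follows (illustrative only; `Qtable` is again an assumed NumPy array, `lr` the learning rate and `gamma` the discount rate):

```python
import numpy as np

def q_learning_update(Qtable, state, action, reward, next_state, lr=0.1, gamma=0.95):
    # TD target: immediate reward + discounted value of the *greedy* action at the next state
    td_target = reward + gamma * np.max(Qtable[next_state])
    # Move Q(s_t, a_t) a little towards the TD target (the difference in parentheses is the TD error)
    Qtable[state, action] = Qtable[state, action] + lr * (td_target - Qtable[state, action])
    return Qtable
```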
Then when the update of this Q-value is done, we start in a new state and select our action **using an epsilon-greedy policy again.** **This is why we say that Q-Learning is an off-policy algorithm.** ## Off-policy vs On-policy [[off-vs-on]] The difference is subtle: - *Off-policy*: using **a different policy for acting (inference) and updating (training).** For instance, with Q-Learning, the epsilon-greedy policy (acting policy) is different from the greedy policy that is **used to select the best next-state action value to update our Q-value (updating policy).** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-1.jpg" alt="Off-on policy"/> <figcaption>Acting Policy</figcaption> </figure> Is different from the policy we use during the training part: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-2.jpg" alt="Off-on policy"/> <figcaption>Updating policy</figcaption> </figure> - *On-policy:* using the **same policy for acting and updating.** For instance, with Sarsa, another value-based algorithm, **the epsilon-greedy policy selects the next state-action pair, not a greedy policy.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-3.jpg" alt="Off-on policy"/> <figcaption>Sarsa</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-4.jpg" alt="Off-on policy"/> </figure>
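As a small illustration of this distinction (a sketch, not course code), the only difference between the two TD targets is which next action is used to bootstrap:

```python
import numpy as np

def q_learning_td_target(Qtable, reward, next_state, gamma=0.95):
    # Off-policy: bootstrap with the greedy (max) value of the next state
    return reward + gamma * np.max(Qtable[next_state])

def sarsa_td_target(Qtable, reward, next_state, next_action, gamma=0.95):
    # On-policy: bootstrap with the value of the action the epsilon-greedy policy actually selected
    return reward + gamma * Qtable[next_state, next_action]
```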
deep-rl-class/units/en/unit2/q-learning.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/q-learning.mdx", "repo_id": "deep-rl-class", "token_count": 2955 }
78
# Glossary

This is a community-created glossary. Contributions are welcome!

- **Deep Q-Learning:** A value-based deep reinforcement learning algorithm that uses a deep neural network to approximate Q-values for actions in a given state. The goal of Deep Q-Learning is to find the optimal policy that maximizes the expected cumulative reward by learning the action-values.

- **Value-based methods:** Reinforcement Learning methods that estimate a value function as an intermediate step towards finding an optimal policy.

- **Policy-based methods:** Reinforcement Learning methods that directly learn to approximate the optimal policy without learning a value function. In practice, they output a probability distribution over actions.

  The benefits of using policy-gradient methods over value-based methods include:
  - simplicity of integration: no need to store action values;
  - ability to learn a stochastic policy: the agent explores the state space without always taking the same trajectory, and avoids the problem of perceptual aliasing;
  - effectiveness in high-dimensional and continuous action spaces; and
  - improved convergence properties.

- **Policy Gradient:** A subset of policy-based methods where the objective is to maximize the performance of a parameterized policy using gradient ascent. The goal of policy-gradient methods is to control the probability distribution of actions by tuning the policy such that good actions (that maximize the return) are sampled more frequently in the future.

- **Monte Carlo Reinforce:** A policy-gradient algorithm that uses an estimated return from an entire episode to update the policy parameters.

If you want to improve the course, you can [open a Pull Request.](https://github.com/huggingface/deep-rl-class/pulls)

This glossary was made possible thanks to:

- [Diego Carpintero](https://github.com/dcarpintero)
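As a quick illustration of the Monte Carlo Reinforce entry above, here is a minimal PyTorch-style sketch of the update it describes (the tensors below are illustrative placeholders, not code from the course):

```python
import torch

# Illustrative placeholders: log-probabilities of the actions taken during one
# episode and the discounted returns estimated for each step of that episode.
log_probs = torch.tensor([-0.2, -1.1, -0.7], requires_grad=True)
returns = torch.tensor([1.0, 0.5, 0.25])

# Reinforce maximizes E[log pi(a|s) * G_t], so we minimize its negative
loss = -(log_probs * returns).sum()
loss.backward()  # gradients would then be applied by an optimizer (e.g. Adam)
```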
deep-rl-class/units/en/unit4/glossary.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/glossary.mdx", "repo_id": "deep-rl-class", "token_count": 421 }
79
# Additional Readings [[additional-readings]]

## Bias-variance tradeoff in Reinforcement Learning

If you want to dive deeper into the question of variance and bias tradeoff in Deep Reinforcement Learning, you can check out these two articles:

- [Making Sense of the Bias / Variance Trade-off in (Deep) Reinforcement Learning](https://blog.mlreview.com/making-sense-of-the-bias-variance-trade-off-in-deep-reinforcement-learning-79cf1e83d565)
- [Bias-variance Tradeoff in Reinforcement Learning](https://www.endtoend.ai/blog/bias-variance-tradeoff-in-reinforcement-learning/)

## Advantage Functions

- [Advantage Functions, SpinningUp RL](https://spinningup.openai.com/en/latest/spinningup/rl_intro.html?highlight=advantage%20functio#advantage-functions)

## Actor Critic

- [Foundations of Deep RL Series, L3 Policy Gradients and Advantage Estimation by Pieter Abbeel](https://www.youtube.com/watch?v=AKbX1Zvo7r8)
- [A2C Paper: Asynchronous Methods for Deep Reinforcement Learning](https://arxiv.org/abs/1602.01783v2)
deep-rl-class/units/en/unit6/additional-readings.mdx/0
{ "file_path": "deep-rl-class/units/en/unit6/additional-readings.mdx", "repo_id": "deep-rl-class", "token_count": 321 }
80
# Introducing the Clipped Surrogate Objective Function

## Recap: The Policy Objective Function

Let's recall the objective we optimize in Reinforce:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/lpg.jpg" alt="Reinforce"/>

The idea was that by taking a gradient ascent step on this function (equivalent to taking gradient descent of the negative of this function), we would **push our agent to take actions that lead to higher rewards and avoid harmful actions.**

However, the problem comes from the step size:
- Too small, **the training process was too slow**
- Too high, **there was too much variability in the training**

With PPO, the idea is to constrain our policy update with a new objective function called the *Clipped surrogate objective function* that **will constrain the policy change in a small range using a clip.**

This new function **is designed to avoid destructively large weight updates**:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ppo-surrogate.jpg" alt="PPO surrogate function"/>

Let's study each part to understand how it works.

## The Ratio Function

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ratio1.jpg" alt="Ratio"/>

This ratio is calculated as follows:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/ratio2.jpg" alt="Ratio"/>

It's the probability of taking action \\( a_t \\) at state \\( s_t \\) under the current policy, divided by the same probability under the previous policy.

As we can see, \\( r_t(\theta) \\) denotes the probability ratio between the current and old policy:

- If \\( r_t(\theta) > 1 \\), the **action \\( a_t \\) at state \\( s_t \\) is more likely in the current policy than the old policy.**
- If \\( r_t(\theta) \\) is between 0 and 1, the **action is less likely for the current policy than for the old one**.

So this probability ratio is an **easy way to estimate the divergence between old and current policy.**

## The unclipped part of the Clipped Surrogate Objective function

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/unclipped1.jpg" alt="PPO"/>

This ratio **can replace the log probability we use in the policy objective function**. This gives us the left part of the new objective function: multiplying the ratio by the advantage.

<figure class="image table text-center m-0 w-full">
  <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/unclipped2.jpg" alt="PPO"/>
  <figcaption><a href="https://arxiv.org/pdf/1707.06347.pdf">Proximal Policy Optimization Algorithms</a></figcaption>
</figure>

However, without a constraint, if the action taken is much more probable in our current policy than in our former one, **this would lead to a significant policy gradient step** and, therefore, an **excessive policy update.**

## The clipped part of the Clipped Surrogate Objective function

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/clipped.jpg" alt="PPO"/>

Consequently, we need to constrain this objective function by penalizing changes that lead to a ratio far away from 1 (in the paper, the ratio can only vary from 0.8 to 1.2).
**By clipping the ratio, we ensure that we do not have a too-large policy update, because the current policy can't be too different from the older one.**

To do that, we have two solutions:

- *TRPO (Trust Region Policy Optimization)* uses KL divergence constraints outside the objective function to constrain the policy update. But this method **is complicated to implement and takes more computation time.**
- *PPO* clips the probability ratio directly in the objective function with its **Clipped surrogate objective function.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/clipped.jpg" alt="PPO"/>

This clipped part is a version where \\( r_t(\theta) \\) is clipped between \\( [1 - \epsilon, 1 + \epsilon] \\).

With the Clipped Surrogate Objective function, we have two probability ratios, one non-clipped and one clipped in a range between \\( [1 - \epsilon, 1 + \epsilon] \\); epsilon is a hyperparameter that helps us define this clip range (in the paper, \\( \epsilon = 0.2 \\)).

Then, we take the minimum of the clipped and non-clipped objective, **so the final objective is a lower bound (pessimistic bound) of the unclipped objective.**

Taking the minimum of the clipped and non-clipped objective means **we'll select either the clipped or the non-clipped objective based on the ratio and advantage situation**.
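As a rough illustration of how this objective usually looks in code, here is a minimal PyTorch-style sketch of the clipped surrogate loss (the tensors `log_probs`, `old_log_probs`, and `advantages` are illustrative placeholders rather than code from any particular PPO implementation):

```python
import torch

def clipped_surrogate_loss(log_probs, old_log_probs, advantages, clip_epsilon=0.2):
    # Probability ratio r_t(theta) = pi_theta(a_t|s_t) / pi_theta_old(a_t|s_t)
    ratio = torch.exp(log_probs - old_log_probs)
    # Unclipped and clipped parts of the objective
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1.0 - clip_epsilon, 1.0 + clip_epsilon) * advantages
    # Take the minimum (pessimistic bound) and negate it, since optimizers minimize
    return -torch.min(unclipped, clipped).mean()

# Illustrative values for a batch of 3 transitions
log_probs = torch.tensor([-0.9, -1.2, -0.4], requires_grad=True)
old_log_probs = torch.tensor([-1.0, -1.0, -0.5])
advantages = torch.tensor([0.8, -0.3, 1.5])

loss = clipped_surrogate_loss(log_probs, old_log_probs, advantages)
loss.backward()
```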
deep-rl-class/units/en/unit8/clipped-surrogate-objective.mdx/0
{ "file_path": "deep-rl-class/units/en/unit8/clipped-surrogate-objective.mdx", "repo_id": "deep-rl-class", "token_count": 1386 }
81
# Optuna Tutorial [[optuna]]

The content below comes from [Antonin Raffin's ICRA 2022 presentations](https://araffin.github.io/tools-for-robotic-rl-icra2022/); he's one of the founders of Stable-Baselines and RL-Baselines3-Zoo.

## The theory behind Hyperparameter tuning

<Youtube id="AidFTOdGNFQ" />

## Optuna Tutorial

<Youtube id="ihP7E76KGOI" />

The notebook 👉 [here](https://colab.research.google.com/github/araffin/tools-for-robotic-rl-icra2022/blob/main/notebooks/optuna_lab.ipynb)
deep-rl-class/units/en/unitbonus2/optuna.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus2/optuna.mdx", "repo_id": "deep-rl-class", "token_count": 182 }
82
import argparse
import sys

sys.path.append(".")
from base_classes import LCMLoRATextToImageBenchmark  # noqa: E402

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ckpt",
        type=str,
        default="stabilityai/stable-diffusion-xl-base-1.0",
    )
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_inference_steps", type=int, default=4)
    parser.add_argument("--model_cpu_offload", action="store_true")
    parser.add_argument("--run_compile", action="store_true")
    args = parser.parse_args()

    benchmark_pipe = LCMLoRATextToImageBenchmark(args)
    benchmark_pipe.benchmark(args)
diffusers/benchmarks/benchmark_t2i_lcm_lora.py/0
{ "file_path": "diffusers/benchmarks/benchmark_t2i_lcm_lora.py", "repo_id": "diffusers", "token_count": 273 }
83
- sections: - local: index title: 🧨 Diffusers - local: quicktour title: Quicktour - local: stable_diffusion title: Effective and efficient diffusion - local: installation title: Installation title: Get started - sections: - local: tutorials/tutorial_overview title: Overview - local: using-diffusers/write_own_pipeline title: Understanding pipelines, models and schedulers - local: tutorials/autopipeline title: AutoPipeline - local: tutorials/basic_training title: Train a diffusion model - local: tutorials/using_peft_for_inference title: Inference with PEFT - local: tutorials/fast_diffusion title: Accelerate inference of text-to-image diffusion models title: Tutorials - sections: - sections: - local: using-diffusers/loading_overview title: Overview - local: using-diffusers/loading title: Load pipelines, models, and schedulers - local: using-diffusers/schedulers title: Load and compare different schedulers - local: using-diffusers/custom_pipeline_overview title: Load community pipelines and components - local: using-diffusers/using_safetensors title: Load safetensors - local: using-diffusers/other-formats title: Load different Stable Diffusion formats - local: using-diffusers/loading_adapters title: Load adapters - local: using-diffusers/push_to_hub title: Push files to the Hub title: Loading & Hub - sections: - local: using-diffusers/pipeline_overview title: Overview - local: using-diffusers/unconditional_image_generation title: Unconditional image generation - local: using-diffusers/conditional_image_generation title: Text-to-image - local: using-diffusers/img2img title: Image-to-image - local: using-diffusers/inpaint title: Inpainting - local: using-diffusers/depth2img title: Depth-to-image title: Tasks - sections: - local: using-diffusers/textual_inversion_inference title: Textual inversion - local: training/distributed_inference title: Distributed inference with multiple GPUs - local: using-diffusers/reusing_seeds title: Improve image quality with deterministic generation - local: using-diffusers/control_brightness title: Control image brightness - local: using-diffusers/weighted_prompts title: Prompt weighting - local: using-diffusers/freeu title: Improve generation quality with FreeU title: Techniques - sections: - local: using-diffusers/pipeline_overview title: Overview - local: using-diffusers/sdxl title: Stable Diffusion XL - local: using-diffusers/sdxl_turbo title: SDXL Turbo - local: using-diffusers/kandinsky title: Kandinsky - local: using-diffusers/controlnet title: ControlNet - local: using-diffusers/shap-e title: Shap-E - local: using-diffusers/diffedit title: DiffEdit - local: using-diffusers/distilled_sd title: Distilled Stable Diffusion inference - local: using-diffusers/callback title: Pipeline callbacks - local: using-diffusers/reproducibility title: Create reproducible pipelines - local: using-diffusers/custom_pipeline_examples title: Community pipelines - local: using-diffusers/contribute_pipeline title: Contribute a community pipeline - local: using-diffusers/inference_with_lcm_lora title: Latent Consistency Model-LoRA - local: using-diffusers/inference_with_lcm title: Latent Consistency Model - local: using-diffusers/svd title: Stable Video Diffusion title: Specific pipeline examples - sections: - local: training/overview title: Overview - local: training/create_dataset title: Create a dataset for training - local: training/adapt_a_model title: Adapt a model to a new task - sections: - local: training/unconditional_training title: Unconditional image generation - 
local: training/text2image title: Text-to-image - local: training/sdxl title: Stable Diffusion XL - local: training/kandinsky title: Kandinsky 2.2 - local: training/wuerstchen title: Wuerstchen - local: training/controlnet title: ControlNet - local: training/t2i_adapters title: T2I-Adapters - local: training/instructpix2pix title: InstructPix2Pix title: Models - sections: - local: training/text_inversion title: Textual Inversion - local: training/dreambooth title: DreamBooth - local: training/lora title: LoRA - local: training/custom_diffusion title: Custom Diffusion - local: training/lcm_distill title: Latent Consistency Distillation - local: training/ddpo title: Reinforcement learning training with DDPO title: Methods title: Training - sections: - local: using-diffusers/other-modalities title: Other Modalities title: Taking Diffusers Beyond Images title: Using Diffusers - sections: - local: optimization/opt_overview title: Overview - sections: - local: optimization/fp16 title: Speed up inference - local: optimization/memory title: Reduce memory usage - local: optimization/torch2.0 title: PyTorch 2.0 - local: optimization/xformers title: xFormers - local: optimization/tome title: Token merging - local: optimization/deepcache title: DeepCache title: General optimizations - sections: - local: using-diffusers/stable_diffusion_jax_how_to title: JAX/Flax - local: optimization/onnx title: ONNX - local: optimization/open_vino title: OpenVINO - local: optimization/coreml title: Core ML title: Optimized model types - sections: - local: optimization/mps title: Metal Performance Shaders (MPS) - local: optimization/habana title: Habana Gaudi title: Optimized hardware title: Optimization - sections: - local: conceptual/philosophy title: Philosophy - local: using-diffusers/controlling_generation title: Controlled generation - local: conceptual/contribution title: How to contribute? 
- local: conceptual/ethical_guidelines title: Diffusers' Ethical Guidelines - local: conceptual/evaluation title: Evaluating Diffusion Models title: Conceptual Guides - sections: - sections: - local: api/configuration title: Configuration - local: api/logging title: Logging - local: api/outputs title: Outputs title: Main Classes - sections: - local: api/loaders/ip_adapter title: IP-Adapter - local: api/loaders/lora title: LoRA - local: api/loaders/single_file title: Single files - local: api/loaders/textual_inversion title: Textual Inversion - local: api/loaders/unet title: UNet - local: api/loaders/peft title: PEFT title: Loaders - sections: - local: api/models/overview title: Overview - local: api/models/unet title: UNet1DModel - local: api/models/unet2d title: UNet2DModel - local: api/models/unet2d-cond title: UNet2DConditionModel - local: api/models/unet3d-cond title: UNet3DConditionModel - local: api/models/unet-motion title: UNetMotionModel - local: api/models/uvit2d title: UViT2DModel - local: api/models/vq title: VQModel - local: api/models/autoencoderkl title: AutoencoderKL - local: api/models/asymmetricautoencoderkl title: AsymmetricAutoencoderKL - local: api/models/autoencoder_tiny title: Tiny AutoEncoder - local: api/models/consistency_decoder_vae title: ConsistencyDecoderVAE - local: api/models/transformer2d title: Transformer2D - local: api/models/transformer_temporal title: Transformer Temporal - local: api/models/prior_transformer title: Prior Transformer - local: api/models/controlnet title: ControlNet title: Models - sections: - local: api/pipelines/overview title: Overview - local: api/pipelines/amused title: aMUSEd - local: api/pipelines/animatediff title: AnimateDiff - local: api/pipelines/attend_and_excite title: Attend-and-Excite - local: api/pipelines/audioldm title: AudioLDM - local: api/pipelines/audioldm2 title: AudioLDM 2 - local: api/pipelines/auto_pipeline title: AutoPipeline - local: api/pipelines/blip_diffusion title: BLIP-Diffusion - local: api/pipelines/consistency_models title: Consistency Models - local: api/pipelines/controlnet title: ControlNet - local: api/pipelines/controlnet_sdxl title: ControlNet with Stable Diffusion XL - local: api/pipelines/dance_diffusion title: Dance Diffusion - local: api/pipelines/ddim title: DDIM - local: api/pipelines/ddpm title: DDPM - local: api/pipelines/deepfloyd_if title: DeepFloyd IF - local: api/pipelines/diffedit title: DiffEdit - local: api/pipelines/dit title: DiT - local: api/pipelines/i2vgenxl title: I2VGen-XL - local: api/pipelines/pix2pix title: InstructPix2Pix - local: api/pipelines/kandinsky title: Kandinsky 2.1 - local: api/pipelines/kandinsky_v22 title: Kandinsky 2.2 - local: api/pipelines/kandinsky3 title: Kandinsky 3 - local: api/pipelines/latent_consistency_models title: Latent Consistency Models - local: api/pipelines/latent_diffusion title: Latent Diffusion - local: api/pipelines/panorama title: MultiDiffusion - local: api/pipelines/musicldm title: MusicLDM - local: api/pipelines/paint_by_example title: Paint by Example - local: api/pipelines/pia title: Personalized Image Animator (PIA) - local: api/pipelines/pixart title: PixArt-α - local: api/pipelines/self_attention_guidance title: Self-Attention Guidance - local: api/pipelines/semantic_stable_diffusion title: Semantic Guidance - local: api/pipelines/shap_e title: Shap-E - sections: - local: api/pipelines/stable_diffusion/overview title: Overview - local: api/pipelines/stable_diffusion/text2img title: Text-to-image - local: 
api/pipelines/stable_diffusion/img2img title: Image-to-image - local: api/pipelines/stable_diffusion/inpaint title: Inpainting - local: api/pipelines/stable_diffusion/depth2img title: Depth-to-image - local: api/pipelines/stable_diffusion/image_variation title: Image variation - local: api/pipelines/stable_diffusion/stable_diffusion_safe title: Safe Stable Diffusion - local: api/pipelines/stable_diffusion/stable_diffusion_2 title: Stable Diffusion 2 - local: api/pipelines/stable_diffusion/stable_diffusion_xl title: Stable Diffusion XL - local: api/pipelines/stable_diffusion/sdxl_turbo title: SDXL Turbo - local: api/pipelines/stable_diffusion/latent_upscale title: Latent upscaler - local: api/pipelines/stable_diffusion/upscale title: Super-resolution - local: api/pipelines/stable_diffusion/k_diffusion title: K-Diffusion - local: api/pipelines/stable_diffusion/ldm3d_diffusion title: LDM3D Text-to-(RGB, Depth), Text-to-(RGB-pano, Depth-pano), LDM3D Upscaler - local: api/pipelines/stable_diffusion/adapter title: Stable Diffusion T2I-Adapter - local: api/pipelines/stable_diffusion/gligen title: GLIGEN (Grounded Language-to-Image Generation) title: Stable Diffusion - local: api/pipelines/stable_unclip title: Stable unCLIP - local: api/pipelines/text_to_video title: Text-to-video - local: api/pipelines/text_to_video_zero title: Text2Video-Zero - local: api/pipelines/unclip title: unCLIP - local: api/pipelines/unidiffuser title: UniDiffuser - local: api/pipelines/value_guided_sampling title: Value-guided sampling - local: api/pipelines/wuerstchen title: Wuerstchen title: Pipelines - sections: - local: api/schedulers/overview title: Overview - local: api/schedulers/cm_stochastic_iterative title: CMStochasticIterativeScheduler - local: api/schedulers/consistency_decoder title: ConsistencyDecoderScheduler - local: api/schedulers/ddim_inverse title: DDIMInverseScheduler - local: api/schedulers/ddim title: DDIMScheduler - local: api/schedulers/ddpm title: DDPMScheduler - local: api/schedulers/deis title: DEISMultistepScheduler - local: api/schedulers/multistep_dpm_solver_inverse title: DPMSolverMultistepInverse - local: api/schedulers/multistep_dpm_solver title: DPMSolverMultistepScheduler - local: api/schedulers/dpm_sde title: DPMSolverSDEScheduler - local: api/schedulers/singlestep_dpm_solver title: DPMSolverSinglestepScheduler - local: api/schedulers/euler_ancestral title: EulerAncestralDiscreteScheduler - local: api/schedulers/euler title: EulerDiscreteScheduler - local: api/schedulers/heun title: HeunDiscreteScheduler - local: api/schedulers/ipndm title: IPNDMScheduler - local: api/schedulers/stochastic_karras_ve title: KarrasVeScheduler - local: api/schedulers/dpm_discrete_ancestral title: KDPM2AncestralDiscreteScheduler - local: api/schedulers/dpm_discrete title: KDPM2DiscreteScheduler - local: api/schedulers/lcm title: LCMScheduler - local: api/schedulers/lms_discrete title: LMSDiscreteScheduler - local: api/schedulers/pndm title: PNDMScheduler - local: api/schedulers/repaint title: RePaintScheduler - local: api/schedulers/score_sde_ve title: ScoreSdeVeScheduler - local: api/schedulers/score_sde_vp title: ScoreSdeVpScheduler - local: api/schedulers/unipc title: UniPCMultistepScheduler - local: api/schedulers/vq_diffusion title: VQDiffusionScheduler title: Schedulers - sections: - local: api/internal_classes_overview title: Overview - local: api/attnprocessor title: Attention Processor - local: api/activations title: Custom activation functions - local: api/normalization title: Custom 
normalization layers - local: api/utilities title: Utilities - local: api/image_processor title: VAE Image Processor title: Internal classes title: API
diffusers/docs/source/en/_toctree.yml/0
{ "file_path": "diffusers/docs/source/en/_toctree.yml", "repo_id": "diffusers", "token_count": 5944 }
84
# Consistency Decoder

The consistency decoder can be used to decode the latents from the denoising UNet in the [`StableDiffusionPipeline`]. This decoder was introduced in the [DALL-E 3 technical report](https://openai.com/dall-e-3).

The original codebase can be found at [openai/consistencydecoder](https://github.com/openai/consistencydecoder).

<Tip warning={true}>

Inference is only supported for 2 iterations as of now.

</Tip>

The pipeline could not have been contributed without the help of [madebyollin](https://github.com/madebyollin) and [mrsteyk](https://github.com/mrsteyk) from [this issue](https://github.com/openai/consistencydecoder/issues/1).

## ConsistencyDecoderVAE

[[autodoc]] ConsistencyDecoderVAE
    - all
    - decode
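For reference, a minimal usage sketch (assuming the `openai/consistency-decoder` checkpoint on the Hub and a Stable Diffusion v1.5 base model — adapt both to your setup) looks roughly like this:

```python
import torch
from diffusers import StableDiffusionPipeline, ConsistencyDecoderVAE

# Load the consistency decoder and plug it in as the pipeline's VAE
vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
).to("cuda")

image = pipeline("horse", generator=torch.manual_seed(0)).images[0]
image
```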
diffusers/docs/source/en/api/models/consistency_decoder_vae.md/0
{ "file_path": "diffusers/docs/source/en/api/models/consistency_decoder_vae.md", "repo_id": "diffusers", "token_count": 242 }
85
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Kandinsky 2.1

Kandinsky 2.1 was created by [Arseniy Shakhmatov](https://github.com/cene555), [Anton Razzhigaev](https://github.com/razzant), [Aleksandr Nikolich](https://github.com/AlexWortega), [Vladimir Arkhipkin](https://github.com/oriBetelgeuse), [Igor Pavlov](https://github.com/boomb0om), [Andrey Kuznetsov](https://github.com/kuznetsoffandrey), and [Denis Dimitrov](https://github.com/denndimitrov).

The description from its GitHub page is:

*Kandinsky 2.1 inherits best practicies from Dall-E 2 and Latent diffusion, while introducing some new ideas. As text and image encoder it uses CLIP model and diffusion image prior (mapping) between latent spaces of CLIP modalities. This approach increases the visual performance of the model and unveils new horizons in blending images and text-guided image manipulation.*

The original codebase can be found at [ai-forever/Kandinsky-2](https://github.com/ai-forever/Kandinsky-2).

<Tip>

Check out the [Kandinsky Community](https://huggingface.co/kandinsky-community) organization on the Hub for the official model checkpoints for tasks like text-to-image, image-to-image, and inpainting.

</Tip>

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## KandinskyPriorPipeline

[[autodoc]] KandinskyPriorPipeline
    - all
    - __call__
    - interpolate

## KandinskyPipeline

[[autodoc]] KandinskyPipeline
    - all
    - __call__

## KandinskyCombinedPipeline

[[autodoc]] KandinskyCombinedPipeline
    - all
    - __call__

## KandinskyImg2ImgPipeline

[[autodoc]] KandinskyImg2ImgPipeline
    - all
    - __call__

## KandinskyImg2ImgCombinedPipeline

[[autodoc]] KandinskyImg2ImgCombinedPipeline
    - all
    - __call__

## KandinskyInpaintPipeline

[[autodoc]] KandinskyInpaintPipeline
    - all
    - __call__

## KandinskyInpaintCombinedPipeline

[[autodoc]] KandinskyInpaintCombinedPipeline
    - all
    - __call__
diffusers/docs/source/en/api/pipelines/kandinsky.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/kandinsky.md", "repo_id": "diffusers", "token_count": 851 }
86
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <Tip warning={true}> 🧪 This pipeline is for research purposes only. </Tip> # Text-to-video [ModelScope Text-to-Video Technical Report](https://arxiv.org/abs/2308.06571) is by Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, Shiwei Zhang. The abstract from the paper is: *This paper introduces ModelScopeT2V, a text-to-video synthesis model that evolves from a text-to-image synthesis model (i.e., Stable Diffusion). ModelScopeT2V incorporates spatio-temporal blocks to ensure consistent frame generation and smooth movement transitions. The model could adapt to varying frame numbers during training and inference, rendering it suitable for both image-text and video-text datasets. ModelScopeT2V brings together three components (i.e., VQGAN, a text encoder, and a denoising UNet), totally comprising 1.7 billion parameters, in which 0.5 billion parameters are dedicated to temporal capabilities. The model demonstrates superior performance over state-of-the-art methods across three evaluation metrics. The code and an online demo are available at https://modelscope.cn/models/damo/text-to-video-synthesis/summary.* You can find additional information about Text-to-Video on the [project page](https://modelscope.cn/models/damo/text-to-video-synthesis/summary), [original codebase](https://github.com/modelscope/modelscope/), and try it out in a [demo](https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis). Official checkpoints can be found at [damo-vilab](https://huggingface.co/damo-vilab) and [cerspense](https://huggingface.co/cerspense). ## Usage example ### `text-to-video-ms-1.7b` Let's start by generating a short video with the default length of 16 frames (2s at 8 fps): ```python import torch from diffusers import DiffusionPipeline from diffusers.utils import export_to_video pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16") pipe = pipe.to("cuda") prompt = "Spiderman is surfing" video_frames = pipe(prompt).frames video_path = export_to_video(video_frames) video_path ``` Diffusers supports different optimization techniques to improve the latency and memory footprint of a pipeline. Since videos are often more memory-heavy than images, we can enable CPU offloading and VAE slicing to keep the memory footprint at bay. 
Let's generate a video of 8 seconds (64 frames) on the same GPU using CPU offloading and VAE slicing: ```python import torch from diffusers import DiffusionPipeline from diffusers.utils import export_to_video pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16") pipe.enable_model_cpu_offload() # memory optimization pipe.enable_vae_slicing() prompt = "Darth Vader surfing a wave" video_frames = pipe(prompt, num_frames=64).frames video_path = export_to_video(video_frames) video_path ``` It just takes **7 GBs of GPU memory** to generate the 64 video frames using PyTorch 2.0, "fp16" precision and the techniques mentioned above. We can also use a different scheduler easily, using the same method we'd use for Stable Diffusion: ```python import torch from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler from diffusers.utils import export_to_video pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() prompt = "Spiderman is surfing" video_frames = pipe(prompt, num_inference_steps=25).frames video_path = export_to_video(video_frames) video_path ``` Here are some sample outputs: <table> <tr> <td><center> An astronaut riding a horse. <br> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astr.gif" alt="An astronaut riding a horse." style="width: 300px;" /> </center></td> <td ><center> Darth vader surfing in waves. <br> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/vader.gif" alt="Darth vader surfing in waves." style="width: 300px;" /> </center></td> </tr> </table> ### `cerspense/zeroscope_v2_576w` & `cerspense/zeroscope_v2_XL` Zeroscope are watermark-free model and have been trained on specific sizes such as `576x320` and `1024x576`. One should first generate a video using the lower resolution checkpoint [`cerspense/zeroscope_v2_576w`](https://huggingface.co/cerspense/zeroscope_v2_576w) with [`TextToVideoSDPipeline`], which can then be upscaled using [`VideoToVideoSDPipeline`] and [`cerspense/zeroscope_v2_XL`](https://huggingface.co/cerspense/zeroscope_v2_XL). ```py import torch from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler from diffusers.utils import export_to_video from PIL import Image pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16) pipe.enable_model_cpu_offload() # memory optimization pipe.unet.enable_forward_chunking(chunk_size=1, dim=1) pipe.enable_vae_slicing() prompt = "Darth Vader surfing a wave" video_frames = pipe(prompt, num_frames=24).frames video_path = export_to_video(video_frames) video_path ``` Now the video can be upscaled: ```py pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16) pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() # memory optimization pipe.unet.enable_forward_chunking(chunk_size=1, dim=1) pipe.enable_vae_slicing() video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames] video_frames = pipe(prompt, video=video, strength=0.6).frames video_path = export_to_video(video_frames) video_path ``` Here are some sample outputs: <table> <tr> <td ><center> Darth vader surfing in waves. 
<br> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/darthvader_cerpense.gif" alt="Darth vader surfing in waves." style="width: 576px;" /> </center></td> </tr> </table> <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## TextToVideoSDPipeline [[autodoc]] TextToVideoSDPipeline - all - __call__ ## VideoToVideoSDPipeline [[autodoc]] VideoToVideoSDPipeline - all - __call__ ## TextToVideoSDPipelineOutput [[autodoc]] pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput
diffusers/docs/source/en/api/pipelines/text_to_video.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/text_to_video.md", "repo_id": "diffusers", "token_count": 2519 }
87
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # OpenVINO 🤗 [Optimum](https://github.com/huggingface/optimum-intel) provides Stable Diffusion pipelines compatible with OpenVINO to perform inference on a variety of Intel processors (see the [full list](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) of supported devices). You'll need to install 🤗 Optimum Intel with the `--upgrade-strategy eager` option to ensure [`optimum-intel`](https://github.com/huggingface/optimum-intel) is using the latest version: ```bash pip install --upgrade-strategy eager optimum["openvino"] ``` This guide will show you how to use the Stable Diffusion and Stable Diffusion XL (SDXL) pipelines with OpenVINO. ## Stable Diffusion To load and run inference, use the [`~optimum.intel.OVStableDiffusionPipeline`]. If you want to load a PyTorch model and convert it to the OpenVINO format on-the-fly, set `export=True`: ```python from optimum.intel import OVStableDiffusionPipeline model_id = "runwayml/stable-diffusion-v1-5" pipeline = OVStableDiffusionPipeline.from_pretrained(model_id, export=True) prompt = "sailing ship in storm by Rembrandt" image = pipeline(prompt).images[0] # Don't forget to save the exported model pipeline.save_pretrained("openvino-sd-v1-5") ``` To further speed-up inference, statically reshape the model. If you change any parameters such as the outputs height or width, you’ll need to statically reshape your model again. ```python # Define the shapes related to the inputs and desired outputs batch_size, num_images, height, width = 1, 1, 512, 512 # Statically reshape the model pipeline.reshape(batch_size, height, width, num_images) # Compile the model before inference pipeline.compile() image = pipeline( prompt, height=height, width=width, num_images_per_prompt=num_images, ).images[0] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/intel/openvino/stable_diffusion_v1_5_sail_boat_rembrandt.png"> </div> You can find more examples in the 🤗 Optimum [documentation](https://huggingface.co/docs/optimum/intel/inference#stable-diffusion), and Stable Diffusion is supported for text-to-image, image-to-image, and inpainting. ## Stable Diffusion XL To load and run inference with SDXL, use the [`~optimum.intel.OVStableDiffusionXLPipeline`]: ```python from optimum.intel import OVStableDiffusionXLPipeline model_id = "stabilityai/stable-diffusion-xl-base-1.0" pipeline = OVStableDiffusionXLPipeline.from_pretrained(model_id) prompt = "sailing ship in storm by Rembrandt" image = pipeline(prompt).images[0] ``` To further speed-up inference, [statically reshape](#stable-diffusion) the model as shown in the Stable Diffusion section. You can find more examples in the 🤗 Optimum [documentation](https://huggingface.co/docs/optimum/intel/inference#stable-diffusion-xl), and running SDXL in OpenVINO is supported for text-to-image and image-to-image.
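Since image-to-image is mentioned as supported but not shown above, here is a rough sketch of what it could look like (class and argument names follow recent optimum-intel releases — double-check them against the 🤗 Optimum documentation for your installed version):

```python
from diffusers.utils import load_image
from optimum.intel import OVStableDiffusionImg2ImgPipeline

model_id = "runwayml/stable-diffusion-v1-5"
# Convert the PyTorch model to the OpenVINO format on-the-fly
pipeline = OVStableDiffusionImg2ImgPipeline.from_pretrained(model_id, export=True)

init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
)
prompt = "A fantasy landscape, trending on artstation"
image = pipeline(prompt=prompt, image=init_image, strength=0.75).images[0]
image.save("fantasy_landscape.png")
```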
diffusers/docs/source/en/optimization/open_vino.md/0
{ "file_path": "diffusers/docs/source/en/optimization/open_vino.md", "repo_id": "diffusers", "token_count": 1110 }
88
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Latent Consistency Distillation [Latent Consistency Models (LCMs)](https://hf.co/papers/2310.04378) are able to generate high-quality images in just a few steps, representing a big leap forward because many pipelines require at least 25+ steps. LCMs are produced by applying the latent consistency distillation method to any Stable Diffusion model. This method works by applying *one-stage guided distillation* to the latent space, and incorporating a *skipping-step* method to consistently skip timesteps to accelerate the distillation process (refer to section 4.1, 4.2, and 4.3 of the paper for more details). If you're training on a GPU with limited vRAM, try enabling `gradient_checkpointing`, `gradient_accumulation_steps`, and `mixed_precision` to reduce memory-usage and speedup training. You can reduce your memory-usage even more by enabling memory-efficient attention with [xFormers](../optimization/xformers) and [bitsandbytes'](https://github.com/TimDettmers/bitsandbytes) 8-bit optimizer. This guide will explore the [train_lcm_distill_sd_wds.py](https://github.com/huggingface/diffusers/blob/main/examples/consistency_distillation/train_lcm_distill_sd_wds.py) script to help you become more familiar with it, and how you can adapt it for your own use-case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then navigate to the example folder containing the training script and install the required dependencies for the script you're using: ```bash cd examples/consistency_distillation pip install -r requirements.txt ``` <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. </Tip> Initialize an 🤗 Accelerate environment (try enabling `torch.compile` to significantly speedup training): ```bash accelerate config ``` To setup a default 🤗 Accelerate environment without choosing any configurations: ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell, like a notebook, you can use: ```bash from accelerate.utils import write_basic_config write_basic_config() ``` Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script. ## Script parameters <Tip> The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. 
If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/consistency_distillation/train_lcm_distill_sd_wds.py) and let us know if you have any questions or concerns. </Tip> The training script provides many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/3b37488fa3280aed6a95de044d7a42ffdcb565ef/examples/consistency_distillation/train_lcm_distill_sd_wds.py#L419) function. This function provides default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like. For example, to speedup training with mixed precision using the fp16 format, add the `--mixed_precision` parameter to the training command: ```bash accelerate launch train_lcm_distill_sd_wds.py \ --mixed_precision="fp16" ``` Most of the parameters are identical to the parameters in the [Text-to-image](text2image#script-parameters) training guide, so you'll focus on the parameters that are relevant to latent consistency distillation in this guide. - `--pretrained_teacher_model`: the path to a pretrained latent diffusion model to use as the teacher model - `--pretrained_vae_model_name_or_path`: path to a pretrained VAE; the SDXL VAE is known to suffer from numerical instability, so this parameter allows you to specify an alternative VAE (like this [VAE]((https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)) by madebyollin which works in fp16) - `--w_min` and `--w_max`: the minimum and maximum guidance scale values for guidance scale sampling - `--num_ddim_timesteps`: the number of timesteps for DDIM sampling - `--loss_type`: the type of loss (L2 or Huber) to calculate for latent consistency distillation; Huber loss is generally preferred because it's more robust to outliers - `--huber_c`: the Huber loss parameter ## Training script The training script starts by creating a dataset class - [`Text2ImageDataset`](https://github.com/huggingface/diffusers/blob/3b37488fa3280aed6a95de044d7a42ffdcb565ef/examples/consistency_distillation/train_lcm_distill_sd_wds.py#L141) - for preprocessing the images and creating a training dataset. ```py def transform(example): image = example["image"] image = TF.resize(image, resolution, interpolation=transforms.InterpolationMode.BILINEAR) c_top, c_left, _, _ = transforms.RandomCrop.get_params(image, output_size=(resolution, resolution)) image = TF.crop(image, c_top, c_left, resolution, resolution) image = TF.to_tensor(image) image = TF.normalize(image, [0.5], [0.5]) example["image"] = image return example ``` For improved performance on reading and writing large datasets stored in the cloud, this script uses the [WebDataset](https://github.com/webdataset/webdataset) format to create a preprocessing pipeline to apply transforms and create a dataset and dataloader for training. Images are processed and fed to the training loop without having to download the full dataset first. 
```py processing_pipeline = [ wds.decode("pil", handler=wds.ignore_and_continue), wds.rename(image="jpg;png;jpeg;webp", text="text;txt;caption", handler=wds.warn_and_continue), wds.map(filter_keys({"image", "text"})), wds.map(transform), wds.to_tuple("image", "text"), ] ``` In the [`main()`](https://github.com/huggingface/diffusers/blob/3b37488fa3280aed6a95de044d7a42ffdcb565ef/examples/consistency_distillation/train_lcm_distill_sd_wds.py#L768) function, all the necessary components like the noise scheduler, tokenizers, text encoders, and VAE are loaded. The teacher UNet is also loaded here and then you can create a student UNet from the teacher UNet. The student UNet is updated by the optimizer during training. ```py teacher_unet = UNet2DConditionModel.from_pretrained( args.pretrained_teacher_model, subfolder="unet", revision=args.teacher_revision ) unet = UNet2DConditionModel(**teacher_unet.config) unet.load_state_dict(teacher_unet.state_dict(), strict=False) unet.train() ``` Now you can create the [optimizer](https://github.com/huggingface/diffusers/blob/3b37488fa3280aed6a95de044d7a42ffdcb565ef/examples/consistency_distillation/train_lcm_distill_sd_wds.py#L979) to update the UNet parameters: ```py optimizer = optimizer_class( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ``` Create the [dataset](https://github.com/huggingface/diffusers/blob/3b37488fa3280aed6a95de044d7a42ffdcb565ef/examples/consistency_distillation/train_lcm_distill_sd_wds.py#L994): ```py dataset = Text2ImageDataset( train_shards_path_or_url=args.train_shards_path_or_url, num_train_examples=args.max_train_samples, per_gpu_batch_size=args.train_batch_size, global_batch_size=args.train_batch_size * accelerator.num_processes, num_workers=args.dataloader_num_workers, resolution=args.resolution, shuffle_buffer_size=1000, pin_memory=True, persistent_workers=True, ) train_dataloader = dataset.train_dataloader ``` Next, you're ready to setup the [training loop](https://github.com/huggingface/diffusers/blob/3b37488fa3280aed6a95de044d7a42ffdcb565ef/examples/consistency_distillation/train_lcm_distill_sd_wds.py#L1049) and implement the latent consistency distillation method (see Algorithm 1 in the paper for more details). This section of the script takes care of adding noise to the latents, sampling and creating a guidance scale embedding, and predicting the original image from the noise. ```py pred_x_0 = predicted_origin( noise_pred, start_timesteps, noisy_model_input, noise_scheduler.config.prediction_type, alpha_schedule, sigma_schedule, ) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 ``` It gets the [teacher model predictions](https://github.com/huggingface/diffusers/blob/3b37488fa3280aed6a95de044d7a42ffdcb565ef/examples/consistency_distillation/train_lcm_distill_sd_wds.py#L1172) and the [LCM predictions](https://github.com/huggingface/diffusers/blob/3b37488fa3280aed6a95de044d7a42ffdcb565ef/examples/consistency_distillation/train_lcm_distill_sd_wds.py#L1209) next, calculates the loss, and then backpropagates it to the LCM. 
```py if args.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif args.loss_type == "huber": loss = torch.mean( torch.sqrt((model_pred.float() - target.float()) ** 2 + args.huber_c**2) - args.huber_c ) ``` If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers tutorial](../using-diffusers/write_own_pipeline) which breaks down the basic pattern of the denoising process. ## Launch the script Now you're ready to launch the training script and start distilling! For this guide, you'll use the `--train_shards_path_or_url` to specify the path to the [Conceptual Captions 12M](https://github.com/google-research-datasets/conceptual-12m) dataset stored on the Hub [here](https://huggingface.co/datasets/laion/conceptual-captions-12m-webdataset). Set the `MODEL_DIR` environment variable to the name of the teacher model and `OUTPUT_DIR` to where you want to save the model. ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="path/to/saved/model" accelerate launch train_lcm_distill_sd_wds.py \ --pretrained_teacher_model=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --mixed_precision=fp16 \ --resolution=512 \ --learning_rate=1e-6 --loss_type="huber" --ema_decay=0.95 --adam_weight_decay=0.0 \ --max_train_steps=1000 \ --max_train_samples=4000000 \ --dataloader_num_workers=8 \ --train_shards_path_or_url="pipe:curl -L -s https://huggingface.co/datasets/laion/conceptual-captions-12m-webdataset/resolve/main/data/{00000..01099}.tar?download=true" \ --validation_steps=200 \ --checkpointing_steps=200 --checkpoints_total_limit=10 \ --train_batch_size=12 \ --gradient_checkpointing --enable_xformers_memory_efficient_attention \ --gradient_accumulation_steps=1 \ --use_8bit_adam \ --resume_from_checkpoint=latest \ --report_to=wandb \ --seed=453645634 \ --push_to_hub ``` Once training is complete, you can use your new LCM for inference. ```py from diffusers import UNet2DConditionModel, DiffusionPipeline, LCMScheduler import torch unet = UNet2DConditionModel.from_pretrained("your-username/your-model", torch_dtype=torch.float16, variant="fp16") pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", unet=unet, torch_dtype=torch.float16, variant="fp16") pipeline.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipeline.to("cuda") prompt = "sushi rolls in the form of panda heads, sushi platter" image = pipeline(prompt, num_inference_steps=4, guidance_scale=1.0).images[0] ``` ## LoRA LoRA is a training technique for significantly reducing the number of trainable parameters. As a result, training is faster and it is easier to store the resulting weights because they are a lot smaller (~100MBs). Use the [train_lcm_distill_lora_sd_wds.py](https://github.com/huggingface/diffusers/blob/main/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py) or [train_lcm_distill_lora_sdxl.wds.py](https://github.com/huggingface/diffusers/blob/main/examples/consistency_distillation/train_lcm_distill_lora_sdxl_wds.py) script to train with LoRA. The LoRA training script is discussed in more detail in the [LoRA training](lora) guide. ## Stable Diffusion XL Stable Diffusion XL (SDXL) is a powerful text-to-image model that generates high-resolution images, and it adds a second text-encoder to its architecture. 
Use the [train_lcm_distill_sdxl_wds.py](https://github.com/huggingface/diffusers/blob/main/examples/consistency_distillation/train_lcm_distill_sdxl_wds.py) script to train a SDXL model with LoRA. The SDXL training script is discussed in more detail in the [SDXL training](sdxl) guide. ## Next steps Congratulations on distilling a LCM model! To learn more about LCM, the following may be helpful: - Learn how to use [LCMs for inference](../using-diffusers/lcm) for text-to-image, image-to-image, and with LoRA checkpoints. - Read the [SDXL in 4 steps with Latent Consistency LoRAs](https://huggingface.co/blog/lcm_lora) blog post to learn more about SDXL LCM-LoRA's for super fast inference, quality comparisons, benchmarks, and more.
diffusers/docs/source/en/training/lcm_distill.md/0
{ "file_path": "diffusers/docs/source/en/training/lcm_distill.md", "repo_id": "diffusers", "token_count": 4583 }
89
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Contribute a community pipeline <Tip> 💡 Take a look at GitHub Issue [#841](https://github.com/huggingface/diffusers/issues/841) for more context about why we're adding community pipelines to help everyone easily share their work without being slowed down. </Tip> Community pipelines allow you to add any additional features you'd like on top of the [`DiffusionPipeline`]. The main benefit of building on top of the `DiffusionPipeline` is anyone can load and use your pipeline by only adding one more argument, making it super easy for the community to access. This guide will show you how to create a community pipeline and explain how they work. To keep things simple, you'll create a "one-step" pipeline where the `UNet` does a single forward pass and calls the scheduler once. ## Initialize the pipeline You should start by creating a `one_step_unet.py` file for your community pipeline. In this file, create a pipeline class that inherits from the [`DiffusionPipeline`] to be able to load model weights and the scheduler configuration from the Hub. The one-step pipeline needs a `UNet` and a scheduler, so you'll need to add these as arguments to the `__init__` function: ```python from diffusers import DiffusionPipeline import torch class UnetSchedulerOneForwardPipeline(DiffusionPipeline): def __init__(self, unet, scheduler): super().__init__() ``` To ensure your pipeline and its components (`unet` and `scheduler`) can be saved with [`~DiffusionPipeline.save_pretrained`], add them to the `register_modules` function: ```diff from diffusers import DiffusionPipeline import torch class UnetSchedulerOneForwardPipeline(DiffusionPipeline): def __init__(self, unet, scheduler): super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) ``` Cool, the `__init__` step is done and you can move to the forward pass now! 🔥 ## Define the forward pass In the forward pass, which we recommend defining as `__call__`, you have complete creative freedom to add whatever feature you'd like. For our amazing one-step pipeline, create a random image and only call the `unet` and `scheduler` once by setting `timestep=1`: ```diff from diffusers import DiffusionPipeline import torch class UnetSchedulerOneForwardPipeline(DiffusionPipeline): def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) + def __call__(self): + image = torch.randn( + (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), + ) + timestep = 1 + model_output = self.unet(image, timestep).sample + scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample + return scheduler_output ``` That's it! 
🚀 You can now run this pipeline by passing a `unet` and `scheduler` to it: ```python from diffusers import DDPMScheduler, UNet2DModel scheduler = DDPMScheduler() unet = UNet2DModel() pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler) output = pipeline() ``` But what's even better is you can load pre-existing weights into the pipeline if the pipeline structure is identical. For example, you can load the [`google/ddpm-cifar10-32`](https://huggingface.co/google/ddpm-cifar10-32) weights into the one-step pipeline: ```python pipeline = UnetSchedulerOneForwardPipeline.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True) output = pipeline() ``` ## Share your pipeline Open a Pull Request on the 🧨 Diffusers [repository](https://github.com/huggingface/diffusers) to add your awesome pipeline in `one_step_unet.py` to the [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) subfolder. Once it is merged, anyone with `diffusers >= 0.4.0` installed can use this pipeline magically 🪄 by specifying it in the `custom_pipeline` argument: ```python from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", use_safetensors=True ) pipe() ``` Another way to share your community pipeline is to upload the `one_step_unet.py` file directly to your preferred [model repository](https://huggingface.co/docs/hub/models-uploading) on the Hub. Instead of specifying the `one_step_unet.py` file, pass the model repository id to the `custom_pipeline` argument: ```python from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline="stevhliu/one_step_unet", use_safetensors=True ) ``` Take a look at the following table to compare the two sharing workflows to help you decide the best option for you: | | GitHub community pipeline | HF Hub community pipeline | |----------------|------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| | usage | same | same | | review process | open a Pull Request on GitHub and undergo a review process from the Diffusers team before merging; may be slower | upload directly to a Hub repository without any review; this is the fastest workflow | | visibility | included in the official Diffusers repository and documentation | included on your HF Hub profile and relies on your own usage/promotion to gain visibility | <Tip> 💡 You can use whatever package you want in your community pipeline file - as long as the user has it installed, everything will work fine. Make sure you have one and only one pipeline class that inherits from `DiffusionPipeline` because this is automatically detected. </Tip> ## How do community pipelines work? A community pipeline is a class that inherits from [`DiffusionPipeline`] which means: - It can be loaded with the [`custom_pipeline`] argument. - The model weights and scheduler configuration are loaded from [`pretrained_model_name_or_path`]. - The code that implements a feature in the community pipeline is defined in a `pipeline.py` file. Sometimes you can't load all the pipeline components weights from an official repository. 
In this case, the other components should be passed directly to the pipeline:

```python
import torch
from diffusers import DiffusionPipeline
from transformers import CLIPImageProcessor, CLIPModel

model_id = "CompVis/stable-diffusion-v1-4"
clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"

feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)

pipeline = DiffusionPipeline.from_pretrained(
    model_id,
    custom_pipeline="clip_guided_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
    use_safetensors=True,
)
```

The magic behind community pipelines is contained in the following code. It allows the community pipeline to be loaded from GitHub or the Hub, and it'll be available to all 🧨 Diffusers packages.

```python
# 2. Load the pipeline class, if using custom module then load it from the Hub
# if we load from explicit class, let's use it
if custom_pipeline is not None:
    pipeline_class = get_class_from_dynamic_module(
        custom_pipeline, module_file=CUSTOM_PIPELINE_FILE_NAME, cache_dir=custom_pipeline
    )
elif cls != DiffusionPipeline:
    pipeline_class = cls
else:
    diffusers_module = importlib.import_module(cls.__module__.split(".")[0])
    pipeline_class = getattr(diffusers_module, config_dict["_class_name"])
```
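To check which class was actually resolved at runtime, a quick (purely illustrative) sanity check after loading is to print the pipeline's class name:

```python
from diffusers import DiffusionPipeline

# the `custom_pipeline` string can be a GitHub community pipeline name or a Hub repository id;
# both paths go through the resolution logic shown above
pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", use_safetensors=True
)
print(type(pipe).__name__)  # should print the community class defined in one_step_unet.py, e.g. UnetSchedulerOneForwardPipeline
```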
diffusers/docs/source/en/using-diffusers/contribute_pipeline.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/contribute_pipeline.md", "repo_id": "diffusers", "token_count": 2940 }
90
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Load adapters [[open-in-colab]] There are several [training](../training/overview) techniques for personalizing diffusion models to generate images of a specific subject or images in certain styles. Each of these training methods produces a different type of adapter. Some of the adapters generate an entirely new model, while other adapters only modify a smaller set of embeddings or weights. This means the loading process for each adapter is also different. This guide will show you how to load DreamBooth, textual inversion, and LoRA weights. <Tip> Feel free to browse the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer), [LoRA the Explorer](https://huggingface.co/spaces/multimodalart/LoraTheExplorer), and the [Diffusers Models Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) for checkpoints and embeddings to use. </Tip> ## DreamBooth [DreamBooth](https://dreambooth.github.io/) finetunes an *entire diffusion model* on just several images of a subject to generate images of that subject in new styles and settings. This method works by using a special word in the prompt that the model learns to associate with the subject image. Of all the training methods, DreamBooth produces the largest file size (usually a few GBs) because it is a full checkpoint model. Let's load the [herge_style](https://huggingface.co/sd-dreambooth-library/herge-style) checkpoint, which is trained on just 10 images drawn by Hergé, to generate images in that style. For it to work, you need to include the special word `herge_style` in your prompt to trigger the checkpoint: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("sd-dreambooth-library/herge-style", torch_dtype=torch.float16).to("cuda") prompt = "A cute herge_style brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration" image = pipeline(prompt).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_dreambooth.png" /> </div> ## Textual inversion [Textual inversion](https://textual-inversion.github.io/) is very similar to DreamBooth and it can also personalize a diffusion model to generate certain concepts (styles, objects) from just a few images. This method works by training and finding new embeddings that represent the images you provide with a special word in the prompt. As a result, the diffusion model weights stay the same and the training process produces a relatively tiny (a few KBs) file. Because textual inversion creates embeddings, it cannot be used on its own like DreamBooth and requires another model. 
```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda") ``` Now you can load the textual inversion embeddings with the [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] method and generate some images. Let's load the [sd-concepts-library/gta5-artwork](https://huggingface.co/sd-concepts-library/gta5-artwork) embeddings and you'll need to include the special word `<gta5-artwork>` in your prompt to trigger it: ```py pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork") prompt = "A cute brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration, <gta5-artwork> style" image = pipeline(prompt).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_txt_embed.png" /> </div> Textual inversion can also be trained on undesirable things to create *negative embeddings* to discourage a model from generating images with those undesirable things like blurry images or extra fingers on a hand. This can be an easy way to quickly improve your prompt. You'll also load the embeddings with [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`], but this time, you'll need two more parameters: - `weight_name`: specifies the weight file to load if the file was saved in the 🤗 Diffusers format with a specific name or if the file is stored in the A1111 format - `token`: specifies the special word to use in the prompt to trigger the embeddings Let's load the [sayakpaul/EasyNegative-test](https://huggingface.co/sayakpaul/EasyNegative-test) embeddings: ```py pipeline.load_textual_inversion( "sayakpaul/EasyNegative-test", weight_name="EasyNegative.safetensors", token="EasyNegative" ) ``` Now you can use the `token` to generate an image with the negative embeddings: ```py prompt = "A cute brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration, EasyNegative" negative_prompt = "EasyNegative" image = pipeline(prompt, negative_prompt=negative_prompt, num_inference_steps=50).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png" /> </div> ## LoRA [Low-Rank Adaptation (LoRA)](https://huggingface.co/papers/2106.09685) is a popular training technique because it is fast and generates smaller file sizes (a couple hundred MBs). Like the other methods in this guide, LoRA can train a model to learn new styles from just a few images. It works by inserting new weights into the diffusion model and then only the new weights are trained instead of the entire model. This makes LoRAs faster to train and easier to store. <Tip> LoRA is a very general training technique that can be used with other training methods. For example, it is common to train a model with DreamBooth and LoRA. 
</Tip> LoRAs also need to be used with another model: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda") ``` Then use the [`~loaders.LoraLoaderMixin.load_lora_weights`] method to load the [ostris/super-cereal-sdxl-lora](https://huggingface.co/ostris/super-cereal-sdxl-lora) weights and specify the weights filename from the repository: ```py pipeline.load_lora_weights("ostris/super-cereal-sdxl-lora", weight_name="cereal_box_sdxl_v1.safetensors") prompt = "bears, pizza bites" image = pipeline(prompt).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_lora.png" /> </div> The [`~loaders.LoraLoaderMixin.load_lora_weights`] method loads LoRA weights into both the UNet and text encoder. It is the preferred way for loading LoRAs because it can handle cases where: - the LoRA weights don't have separate identifiers for the UNet and text encoder - the LoRA weights have separate identifiers for the UNet and text encoder But if you only need to load LoRA weights into the UNet, then you can use the [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method. Let's load the [jbilcke-hf/sdxl-cinematic-1](https://huggingface.co/jbilcke-hf/sdxl-cinematic-1) LoRA: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda") pipeline.unet.load_attn_procs("jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors") # use cnmt in the prompt to trigger the LoRA prompt = "A cute cnmt eating a slice of pizza, stunning color scheme, masterpiece, illustration" image = pipeline(prompt).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_attn_proc.png" /> </div> <Tip> For both [`~loaders.LoraLoaderMixin.load_lora_weights`] and [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`], you can pass the `cross_attention_kwargs={"scale": 0.5}` parameter to adjust how much of the LoRA weights to use. A value of `0` is the same as only using the base model weights, and a value of `1` is equivalent to using the fully finetuned LoRA. </Tip> To unload the LoRA weights, use the [`~loaders.LoraLoaderMixin.unload_lora_weights`] method to discard the LoRA weights and restore the model to its original weights: ```py pipeline.unload_lora_weights() ``` ### Load multiple LoRAs It can be fun to use multiple LoRAs together to create something entirely new and unique. The [`~loaders.LoraLoaderMixin.fuse_lora`] method allows you to fuse the LoRA weights with the original weights of the underlying model. <Tip> Fusing the weights can lead to a speedup in inference latency because you don't need to separately load the base model and LoRA! You can save your fused pipeline with [`~DiffusionPipeline.save_pretrained`] to avoid loading and fusing the weights every time you want to use the model. 
</Tip> Load an initial model: ```py from diffusers import StableDiffusionXLPipeline, AutoencoderKL import torch vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16, ).to("cuda") ``` Next, load the LoRA checkpoint and fuse it with the original weights. The `lora_scale` parameter controls how much to scale the output by with the LoRA weights. It is important to make the `lora_scale` adjustments in the [`~loaders.LoraLoaderMixin.fuse_lora`] method because it won't work if you try to pass `scale` to the `cross_attention_kwargs` in the pipeline. If you need to reset the original model weights for any reason (use a different `lora_scale`), you should use the [`~loaders.LoraLoaderMixin.unfuse_lora`] method. ```py pipeline.load_lora_weights("ostris/ikea-instructions-lora-sdxl") pipeline.fuse_lora(lora_scale=0.7) # to unfuse the LoRA weights pipeline.unfuse_lora() ``` Then fuse this pipeline with the next set of LoRA weights: ```py pipeline.load_lora_weights("ostris/super-cereal-sdxl-lora") pipeline.fuse_lora(lora_scale=0.7) ``` <Tip warning={true}> You can't unfuse multiple LoRA checkpoints, so if you need to reset the model to its original weights, you'll need to reload it. </Tip> Now you can generate an image that uses the weights from both LoRAs: ```py prompt = "A cute brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration" image = pipeline(prompt).images[0] image ``` ### 🤗 PEFT <Tip> Read the [Inference with 🤗 PEFT](../tutorials/using_peft_for_inference) tutorial to learn more about its integration with 🤗 Diffusers and how you can easily work with and juggle multiple adapters. You'll need to install 🤗 Diffusers and PEFT from source to run the example in this section. </Tip> Another way you can load and use multiple LoRAs is to specify the `adapter_name` parameter in [`~loaders.LoraLoaderMixin.load_lora_weights`]. This method takes advantage of the 🤗 PEFT integration. For example, load and name both LoRA weights: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda") pipeline.load_lora_weights("ostris/ikea-instructions-lora-sdxl", weight_name="ikea_instructions_xl_v1_5.safetensors", adapter_name="ikea") pipeline.load_lora_weights("ostris/super-cereal-sdxl-lora", weight_name="cereal_box_sdxl_v1.safetensors", adapter_name="cereal") ``` Now use the [`~loaders.UNet2DConditionLoadersMixin.set_adapters`] to activate both LoRAs, and you can configure how much weight each LoRA should have on the output: ```py pipeline.set_adapters(["ikea", "cereal"], adapter_weights=[0.7, 0.5]) ``` Then, generate an image: ```py prompt = "A cute brown bear eating a slice of pizza, stunning color scheme, masterpiece, illustration" image = pipeline(prompt, num_inference_steps=30, cross_attention_kwargs={"scale": 1.0}).images[0] image ``` ### Kohya and TheLastBen Other popular LoRA trainers from the community include those by [Kohya](https://github.com/kohya-ss/sd-scripts/) and [TheLastBen](https://github.com/TheLastBen/fast-stable-diffusion). These trainers create different LoRA checkpoints than those trained by 🤗 Diffusers, but they can still be loaded in the same way. 
Let's download the [Blueprintify SD XL 1.0](https://civitai.com/models/150986/blueprintify-sd-xl-10) checkpoint from [Civitai](https://civitai.com/):

```sh
!wget https://civitai.com/api/download/models/168776 -O blueprintify-sd-xl-10.safetensors
```

Load the LoRA checkpoint with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method, and specify the filename in the `weight_name` parameter:

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
pipeline.load_lora_weights("path/to/weights", weight_name="blueprintify-sd-xl-10.safetensors")
```

Generate an image:

```py
# use bl3uprint in the prompt to trigger the LoRA
prompt = "bl3uprint, a highly detailed blueprint of the eiffel tower, explaining how to build all parts, many txt, blueprint grid backdrop"
image = pipeline(prompt).images[0]
image
```

<Tip warning={true}>

Some limitations of using Kohya LoRAs with 🤗 Diffusers include:

- Images may not look like those generated by UIs - like ComfyUI - for multiple reasons, which are explained [here](https://github.com/huggingface/diffusers/pull/4287/#issuecomment-1655110736).
- [LyCORIS checkpoints](https://github.com/KohakuBlueleaf/LyCORIS) aren't fully supported. The [`~loaders.LoraLoaderMixin.load_lora_weights`] method loads LyCORIS checkpoints with LoRA and LoCon modules, but Hada and LoKR are not supported.

</Tip>

Loading a checkpoint from TheLastBen is very similar. For example, to load the [TheLastBen/William_Eggleston_Style_SDXL](https://huggingface.co/TheLastBen/William_Eggleston_Style_SDXL) checkpoint:

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
pipeline.load_lora_weights("TheLastBen/William_Eggleston_Style_SDXL", weight_name="wegg.safetensors")

# use by william eggleston in the prompt to trigger the LoRA
prompt = "a house by william eggleston, sunrays, beautiful, sunlight, sunrays, beautiful"
image = pipeline(prompt=prompt).images[0]
image
```

## IP-Adapter

[IP-Adapter](https://ip-adapter.github.io/) is an effective and lightweight adapter that adds image prompting capabilities to a diffusion model. This adapter works by decoupling the cross-attention layers of the image and text features. All the other model components are frozen and only the embedded image features in the UNet are trained. As a result, IP-Adapter files are typically only ~100MBs.

IP-Adapter works with most of our pipelines, including Stable Diffusion, Stable Diffusion XL (SDXL), ControlNet, T2I-Adapter, and AnimateDiff. You can also use any custom model finetuned from the same base model, and it works with LCM-Lora out of the box.

<Tip>

You can find official IP-Adapter checkpoints in [h94/IP-Adapter](https://huggingface.co/h94/IP-Adapter).

IP-Adapter was contributed by [okotaku](https://github.com/okotaku).

</Tip>

Let's first create a Stable Diffusion Pipeline.

```py
from diffusers import AutoPipelineForText2Image
import torch
from diffusers.utils import load_image

pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
```

Now load the [h94/IP-Adapter](https://huggingface.co/h94/IP-Adapter) weights with the [`~loaders.IPAdapterMixin.load_ip_adapter`] method.
```py
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
```

<Tip>

IP-Adapter relies on an image encoder to generate the image features. If your IP-Adapter weights folder contains an `image_encoder` subfolder, the image encoder is automatically loaded and registered to the pipeline. Otherwise, you can also load a [`~transformers.CLIPVisionModelWithProjection`] model and pass it to a Stable Diffusion pipeline when you create it.

```py
from diffusers import AutoPipelineForText2Image
from transformers import CLIPVisionModelWithProjection
import torch

image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "h94/IP-Adapter",
    subfolder="models/image_encoder",
    torch_dtype=torch.float16,
).to("cuda")

pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, torch_dtype=torch.float16).to("cuda")
```

</Tip>

IP-Adapter allows you to use both image and text to condition the image generation process. For example, let's use the bear image from the [Textual Inversion](#textual-inversion) section as the image prompt (`ip_adapter_image`) along with a text prompt to add "sunglasses". 😎

```py
pipeline.set_ip_adapter_scale(0.6)
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png")
generator = torch.Generator(device="cpu").manual_seed(33)
images = pipeline(
    prompt='best quality, high quality, wearing sunglasses',
    ip_adapter_image=image,
    negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
    num_inference_steps=50,
    generator=generator,
).images
images[0]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip-bear.png" />
</div>

<Tip>

You can use the [`~loaders.IPAdapterMixin.set_ip_adapter_scale`] method to adjust the ratio between the text prompt and image prompt conditions. If you're only using the image prompt, you should set the scale to `1.0`. You can lower the scale to get more generation diversity, but the output will be less aligned with the image prompt. `scale=0.5` achieves good results in most cases when you use both text and image prompts.

</Tip>

IP-Adapter also works great with image-to-image and inpainting pipelines. See the examples below for how to use it with both.
<hfoptions id="tasks"> <hfoption id="image-to-image"> ```py from diffusers import AutoPipelineForImage2Image import torch from diffusers.utils import load_image pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda") image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/vermeer.jpg") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/river.png") pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") generator = torch.Generator(device="cpu").manual_seed(33) images = pipeline(     prompt='best quality, high quality',     image = image,     ip_adapter_image=ip_image,     num_inference_steps=50,     generator=generator,     strength=0.6, ).images images[0] ``` </hfoption> <hfoption id="inpaint"> ```py from diffusers import AutoPipelineForInpaint import torch from diffusers.utils import load_image pipeline = AutoPipelineForInpaint.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float).to("cuda") image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/inpaint_image.png") mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/mask.png") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/girl.png") image = image.resize((512, 768)) mask = mask.resize((512, 768)) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") generator = torch.Generator(device="cpu").manual_seed(33) images = pipeline( prompt='best quality, high quality', image = image, mask_image = mask, ip_adapter_image=ip_image, negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality", num_inference_steps=50, generator=generator, strength=0.5, ).images images[0] ``` </hfoption> </hfoptions> IP-Adapters can also be used with [SDXL](../api/pipelines/stable_diffusion/stable_diffusion_xl.md) ```python from diffusers import AutoPipelineForText2Image from diffusers.utils import load_image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") image = load_image("https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/watercolor_painting.jpeg") pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") generator = torch.Generator(device="cpu").manual_seed(33) image = pipeline( prompt="best quality, high quality", ip_adapter_image=image, negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality", num_inference_steps=25, generator=generator, ).images[0] image.save("sdxl_t2i.png") ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/watercolor_painting.jpeg"/> <figcaption class="mt-2 text-center text-sm text-gray-500">input image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/sdxl_t2i.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">adapted image</figcaption> </div> </div> You can use the IP-Adapter face model to apply specific faces to your images. 
It is an effective way to maintain consistent characters in your image generations.
Weights are loaded with the same method used for the other IP-Adapters.

```python
# Load ip-adapter-full-face_sd15.bin
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-full-face_sd15.bin")
```

<Tip>

It is recommended to use `DDIMScheduler` or `EulerDiscreteScheduler` with the face model.

</Tip>

```python
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
).to("cuda")
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-full-face_sd15.bin")

pipeline.set_ip_adapter_scale(0.7)

image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ai_face2.png")

generator = torch.Generator(device="cpu").manual_seed(33)

image = pipeline(
    prompt="A photo of a girl wearing a black dress, holding red roses in hand, upper body, behind is the Eiffel Tower",
    ip_adapter_image=image,
    negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
    num_inference_steps=50,
    num_images_per_prompt=1,
    width=512,
    height=704,
    generator=generator,
).images[0]
```

<div class="flex flex-row gap-4">
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ai_face2.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">input image</figcaption>
  </div>
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ipadapter_full_face_output.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">output image</figcaption>
  </div>
</div>

You can load multiple IP-Adapter models and use multiple reference images at the same time. In the following example, the IP-Adapter-Plus face model is used to create a consistent character, and the IP-Adapter-Plus model is used together with 10 reference images to give the generated image a coherent style.
```python import torch from diffusers import AutoPipelineForText2Image, DDIMScheduler from transformers import CLIPVisionModelWithProjection from diffusers.utils import load_image image_encoder = CLIPVisionModelWithProjection.from_pretrained( "h94/IP-Adapter", subfolder="models/image_encoder", torch_dtype=torch.float16, ) pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, image_encoder=image_encoder, ) pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name=["ip-adapter-plus_sdxl_vit-h.safetensors", "ip-adapter-plus-face_sdxl_vit-h.safetensors"] ) pipeline.set_ip_adapter_scale([0.7, 0.3]) pipeline.enable_model_cpu_offload() face_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/women_input.png") style_folder = "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy" style_images = [load_image(f"{style_folder}/img{i}.png") for i in range(10)] generator = torch.Generator(device="cpu").manual_seed(0) image = pipeline( prompt="wonderwoman", ip_adapter_image=[style_images, face_image], negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality", num_inference_steps=50, num_images_per_prompt=1, generator=generator, ).images[0] ``` <div class="flex justify-center">     <img src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_style_grid.png" /> <figcaption class="mt-2 text-center text-sm text-gray-500">style input image</figcaption> </div> <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/women_input.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">face input image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_multi_out.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">output image</figcaption> </div> </div> ### LCM-Lora You can use IP-Adapter with LCM-Lora to achieve "instant fine-tune" with custom images. Note that you need to load IP-Adapter weights before loading the LCM-Lora weights. ```py from diffusers import DiffusionPipeline, LCMScheduler import torch from diffusers.utils import load_image model_id = "sd-dreambooth-library/herge-style" lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5" pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") pipe.load_lora_weights(lcm_lora_id) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() prompt = "best quality, high quality" image = load_image("https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png") images = pipe( prompt=prompt, ip_adapter_image=image, num_inference_steps=4, guidance_scale=1, ).images[0] ``` ### Other pipelines IP-Adapter is compatible with any pipeline that (1) uses a text prompt and (2) uses Stable Diffusion or Stable Diffusion XL checkpoint. 
To use IP-Adapter with a different pipeline, all you need to do is to run `load_ip_adapter()` method after you create the pipeline, and then pass your image to the pipeline as `ip_adapter_image` <Tip> 🤗 Diffusers currently only supports using IP-Adapter with some of the most popular pipelines, feel free to open a [feature request](https://github.com/huggingface/diffusers/issues/new/choose) if you have a cool use-case and require integrating IP-adapters with a pipeline that does not support it yet! </Tip> You can find below examples on how to use IP-Adapter with ControlNet and AnimateDiff. <hfoptions id="model"> <hfoption id="ControlNet"> ``` from diffusers import StableDiffusionControlNetPipeline, ControlNetModel import torch from diffusers.utils import load_image controlnet_model_path = "lllyasviel/control_v11f1p_sd15_depth" controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch.float16) pipeline = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16) pipeline.to("cuda") image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/statue.png") depth_map = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/depth.png") pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") generator = torch.Generator(device="cpu").manual_seed(33) images = pipeline( prompt='best quality, high quality', image=depth_map, ip_adapter_image=image, negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality", num_inference_steps=50, generator=generator, ).images images[0] ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/statue.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">input image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ipa-controlnet-out.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">adapted image</figcaption> </div> </div> </hfoption> <hfoption id="AnimateDiff"> ```py # animate diff + ip adapter import torch from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler from diffusers.utils import export_to_gif, load_image # Load the motion adapter adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16) # load SD 1.5 based finetuned model model_id = "Lykon/DreamShaper" pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16) # scheduler scheduler = DDIMScheduler( clip_sample=False, beta_start=0.00085, beta_end=0.012, beta_schedule="linear", timestep_spacing="trailing", steps_offset=1 ) pipe.scheduler = scheduler # enable memory savings pipe.enable_vae_slicing() pipe.enable_model_cpu_offload() # load ip_adapter pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") # load motion adapters pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out") pipe.load_lora_weights("guoyww/animatediff-motion-lora-tilt-up", adapter_name="tilt-up") pipe.load_lora_weights("guoyww/animatediff-motion-lora-pan-left", adapter_name="pan-left") seed = 42 image = 
load_image("https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png") images = [image] * 3 prompts = ["best quality, high quality"] * 3 negative_prompt = "bad quality, worst quality" adapter_weights = [[0.75, 0.0, 0.0], [0.0, 0.0, 0.75], [0.0, 0.75, 0.75]] # generate output_frames = [] for prompt, image, adapter_weight in zip(prompts, images, adapter_weights): pipe.set_adapters(["zoom-out", "tilt-up", "pan-left"], adapter_weights=adapter_weight) output = pipe( prompt= prompt, num_frames=16, guidance_scale=7.5, num_inference_steps=30, ip_adapter_image = image, generator=torch.Generator("cpu").manual_seed(seed), ) frames = output.frames[0] output_frames.extend(frames) export_to_gif(output_frames, "test_out_animation.gif") ``` </hfoption> </hfoptions>
diffusers/docs/source/en/using-diffusers/loading_adapters.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/loading_adapters.md", "repo_id": "diffusers", "token_count": 11093 }
91
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Load safetensors [[open-in-colab]] [safetensors](https://github.com/huggingface/safetensors) is a safe and fast file format for storing and loading tensors. Typically, PyTorch model weights are saved or *pickled* into a `.bin` file with Python's [`pickle`](https://docs.python.org/3/library/pickle.html) utility. However, `pickle` is not secure and pickled files may contain malicious code that can be executed. safetensors is a secure alternative to `pickle`, making it ideal for sharing model weights. This guide will show you how you load `.safetensor` files, and how to convert Stable Diffusion model weights stored in other formats to `.safetensor`. Before you start, make sure you have safetensors installed: ```py # uncomment to install the necessary libraries in Colab #!pip install safetensors ``` If you look at the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main) repository, you'll see weights inside the `text_encoder`, `unet` and `vae` subfolders are stored in the `.safetensors` format. By default, 🤗 Diffusers automatically loads these `.safetensors` files from their subfolders if they're available in the model repository. For more explicit control, you can optionally set `use_safetensors=True` (if `safetensors` is not installed, you'll get an error message asking you to install it): ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True) ``` However, model weights are not necessarily stored in separate subfolders like in the example above. Sometimes, all the weights are stored in a single `.safetensors` file. In this case, if the weights are Stable Diffusion weights, you can load the file directly with the [`~diffusers.loaders.FromSingleFileMixin.from_single_file`] method: ```py from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_single_file( "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors" ) ``` ## Convert to safetensors Not all weights on the Hub are available in the `.safetensors` format, and you may encounter weights stored as `.bin`. In this case, use the [Convert Space](https://huggingface.co/spaces/diffusers/convert) to convert the weights to `.safetensors`. The Convert Space downloads the pickled weights, converts them, and opens a Pull Request to upload the newly converted `.safetensors` file on the Hub. This way, if there is any malicious code contained in the pickled files, they're uploaded to the Hub - which has a [security scanner](https://huggingface.co/docs/hub/security-pickle#hubs-security-scanner) to detect unsafe files and suspicious pickle imports - instead of your computer. 
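If you'd rather convert a pickled checkpoint locally instead of going through the Convert Space, a minimal sketch with the `safetensors` library looks like this (the file names below are placeholders, and `torch.load` still unpickles the file, so only do this with checkpoints you trust):

```py
import torch
from safetensors.torch import save_file

# placeholder paths - point them at your own checkpoint
state_dict = torch.load("pytorch_model.bin", map_location="cpu")
# safetensors expects contiguous tensors
state_dict = {k: v.contiguous() for k, v in state_dict.items()}
save_file(state_dict, "model.safetensors")
```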
You can use the model with the new `.safetensors` weights by specifying the reference to the Pull Request in the `revision` parameter (you can also test it in this [Check PR](https://huggingface.co/spaces/diffusers/check_pr) Space on the Hub), for example `refs/pr/22`: ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", revision="refs/pr/22", use_safetensors=True ) ``` ## Why use safetensors? There are several reasons for using safetensors: - Safety is the number one reason for using safetensors. As open-source and model distribution grows, it is important to be able to trust the model weights you downloaded don't contain any malicious code. The current size of the header in safetensors prevents parsing extremely large JSON files. - Loading speed between switching models is another reason to use safetensors, which performs zero-copy of the tensors. It is especially fast compared to `pickle` if you're loading the weights to CPU (the default case), and just as fast if not faster when directly loading the weights to GPU. You'll only notice the performance difference if the model is already loaded, and not if you're downloading the weights or loading the model for the first time. The time it takes to load the entire pipeline: ```py from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", use_safetensors=True) "Loaded in safetensors 0:00:02.033658" "Loaded in PyTorch 0:00:02.663379" ``` But the actual time it takes to load 500MB of the model weights is only: ```bash safetensors: 3.4873ms PyTorch: 172.7537ms ``` - Lazy loading is also supported in safetensors, which is useful in distributed settings to only load some of the tensors. This format allowed the [BLOOM](https://huggingface.co/bigscience/bloom) model to be loaded in 45 seconds on 8 GPUs instead of 10 minutes with regular PyTorch weights.
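When you save your own pipelines, you can also make sure the weights are written as `.safetensors` files. A short sketch (the output directory is a placeholder):

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True)
# safe_serialization=True stores the weights as .safetensors instead of pickled .bin files
pipeline.save_pretrained("./my-stable-diffusion", safe_serialization=True)
```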
diffusers/docs/source/en/using-diffusers/using_safetensors.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/using_safetensors.md", "repo_id": "diffusers", "token_count": 1536 }
92
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 메모리와 속도 메모리 또는 속도에 대해 🤗 Diffusers *추론*을 최적화하기 위한 몇 가지 기술과 아이디어를 제시합니다. 일반적으로, memory-efficient attention을 위해 [xFormers](https://github.com/facebookresearch/xformers) 사용을 추천하기 때문에, 추천하는 [설치 방법](xformers)을 보고 설치해 보세요. 다음 설정이 성능과 메모리에 미치는 영향에 대해 설명합니다. | | 지연시간 | 속도 향상 | | ---------------- | ------- | ------- | | 별도 설정 없음 | 9.50s | x1 | | cuDNN auto-tuner | 9.37s | x1.01 | | fp16 | 3.61s | x2.63 | | Channels Last 메모리 형식 | 3.30s | x2.88 | | traced UNet | 3.21s | x2.96 | | memory-efficient attention | 2.63s | x3.61 | <em> NVIDIA TITAN RTX에서 50 DDIM 스텝의 "a photo of an astronaut riding a horse on mars" 프롬프트로 512x512 크기의 단일 이미지를 생성하였습니다. </em> ## cuDNN auto-tuner 활성화하기 [NVIDIA cuDNN](https://developer.nvidia.com/cudnn)은 컨볼루션을 계산하는 많은 알고리즘을 지원합니다. Autotuner는 짧은 벤치마크를 실행하고 주어진 입력 크기에 대해 주어진 하드웨어에서 최고의 성능을 가진 커널을 선택합니다. **컨볼루션 네트워크**를 활용하고 있기 때문에 (다른 유형들은 현재 지원되지 않음), 다음 설정을 통해 추론 전에 cuDNN autotuner를 활성화할 수 있습니다: ```python import torch torch.backends.cudnn.benchmark = True ``` ### fp32 대신 tf32 사용하기 (Ampere 및 이후 CUDA 장치들에서) Ampere 및 이후 CUDA 장치에서 행렬곱 및 컨볼루션은 TensorFloat32(TF32) 모드를 사용하여 더 빠르지만 약간 덜 정확할 수 있습니다. 기본적으로 PyTorch는 컨볼루션에 대해 TF32 모드를 활성화하지만 행렬 곱셈은 활성화하지 않습니다. 네트워크에 완전한 float32 정밀도가 필요한 경우가 아니면 행렬 곱셈에 대해서도 이 설정을 활성화하는 것이 좋습니다. 이는 일반적으로 무시할 수 있는 수치의 정확도 손실이 있지만, 계산 속도를 크게 높일 수 있습니다. 그것에 대해 [여기](https://huggingface.co/docs/transformers/v4.18.0/en/performance#tf32)서 더 읽을 수 있습니다. 추론하기 전에 다음을 추가하기만 하면 됩니다: ```python import torch torch.backends.cuda.matmul.allow_tf32 = True ``` ## 반정밀도 가중치 더 많은 GPU 메모리를 절약하고 더 빠른 속도를 얻기 위해 모델 가중치를 반정밀도(half precision)로 직접 불러오고 실행할 수 있습니다. 여기에는 `fp16`이라는 브랜치에 저장된 float16 버전의 가중치를 불러오고, 그 때 `float16` 유형을 사용하도록 PyTorch에 지시하는 작업이 포함됩니다. ```Python pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, ) pipe = pipe.to("cuda") prompt = "a photo of an astronaut riding a horse on mars" image = pipe(prompt).images[0] ``` <Tip warning={true}> 어떤 파이프라인에서도 [`torch.autocast`](https://pytorch.org/docs/stable/amp.html#torch.autocast) 를 사용하는 것은 검은색 이미지를 생성할 수 있고, 순수한 float16 정밀도를 사용하는 것보다 항상 느리기 때문에 사용하지 않는 것이 좋습니다. </Tip> ## 추가 메모리 절약을 위한 슬라이스 어텐션 추가 메모리 절약을 위해, 한 번에 모두 계산하는 대신 단계적으로 계산을 수행하는 슬라이스 버전의 어텐션(attention)을 사용할 수 있습니다. <Tip> Attention slicing은 모델이 하나 이상의 어텐션 헤드를 사용하는 한, 배치 크기가 1인 경우에도 유용합니다. 하나 이상의 어텐션 헤드가 있는 경우 *QK^T* 어텐션 매트릭스는 상당한 양의 메모리를 절약할 수 있는 각 헤드에 대해 순차적으로 계산될 수 있습니다. </Tip> 각 헤드에 대해 순차적으로 어텐션 계산을 수행하려면, 다음과 같이 추론 전에 파이프라인에서 [`~StableDiffusionPipeline.enable_attention_slicing`]를 호출하면 됩니다: ```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, ) pipe = pipe.to("cuda") prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_attention_slicing() image = pipe(prompt).images[0] ``` 추론 시간이 약 10% 느려지는 약간의 성능 저하가 있지만 이 방법을 사용하면 3.2GB 정도의 작은 VRAM으로도 Stable Diffusion을 사용할 수 있습니다! 
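어텐션 슬라이싱이 더 이상 필요하지 않다면 다시 비활성화할 수 있습니다. (파이프라인 `pipe`가 위와 같이 이미 생성되어 있다고 가정한 간단한 예시입니다.)

```Python
# 어텐션 슬라이싱을 비활성화하여 기본 어텐션 계산으로 되돌립니다.
pipe.disable_attention_slicing()
```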
## 더 큰 배치를 위한 sliced VAE 디코드 제한된 VRAM에서 대규모 이미지 배치를 디코딩하거나 32개 이상의 이미지가 포함된 배치를 활성화하기 위해, 배치의 latent 이미지를 한 번에 하나씩 디코딩하는 슬라이스 VAE 디코드를 사용할 수 있습니다. 이를 [`~StableDiffusionPipeline.enable_attention_slicing`] 또는 [`~StableDiffusionPipeline.enable_xformers_memory_efficient_attention`]과 결합하여 메모리 사용을 추가로 최소화할 수 있습니다. VAE 디코드를 한 번에 하나씩 수행하려면 추론 전에 파이프라인에서 [`~StableDiffusionPipeline.enable_vae_slicing`]을 호출합니다. 예를 들어: ```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, ) pipe = pipe.to("cuda") prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_vae_slicing() images = pipe([prompt] * 32).images ``` 다중 이미지 배치에서 VAE 디코드가 약간의 성능 향상이 이루어집니다. 단일 이미지 배치에서는 성능 영향은 없습니다. <a name="sequential_offloading"></a> ## 메모리 절약을 위해 가속 기능을 사용하여 CPU로 오프로딩 추가 메모리 절약을 위해 가중치를 CPU로 오프로드하고 순방향 전달을 수행할 때만 GPU로 로드할 수 있습니다. CPU 오프로딩을 수행하려면 [`~StableDiffusionPipeline.enable_sequential_cpu_offload`]를 호출하기만 하면 됩니다: ```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, ) prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_sequential_cpu_offload() image = pipe(prompt).images[0] ``` 그러면 메모리 소비를 3GB 미만으로 줄일 수 있습니다. 참고로 이 방법은 전체 모델이 아닌 서브모듈 수준에서 작동합니다. 이는 메모리 소비를 최소화하는 가장 좋은 방법이지만 프로세스의 반복적 특성으로 인해 추론 속도가 훨씬 느립니다. 파이프라인의 UNet 구성 요소는 여러 번 실행됩니다('num_inference_steps' 만큼). 매번 UNet의 서로 다른 서브모듈이 순차적으로 온로드된 다음 필요에 따라 오프로드되므로 메모리 이동 횟수가 많습니다. <Tip> 또 다른 최적화 방법인 <a href="#model_offloading">모델 오프로딩</a>을 사용하는 것을 고려하십시오. 이는 훨씬 빠르지만 메모리 절약이 크지는 않습니다. </Tip> 또한 ttention slicing과 연결해서 최소 메모리(< 2GB)로도 동작할 수 있습니다. ```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, ) prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_sequential_cpu_offload() pipe.enable_attention_slicing(1) image = pipe(prompt).images[0] ``` **참고**: 'enable_sequential_cpu_offload()'를 사용할 때, 미리 파이프라인을 CUDA로 이동하지 **않는** 것이 중요합니다.그렇지 않으면 메모리 소비의 이득이 최소화됩니다. 더 많은 정보를 위해 [이 이슈](https://github.com/huggingface/diffusers/issues/1934)를 보세요. <a name="model_offloading"></a> ## 빠른 추론과 메모리 메모리 절약을 위한 모델 오프로딩 [순차적 CPU 오프로딩](#sequential_offloading)은 이전 섹션에서 설명한 것처럼 많은 메모리를 보존하지만 필요에 따라 서브모듈을 GPU로 이동하고 새 모듈이 실행될 때 즉시 CPU로 반환되기 때문에 추론 속도가 느려집니다. 전체 모델 오프로딩은 각 모델의 구성 요소인 _modules_을 처리하는 대신, 전체 모델을 GPU로 이동하는 대안입니다. 이로 인해 추론 시간에 미치는 영향은 미미하지만(파이프라인을 'cuda'로 이동하는 것과 비교하여) 여전히 약간의 메모리를 절약할 수 있습니다. 이 시나리오에서는 파이프라인의 주요 구성 요소 중 하나만(일반적으로 텍스트 인코더, unet 및 vae) GPU에 있고, 나머지는 CPU에서 대기할 것입니다. 여러 반복을 위해 실행되는 UNet과 같은 구성 요소는 더 이상 필요하지 않을 때까지 GPU에 남아 있습니다. 이 기능은 아래와 같이 파이프라인에서 `enable_model_cpu_offload()`를 호출하여 활성화할 수 있습니다. ```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, ) prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_model_cpu_offload() image = pipe(prompt).images[0] ``` 이는 추가적인 메모리 절약을 위한 attention slicing과도 호환됩니다. 
```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, ) prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_model_cpu_offload() pipe.enable_attention_slicing(1) image = pipe(prompt).images[0] ``` <Tip> 이 기능을 사용하려면 'accelerate' 버전 0.17.0 이상이 필요합니다. </Tip> ## Channels Last 메모리 형식 사용하기 Channels Last 메모리 형식은 차원 순서를 보존하는 메모리에서 NCHW 텐서 배열을 대체하는 방법입니다. Channels Last 텐서는 채널이 가장 조밀한 차원이 되는 방식으로 정렬됩니다(일명 픽셀당 이미지를 저장). 현재 모든 연산자 Channels Last 형식을 지원하는 것은 아니라 성능이 저하될 수 있으므로, 사용해보고 모델에 잘 작동하는지 확인하는 것이 좋습니다. 예를 들어 파이프라인의 UNet 모델이 channels Last 형식을 사용하도록 설정하려면 다음을 사용할 수 있습니다: ```python print(pipe.unet.conv_out.state_dict()["weight"].stride()) # (2880, 9, 3, 1) pipe.unet.to(memory_format=torch.channels_last) # in-place 연산 # 2번째 차원에서 스트라이드 1을 가지는 (2880, 1, 960, 320)로, 연산이 작동함을 증명합니다. print(pipe.unet.conv_out.state_dict()["weight"].stride()) ``` ## 추적(tracing) 추적은 모델을 통해 예제 입력 텐서를 통해 실행되는데, 해당 입력이 모델의 레이어를 통과할 때 호출되는 작업을 캡처하여 실행 파일 또는 'ScriptFunction'이 반환되도록 하고, 이는 just-in-time 컴파일로 최적화됩니다. UNet 모델을 추적하기 위해 다음을 사용할 수 있습니다: ```python import time import torch from diffusers import StableDiffusionPipeline import functools # torch 기울기 비활성화 torch.set_grad_enabled(False) # 변수 설정 n_experiments = 2 unet_runs_per_experiment = 50 # 입력 불러오기 def generate_inputs(): sample = torch.randn((2, 4, 64, 64), device="cuda", dtype=torch.float16) timestep = torch.rand(1, device="cuda", dtype=torch.float16) * 999 encoder_hidden_states = torch.randn((2, 77, 768), device="cuda", dtype=torch.float16) return sample, timestep, encoder_hidden_states pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, ).to("cuda") unet = pipe.unet unet.eval() unet.to(memory_format=torch.channels_last) # Channels Last 메모리 형식 사용 unet.forward = functools.partial(unet.forward, return_dict=False) # return_dict=False을 기본값으로 설정 # 워밍업 for _ in range(3): with torch.inference_mode(): inputs = generate_inputs() orig_output = unet(*inputs) # 추적 print("tracing..") unet_traced = torch.jit.trace(unet, inputs) unet_traced.eval() print("done tracing") # 워밍업 및 그래프 최적화 for _ in range(5): with torch.inference_mode(): inputs = generate_inputs() orig_output = unet_traced(*inputs) # 벤치마킹 with torch.inference_mode(): for _ in range(n_experiments): torch.cuda.synchronize() start_time = time.time() for _ in range(unet_runs_per_experiment): orig_output = unet_traced(*inputs) torch.cuda.synchronize() print(f"unet traced inference took {time.time() - start_time:.2f} seconds") for _ in range(n_experiments): torch.cuda.synchronize() start_time = time.time() for _ in range(unet_runs_per_experiment): orig_output = unet(*inputs) torch.cuda.synchronize() print(f"unet inference took {time.time() - start_time:.2f} seconds") # 모델 저장 unet_traced.save("unet_traced.pt") ``` 그 다음, 파이프라인의 `unet` 특성을 다음과 같이 추적된 모델로 바꿀 수 있습니다. 
```python from diffusers import StableDiffusionPipeline import torch from dataclasses import dataclass @dataclass class UNet2DConditionOutput: sample: torch.FloatTensor pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, ).to("cuda") # jitted unet 사용 unet_traced = torch.jit.load("unet_traced.pt") # pipe.unet 삭제 class TracedUNet(torch.nn.Module): def __init__(self): super().__init__() self.in_channels = pipe.unet.in_channels self.device = pipe.unet.device def forward(self, latent_model_input, t, encoder_hidden_states): sample = unet_traced(latent_model_input, t, encoder_hidden_states)[0] return UNet2DConditionOutput(sample=sample) pipe.unet = TracedUNet() with torch.inference_mode(): image = pipe([prompt] * 1, num_inference_steps=50).images[0] ``` ## Memory-efficient attention 어텐션 블록의 대역폭을 최적화하는 최근 작업으로 GPU 메모리 사용량이 크게 향상되고 향상되었습니다. @tridao의 가장 최근의 플래시 어텐션: [code](https://github.com/HazyResearch/flash-attention), [paper](https://arxiv.org/pdf/2205.14135.pdf). 배치 크기 1(프롬프트 1개)의 512x512 크기로 추론을 실행할 때 몇 가지 Nvidia GPU에서 얻은 속도 향상은 다음과 같습니다: | GPU | 기준 어텐션 FP16 | 메모리 효율적인 어텐션 FP16 | |------------------ |--------------------- |--------------------------------- | | NVIDIA Tesla T4 | 3.5it/s | 5.5it/s | | NVIDIA 3060 RTX | 4.6it/s | 7.8it/s | | NVIDIA A10G | 8.88it/s | 15.6it/s | | NVIDIA RTX A6000 | 11.7it/s | 21.09it/s | | NVIDIA TITAN RTX | 12.51it/s | 18.22it/s | | A100-SXM4-40GB | 18.6it/s | 29.it/s | | A100-SXM-80GB | 18.7it/s | 29.5it/s | 이를 활용하려면 다음을 만족해야 합니다: - PyTorch > 1.12 - Cuda 사용 가능 - [xformers 라이브러리를 설치함](xformers) ```python from diffusers import StableDiffusionPipeline import torch pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, ).to("cuda") pipe.enable_xformers_memory_efficient_attention() with torch.inference_mode(): sample = pipe("a small cat") # 선택: 이를 비활성화 하기 위해 다음을 사용할 수 있습니다. # pipe.disable_xformers_memory_efficient_attention() ```
diffusers/docs/source/ko/optimization/fp16.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/fp16.md", "repo_id": "diffusers", "token_count": 10813 }
93
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DreamBooth [DreamBooth](https://arxiv.org/abs/2208.12242)는 한 주제에 대한 적은 이미지(3~5개)만으로도 stable diffusion과 같이 text-to-image 모델을 개인화할 수 있는 방법입니다. 이를 통해 모델은 다양한 장면, 포즈 및 장면(뷰)에서 피사체에 대해 맥락화(contextualized)된 이미지를 생성할 수 있습니다. ![프로젝트 블로그에서의 DreamBooth 예시](https://dreambooth.github.io/DreamBooth_files/teaser_static.jpg) <small>에서의 Dreambooth 예시 <a href="https://dreambooth.github.io">project's blog.</a></small> 이 가이드는 다양한 GPU, Flax 사양에 대해 [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) 모델로 DreamBooth를 파인튜닝하는 방법을 보여줍니다. 더 깊이 파고들어 작동 방식을 확인하는 데 관심이 있는 경우, 이 가이드에 사용된 DreamBooth의 모든 학습 스크립트를 [여기](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)에서 찾을 수 있습니다. 스크립트를 실행하기 전에 라이브러리의 학습에 필요한 dependencies를 설치해야 합니다. 또한 `main` GitHub 브랜치에서 🧨 Diffusers를 설치하는 것이 좋습니다. ```bash pip install git+https://github.com/huggingface/diffusers pip install -U -r diffusers/examples/dreambooth/requirements.txt ``` xFormers는 학습에 필요한 요구 사항은 아니지만, 가능하면 [설치](../optimization/xformers)하는 것이 좋습니다. 학습 속도를 높이고 메모리 사용량을 줄일 수 있기 때문입니다. 모든 dependencies을 설정한 후 다음을 사용하여 [🤗 Accelerate](https://github.com/huggingface/accelerate/) 환경을 다음과 같이 초기화합니다: ```bash accelerate config ``` 별도 설정 없이 기본 🤗 Accelerate 환경을 설치하려면 다음을 실행합니다: ```bash accelerate config default ``` 또는 현재 환경이 노트북과 같은 대화형 셸을 지원하지 않는 경우 다음을 사용할 수 있습니다: ```py from accelerate.utils import write_basic_config write_basic_config() ``` ## 파인튜닝 <Tip warning={true}> DreamBooth 파인튜닝은 하이퍼파라미터에 매우 민감하고 과적합되기 쉽습니다. 적절한 하이퍼파라미터를 선택하는 데 도움이 되도록 다양한 권장 설정이 포함된 [심층 분석](https://huggingface.co/blog/dreambooth)을 살펴보는 것이 좋습니다. </Tip> <frameworkcontent> <pt> [몇 장의 강아지 이미지들](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ)로 DreamBooth를 시도해봅시다. 이를 다운로드해 디렉터리에 저장한 다음 `INSTANCE_DIR` 환경 변수를 해당 경로로 설정합니다: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path_to_training_images" export OUTPUT_DIR="path_to_saved_model" ``` 그런 다음, 다음 명령을 사용하여 학습 스크립트를 실행할 수 있습니다 (전체 학습 스크립트는 [여기](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py)에서 찾을 수 있습니다): ```bash accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a photo of sks dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --max_train_steps=400 ``` </pt> <jax> TPU에 액세스할 수 있거나 더 빠르게 훈련하고 싶다면 [Flax 학습 스크립트](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_flax.py)를 사용해 볼 수 있습니다. Flax 학습 스크립트는 gradient checkpointing 또는 gradient accumulation을 지원하지 않으므로, 메모리가 30GB 이상인 GPU가 필요합니다. 스크립트를 실행하기 전에 요구 사항이 설치되어 있는지 확인하십시오. 
```bash pip install -U -r requirements.txt ``` 그러면 다음 명령어로 학습 스크립트를 실행시킬 수 있습니다: ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export INSTANCE_DIR="path-to-instance-images" export OUTPUT_DIR="path-to-save-model" python train_dreambooth_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a photo of sks dog" \ --resolution=512 \ --train_batch_size=1 \ --learning_rate=5e-6 \ --max_train_steps=400 ``` </jax> </frameworkcontent> ### Prior-preserving(사전 보존) loss를 사용한 파인튜닝 과적합과 language drift를 방지하기 위해 사전 보존이 사용됩니다(관심이 있는 경우 [논문](https://arxiv.org/abs/2208.12242)을 참조하세요). 사전 보존을 위해 동일한 클래스의 다른 이미지를 학습 프로세스의 일부로 사용합니다. 좋은 점은 Stable Diffusion 모델 자체를 사용하여 이러한 이미지를 생성할 수 있다는 것입니다! 학습 스크립트는 생성된 이미지를 우리가 지정한 로컬 경로에 저장합니다. 저자들에 따르면 사전 보존을 위해 `num_epochs * num_samples`개의 이미지를 생성하는 것이 좋습니다. 200-300개에서 대부분 잘 작동합니다. <frameworkcontent> <pt> ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path_to_training_images" export CLASS_DIR="path_to_class_images" export OUTPUT_DIR="path_to_saved_model" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 ``` </pt> <jax> ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export INSTANCE_DIR="path-to-instance-images" export CLASS_DIR="path-to-class-images" export OUTPUT_DIR="path-to-save-model" python train_dreambooth_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --learning_rate=5e-6 \ --num_class_images=200 \ --max_train_steps=800 ``` </jax> </frameworkcontent> ## 텍스트 인코더와 and UNet로 파인튜닝하기 해당 스크립트를 사용하면 `unet`과 함께 `text_encoder`를 파인튜닝할 수 있습니다. 실험에서(자세한 내용은 [🧨 Diffusers를 사용해 DreamBooth로 Stable Diffusion 학습하기](https://huggingface.co/blog/dreambooth) 게시물을 확인하세요), 특히 얼굴 이미지를 생성할 때 훨씬 더 나은 결과를 얻을 수 있습니다. <Tip warning={true}> 텍스트 인코더를 학습시키려면 추가 메모리가 필요해 16GB GPU로는 동작하지 않습니다. 이 옵션을 사용하려면 최소 24GB VRAM이 필요합니다. 
</Tip> `--train_text_encoder` 인수를 학습 스크립트에 전달하여 `text_encoder` 및 `unet`을 파인튜닝할 수 있습니다: <frameworkcontent> <pt> ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path_to_training_images" export CLASS_DIR="path_to_class_images" export OUTPUT_DIR="path_to_saved_model" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_text_encoder \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --use_8bit_adam --gradient_checkpointing \ --learning_rate=2e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 ``` </pt> <jax> ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export INSTANCE_DIR="path-to-instance-images" export CLASS_DIR="path-to-class-images" export OUTPUT_DIR="path-to-save-model" python train_dreambooth_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_text_encoder \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --learning_rate=2e-6 \ --num_class_images=200 \ --max_train_steps=800 ``` </jax> </frameworkcontent> ## LoRA로 파인튜닝하기 DreamBooth에서 대규모 모델의 학습을 가속화하기 위한 파인튜닝 기술인 LoRA(Low-Rank Adaptation of Large Language Models)를 사용할 수 있습니다. 자세한 내용은 [LoRA 학습](training/lora#dreambooth) 가이드를 참조하세요. ### 학습 중 체크포인트 저장하기 Dreambooth로 훈련하는 동안 과적합하기 쉬우므로, 때때로 학습 중에 정기적인 체크포인트를 저장하는 것이 유용합니다. 중간 체크포인트 중 하나가 최종 모델보다 더 잘 작동할 수 있습니다! 체크포인트 저장 기능을 활성화하려면 학습 스크립트에 다음 인수를 전달해야 합니다: ```bash --checkpointing_steps=500 ``` 이렇게 하면 `output_dir`의 하위 폴더에 전체 학습 상태가 저장됩니다. 하위 폴더 이름은 접두사 `checkpoint-`로 시작하고 지금까지 수행된 step 수입니다. 예시로 `checkpoint-1500`은 1500 학습 step 후에 저장된 체크포인트입니다. #### 저장된 체크포인트에서 훈련 재개하기 저장된 체크포인트에서 훈련을 재개하려면, `--resume_from_checkpoint` 인수를 전달한 다음 사용할 체크포인트의 이름을 지정하면 됩니다. 특수 문자열 `"latest"`를 사용하여 저장된 마지막 체크포인트(즉, step 수가 가장 많은 체크포인트)에서 재개할 수도 있습니다. 예를 들어 다음은 1500 step 후에 저장된 체크포인트에서부터 학습을 재개합니다: ```bash --resume_from_checkpoint="checkpoint-1500" ``` 원하는 경우 일부 하이퍼파라미터를 조정할 수 있습니다. #### 저장된 체크포인트를 사용하여 추론 수행하기 저장된 체크포인트는 훈련 재개에 적합한 형식으로 저장됩니다. 여기에는 모델 가중치뿐만 아니라 옵티마이저, 데이터 로더 및 학습률의 상태도 포함됩니다. **`"accelerate>=0.16.0"`**이 설치된 경우 다음 코드를 사용하여 중간 체크포인트에서 추론을 실행합니다. ```python from diffusers import DiffusionPipeline, UNet2DConditionModel from transformers import CLIPTextModel import torch # 학습에 사용된 것과 동일한 인수(model, revision)로 파이프라인을 불러옵니다. model_id = "CompVis/stable-diffusion-v1-4" unet = UNet2DConditionModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/unet") # `args.train_text_encoder`로 학습한 경우면 텍스트 인코더를 꼭 불러오세요 text_encoder = CLIPTextModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/text_encoder") pipeline = DiffusionPipeline.from_pretrained(model_id, unet=unet, text_encoder=text_encoder, dtype=torch.float16) pipeline.to("cuda") # 추론을 수행하거나 저장하거나, 허브에 푸시합니다. pipeline.save_pretrained("dreambooth-pipeline") ``` If you have **`"accelerate<0.16.0"`** installed, you need to convert it to an inference pipeline first: ```python from accelerate import Accelerator from diffusers import DiffusionPipeline # 학습에 사용된 것과 동일한 인수(model, revision)로 파이프라인을 불러옵니다. 
model_id = "CompVis/stable-diffusion-v1-4" pipeline = DiffusionPipeline.from_pretrained(model_id) accelerator = Accelerator() # 초기 학습에 `--train_text_encoder`가 사용된 경우 text_encoder를 사용합니다. unet, text_encoder = accelerator.prepare(pipeline.unet, pipeline.text_encoder) # 체크포인트 경로로부터 상태를 복원합니다. 여기서는 절대 경로를 사용해야 합니다. accelerator.load_state("/sddata/dreambooth/daruma-v2-1/checkpoint-100") # unwrapped 모델로 파이프라인을 다시 빌드합니다.(.unet and .text_encoder로의 할당도 작동해야 합니다) pipeline = DiffusionPipeline.from_pretrained( model_id, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), ) # 추론을 수행하거나 저장하거나, 허브에 푸시합니다. pipeline.save_pretrained("dreambooth-pipeline") ``` ## 각 GPU 용량에서의 최적화 하드웨어에 따라 16GB에서 8GB까지 GPU에서 DreamBooth를 최적화하는 몇 가지 방법이 있습니다! ### xFormers [xFormers](https://github.com/facebookresearch/xformers)는 Transformers를 최적화하기 위한 toolbox이며, 🧨 Diffusers에서 사용되는[memory-efficient attention](https://facebookresearch.github.io/xformers/components/ops.html#module-xformers.ops) 메커니즘을 포함하고 있습니다. [xFormers를 설치](./optimization/xformers)한 다음 학습 스크립트에 다음 인수를 추가합니다: ```bash --enable_xformers_memory_efficient_attention ``` xFormers는 Flax에서 사용할 수 없습니다. ### 그래디언트 없음으로 설정 메모리 사용량을 줄일 수 있는 또 다른 방법은 [기울기 설정](https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html)을 0 대신 `None`으로 하는 것입니다. 그러나 이로 인해 특정 동작이 변경될 수 있으므로 문제가 발생하면 이 인수를 제거해 보십시오. 학습 스크립트에 다음 인수를 추가하여 그래디언트를 `None`으로 설정합니다. ```bash --set_grads_to_none ``` ### 16GB GPU Gradient checkpointing과 [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)의 8비트 옵티마이저의 도움으로, 16GB GPU에서 dreambooth를 훈련할 수 있습니다. bitsandbytes가 설치되어 있는지 확인하세요: ```bash pip install bitsandbytes ``` 그 다음, 학습 스크립트에 `--use_8bit_adam` 옵션을 명시합니다: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path_to_training_images" export CLASS_DIR="path_to_class_images" export OUTPUT_DIR="path_to_saved_model" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=2 --gradient_checkpointing \ --use_8bit_adam \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 ``` ### 12GB GPU 12GB GPU에서 DreamBooth를 실행하려면 gradient checkpointing, 8비트 옵티마이저, xFormers를 활성화하고 그래디언트를 `None`으로 설정해야 합니다. ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path-to-instance-images" export CLASS_DIR="path-to-class-images" export OUTPUT_DIR="path-to-save-model" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 --gradient_checkpointing \ --use_8bit_adam \ --enable_xformers_memory_efficient_attention \ --set_grads_to_none \ --learning_rate=2e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 ``` ### 8GB GPU에서 학습하기 8GB GPU에 대해서는 [DeepSpeed](https://www.deepspeed.ai/)를 사용해 일부 텐서를 VRAM에서 CPU 또는 NVME로 오프로드하여 더 적은 GPU 메모리로 학습할 수도 있습니다. 
🤗 Accelerate 환경을 구성하려면 다음 명령을 실행하세요: ```bash accelerate config ``` 환경 구성 중에 DeepSpeed를 사용할 것을 확인하세요. 그러면 DeepSpeed stage 2, fp16 혼합 정밀도를 결합하고 모델 매개변수와 옵티마이저 상태를 모두 CPU로 오프로드하면 8GB VRAM 미만에서 학습할 수 있습니다. 단점은 더 많은 시스템 RAM(약 25GB)이 필요하다는 것입니다. 추가 구성 옵션은 [DeepSpeed 문서](https://huggingface.co/docs/accelerate/usage_guides/deepspeed)를 참조하세요. 또한 기본 Adam 옵티마이저를 DeepSpeed의 최적화된 Adam 버전으로 변경해야 합니다. 이는 상당한 속도 향상을 위한 Adam인 [`deepspeed.ops.adam.DeepSpeedCPUAdam`](https://deepspeed.readthedocs.io/en/latest/optimizers.html#adam-cpu)입니다. `DeepSpeedCPUAdam`을 활성화하려면 시스템의 CUDA toolchain 버전이 PyTorch와 함께 설치된 것과 동일해야 합니다. 8비트 옵티마이저는 현재 DeepSpeed와 호환되지 않는 것 같습니다. 다음 명령으로 학습을 시작합니다: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path_to_training_images" export CLASS_DIR="path_to_class_images" export OUTPUT_DIR="path_to_saved_model" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --sample_batch_size=1 \ --gradient_accumulation_steps=1 --gradient_checkpointing \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 \ --mixed_precision=fp16 ``` ## 추론 모델을 학습한 후에는, 모델이 저장된 경로를 지정해 [`StableDiffusionPipeline`]로 추론을 수행할 수 있습니다. 프롬프트에 학습에 사용된 특수 `식별자`(이전 예시의 `sks`)가 포함되어 있는지 확인하세요. **`"accelerate>=0.16.0"`**이 설치되어 있는 경우 다음 코드를 사용하여 중간 체크포인트에서 추론을 실행할 수 있습니다: ```python from diffusers import StableDiffusionPipeline import torch model_id = "path_to_saved_model" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") prompt = "A photo of sks dog in a bucket" image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("dog-bucket.png") ``` [저장된 학습 체크포인트](#inference-from-a-saved-checkpoint)에서도 추론을 실행할 수도 있습니다.
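참고로, 위에서 언급했듯이 중간 체크포인트가 최종 모델보다 더 잘 동작하는 경우가 있으므로, 고정된 시드로 여러 체크포인트의 출력을 비교해 보는 것이 도움이 됩니다. 아래는 이를 위한 간단한 예시 스케치입니다. 체크포인트 경로와 스텝 수는 예시이므로 실제 `output_dir` 구조에 맞게 바꿔야 하며, `--train_text_encoder`로 학습했다면 텍스트 인코더도 같은 방식으로 함께 불러와야 합니다:

```python
import torch

from diffusers import DiffusionPipeline, UNet2DConditionModel

model_id = "CompVis/stable-diffusion-v1-4"
prompt = "A photo of sks dog in a bucket"

# 예시용 체크포인트 스텝입니다. 실제 저장된 체크포인트에 맞게 수정하세요.
for step in [400, 800]:
    unet = UNet2DConditionModel.from_pretrained(
        f"path_to_saved_model/checkpoint-{step}/unet", torch_dtype=torch.float16
    )
    pipe = DiffusionPipeline.from_pretrained(model_id, unet=unet, torch_dtype=torch.float16).to("cuda")

    # 같은 시드로 생성해 체크포인트 간 차이만 비교합니다.
    generator = torch.Generator(device="cuda").manual_seed(0)
    image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]
    image.save(f"dog-bucket-checkpoint-{step}.png")
```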
diffusers/docs/source/ko/training/dreambooth.md/0
{ "file_path": "diffusers/docs/source/ko/training/dreambooth.md", "repo_id": "diffusers", "token_count": 11698 }
94
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# 텍스트 기반 image-to-image 생성

[[open-in-colab]]

[`StableDiffusionImg2ImgPipeline`]을 사용하면 텍스트 프롬프트와 시작 이미지를 전달하여 새 이미지 생성의 조건을 지정할 수 있습니다.

시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요:

```bash
!pip install diffusers transformers ftfy accelerate
```

[`nitrosocke/Ghibli-Diffusion`](https://huggingface.co/nitrosocke/Ghibli-Diffusion)과 같은 사전학습된 stable diffusion 모델로 [`StableDiffusionImg2ImgPipeline`]을 생성하여 시작하세요.

```python
import torch
import requests
from PIL import Image
from io import BytesIO
from diffusers import StableDiffusionImg2ImgPipeline

device = "cuda"
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("nitrosocke/Ghibli-Diffusion", torch_dtype=torch.float16).to(
    device
)
```

초기 이미지를 다운로드하고 사전 처리하여 파이프라인에 전달할 수 있습니다:

```python
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"

response = requests.get(url)
init_image = Image.open(BytesIO(response.content)).convert("RGB")
init_image.thumbnail((768, 768))
init_image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/image_2_image_using_diffusers_cell_8_output_0.jpeg"/>
</div>

<Tip>

💡 `strength`는 입력 이미지에 추가되는 노이즈의 양을 제어하는 0.0에서 1.0 사이의 값입니다. 1.0에 가까운 값은 다양한 변형을 허용하지만 입력 이미지와 의미적으로 일치하지 않는 이미지를 생성합니다.

</Tip>

프롬프트를 정의하고(지브리 스타일(Ghibli-style)에 맞게 조정된 이 체크포인트의 경우 프롬프트 앞에 `ghibli style` 토큰을 붙여야 합니다) 파이프라인을 실행합니다:

```python
prompt = "ghibli style, a fantasy landscape with castles"
generator = torch.Generator(device=device).manual_seed(1024)
image = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ghibli-castles.png"/>
</div>

다른 스케줄러로 실험하여 출력에 어떤 영향을 미치는지 확인할 수도 있습니다:

```python
from diffusers import LMSDiscreteScheduler

lms = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.scheduler = lms
generator = torch.Generator(device=device).manual_seed(1024)
image = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lms-ghibli.png"/>
</div>

아래 Space를 확인하고 `strength` 값을 다르게 설정하여 이미지를 생성해 보세요. `strength`를 낮게 설정하면 원본 이미지와 더 유사한 이미지가 생성되는 것을 확인할 수 있습니다.

자유롭게 스케줄러를 [`LMSDiscreteScheduler`]로 전환하여 출력에 어떤 영향을 미치는지 확인해 보세요.

<iframe
	src="https://stevhliu-ghibli-img2img.hf.space"
	frameborder="0"
	width="850"
	height="500"
></iframe>
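참고로, 다음은 위에서 만든 `pipe`, `init_image`, `prompt`를 그대로 사용한다고 가정하고 여러 `strength` 값을 반복하면서 결과를 비교하는 간단한 예시 스케치입니다(값과 파일 이름은 예시입니다):

```python
for strength in [0.3, 0.5, 0.75]:
    # 시드를 고정해 strength 차이만 비교합니다.
    generator = torch.Generator(device=device).manual_seed(1024)
    image = pipe(prompt=prompt, image=init_image, strength=strength, guidance_scale=7.5, generator=generator).images[0]
    image.save(f"fantasy-landscape-strength-{strength}.png")
```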
diffusers/docs/source/ko/using-diffusers/img2img.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/img2img.md", "repo_id": "diffusers", "token_count": 2085 }
95
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

<p align="center">
    <br>
    <img src="https://raw.githubusercontent.com/huggingface/diffusers/77aadfee6a891ab9fcfb780f87c693f7a5beeb8e/docs/source/imgs/diffusers_library.jpg" width="400"/>
    <br>
</p>

# Diffusers

🤗 Diffusers é uma biblioteca de modelos de difusão de última geração para geração de imagens, áudio e até mesmo estruturas 3D de moléculas. Se você está procurando uma solução de geração simples ou quer treinar seu próprio modelo de difusão, 🤗 Diffusers é uma caixa de ferramentas modular que suporta ambos. Nossa biblioteca é desenhada com foco em [usabilidade em vez de desempenho](conceptual/philosophy#usability-over-performance), [simples em vez de fácil](conceptual/philosophy#simple-over-easy) e [customizável em vez de abstrações](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction).

A biblioteca tem três componentes principais:

- Pipelines de última geração para a geração em poucas linhas de código. Há muitos pipelines no 🤗 Diffusers, veja a tabela na [Visão geral](api/pipelines/overview) de pipelines para uma lista completa de pipelines disponíveis e as tarefas que eles resolvem.
- Intercambiáveis [agendadores de ruído](api/schedulers/overview) para balancear as compensações entre velocidade e qualidade de geração.
- [Modelos](api/models) pré-treinados que podem ser usados como se fossem blocos de construção, e combinados com agendadores, para criar seu próprio sistema de difusão de ponta a ponta.

<div class="mt-10">
  <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/tutorial_overview"
      ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutoriais</div>
      <p class="text-gray-700">Aprenda as competências fundamentais de que precisa para começar a gerar saídas, construir seu próprio sistema de difusão e treinar um modelo de difusão. Nós recomendamos começar por aqui se você está utilizando o 🤗 Diffusers pela primeira vez!</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./using-diffusers/loading_overview"
      ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Guias de utilização</div>
      <p class="text-gray-700">Guias práticos para ajudar você a carregar pipelines, modelos, e agendadores.
Você também aprenderá como usar os pipelines para tarefas específicas, controlar como as saídas são geradas, otimizar a velocidade de geração, e outras técnicas diferentes de treinamento.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual/philosophy" ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Guias conceituais</div> <p class="text-gray-700">Compreenda porque a biblioteca foi desenhada da forma que ela é, e aprenda mais sobre as diretrizes éticas e implementações de segurança para o uso da biblioteca.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./api/models/overview" ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Referência</div> <p class="text-gray-700">Descrições técnicas de como funcionam as classes e métodos do 🤗 Diffusers</p> </a> </div> </div>
diffusers/docs/source/pt/index.md/0
{ "file_path": "diffusers/docs/source/pt/index.md", "repo_id": "diffusers", "token_count": 1654 }
96
# -*- coding: utf-8 -*-
import inspect
from typing import Optional, Union

import numpy as np
import PIL.Image
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.torch_utils import randn_tensor


def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # Spherical linear interpolation between two latents/embeddings.
    # Track whether the inputs were torch tensors so the result can be converted
    # back to a tensor on the original device (previously this flag was only set
    # in the torch branch, which raised a NameError for numpy inputs).
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # The vectors are nearly parallel, fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
set_requires_grad(self.unet, False) def unfreeze_unet(self): set_requires_grad(self.unet, True) def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None): if not isinstance(image, torch.Tensor): raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}") image = image.to(device=device, dtype=dtype) if isinstance(generator, list): init_latents = [ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.vae.encode(image).latent_dist.sample(generator) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor init_latents = 0.18215 * init_latents init_latents = init_latents.repeat_interleave(batch_size, dim=0) noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def get_image_description(self, image): transformed_image = self.coca_transform(image).unsqueeze(0) with torch.no_grad(), torch.cuda.amp.autocast(): generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype)) generated = self.coca_tokenizer.decode(generated[0].cpu().numpy()) return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,") def get_clip_image_embeddings(self, image, batch_size): clip_image_input = self.feature_extractor.preprocess(image) clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half() image_embeddings_clip = self.clip_model.get_image_features(clip_image_features) image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True) image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0) return image_embeddings_clip @torch.enable_grad() def cond_fn( self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale, ): latents = latents.detach().requires_grad_() latent_model_input = self.scheduler.scale_model_input(latents, timestep) # predict the noise residual noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)): alpha_prod_t = self.scheduler.alphas_cumprod[timestep] beta_prod_t = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) fac = torch.sqrt(beta_prod_t) sample = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, LMSDiscreteScheduler): sigma = self.scheduler.sigmas[index] sample = latents - sigma * noise_pred else: raise ValueError(f"scheduler type {type(self.scheduler)} not supported") # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor sample = 1 / 0.18215 * sample image = 
self.vae.decode(sample).sample image = (image / 2 + 0.5).clamp(0, 1) image = transforms.Resize(self.feature_extractor_size)(image) image = self.normalize(image).to(latents.dtype) image_embeddings_clip = self.clip_model.get_image_features(image) image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True) loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale grads = -torch.autograd.grad(loss, latents)[0] if isinstance(self.scheduler, LMSDiscreteScheduler): latents = latents.detach() + grads * (sigma**2) noise_pred = noise_pred_original else: noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads return noise_pred, latents @torch.no_grad() def __call__( self, style_image: Union[torch.FloatTensor, PIL.Image.Image], content_image: Union[torch.FloatTensor, PIL.Image.Image], style_prompt: Optional[str] = None, content_prompt: Optional[str] = None, height: Optional[int] = 512, width: Optional[int] = 512, noise_strength: float = 0.6, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, batch_size: Optional[int] = 1, eta: float = 0.0, clip_guidance_scale: Optional[float] = 100, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, slerp_latent_style_strength: float = 0.8, slerp_prompt_style_strength: float = 0.1, slerp_clip_image_style_strength: float = 0.1, ): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if isinstance(generator, torch.Generator) and batch_size > 1: generator = [generator] + [None] * (batch_size - 1) coca_is_none = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] coca_is_none = [x[0] for x in coca_is_none if x[1]] coca_is_none_str = ", ".join(coca_is_none) # generate prompts with coca model if prompt is None if content_prompt is None: if len(coca_is_none): raise ValueError( f"Content prompt is None and CoCa [{coca_is_none_str}] is None." f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." ) content_prompt = self.get_image_description(content_image) if style_prompt is None: if len(coca_is_none): raise ValueError( f"Style prompt is None and CoCa [{coca_is_none_str}] is None." f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." 
) style_prompt = self.get_image_description(style_image) # get prompt text embeddings for content and style content_text_input = self.tokenizer( content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0] style_text_input = self.tokenizer( style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0] text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings) # duplicate text embeddings for each generation per prompt text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0) # set timesteps accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) extra_set_kwargs = {} if accepts_offset: extra_set_kwargs["offset"] = 1 self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device) latent_timestep = timesteps[:1].repeat(batch_size) # Preprocess image preprocessed_content_image = preprocess(content_image, width, height) content_latents = self.prepare_latents( preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator ) preprocessed_style_image = preprocess(style_image, width, height) style_latents = self.prepare_latents( preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator ) latents = slerp(slerp_latent_style_strength, content_latents, style_latents) if clip_guidance_scale > 0: content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size) style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size) clip_image_embeddings = slerp( slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: max_length = content_text_input.input_ids.shape[-1] uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt") uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8) latents_dtype = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( self.device ) else: latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(self.device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform classifier free guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: text_embeddings_for_guidance = ( text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings ) noise_pred, latents = self.cond_fn( latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale, ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample progress_bar.update() # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor latents = 1 / 0.18215 * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
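

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original pipeline): it wires the
# mixing pipeline together from a base Stable Diffusion checkpoint plus a CLIP
# model. The checkpoint names, image paths, and prompts below are assumptions;
# the optional CoCa captioning components are omitted, so both prompts must be
# passed explicitly.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPModel

    from diffusers import StableDiffusionPipeline

    clip_model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=torch.float16)
    base = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)

    pipeline = CLIPGuidedImagesMixingStableDiffusion(
        vae=base.vae,
        text_encoder=base.text_encoder,
        clip_model=clip_model,
        tokenizer=base.tokenizer,
        unet=base.unet,
        scheduler=base.scheduler,
        feature_extractor=base.feature_extractor,
    ).to("cuda")

    # Any pair of RGB images can be used; these file names are placeholders.
    content_image = Image.open("content.png").convert("RGB")
    style_image = Image.open("style.png").convert("RGB")

    result = pipeline(
        content_image=content_image,
        style_image=style_image,
        content_prompt="a photo of a dog",
        style_prompt="an oil painting",
        num_inference_steps=50,
        clip_guidance_scale=100,
        generator=torch.Generator(device="cuda").manual_seed(0),
    ).images[0]
    result.save("mixed.png")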
diffusers/examples/community/clip_guided_images_mixing_stable_diffusion.py/0
{ "file_path": "diffusers/examples/community/clip_guided_images_mixing_stable_diffusion.py", "repo_id": "diffusers", "token_count": 8926 }
97
import inspect import os import numpy as np import torch import torch.nn.functional as nnf from PIL import Image from torch.optim.adam import Adam from tqdm import tqdm from diffusers import StableDiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput def retrieve_timesteps( scheduler, num_inference_steps=None, device=None, timesteps=None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class NullTextPipeline(StableDiffusionPipeline): def get_noise_pred(self, latents, t, context): latents_input = torch.cat([latents] * 2) guidance_scale = 7.5 noise_pred = self.unet(latents_input, t, encoder_hidden_states=context)["sample"] noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) latents = self.prev_step(noise_pred, t, latents) return latents def get_noise_pred_single(self, latents, t, context): noise_pred = self.unet(latents, t, encoder_hidden_states=context)["sample"] return noise_pred @torch.no_grad() def image2latent(self, image_path): image = Image.open(image_path).convert("RGB") image = np.array(image) image = torch.from_numpy(image).float() / 127.5 - 1 image = image.permute(2, 0, 1).unsqueeze(0).to(self.device) latents = self.vae.encode(image)["latent_dist"].mean latents = latents * 0.18215 return latents @torch.no_grad() def latent2image(self, latents): latents = 1 / 0.18215 * latents.detach() image = self.vae.decode(latents)["sample"].detach() image = self.processor.postprocess(image, output_type="pil")[0] return image def prev_step(self, model_output, timestep, sample): prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps alpha_prod_t = self.scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) beta_prod_t = 1 - alpha_prod_t 
pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction return prev_sample def next_step(self, model_output, timestep, sample): timestep, next_timestep = ( min(timestep - self.scheduler.config.num_train_timesteps // self.num_inference_steps, 999), timestep, ) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep] beta_prod_t = 1 - alpha_prod_t next_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output next_sample = alpha_prod_t_next**0.5 * next_original_sample + next_sample_direction return next_sample def null_optimization(self, latents, context, num_inner_steps, epsilon): uncond_embeddings, cond_embeddings = context.chunk(2) uncond_embeddings_list = [] latent_cur = latents[-1] bar = tqdm(total=num_inner_steps * self.num_inference_steps) for i in range(self.num_inference_steps): uncond_embeddings = uncond_embeddings.clone().detach() uncond_embeddings.requires_grad = True optimizer = Adam([uncond_embeddings], lr=1e-2 * (1.0 - i / 100.0)) latent_prev = latents[len(latents) - i - 2] t = self.scheduler.timesteps[i] with torch.no_grad(): noise_pred_cond = self.get_noise_pred_single(latent_cur, t, cond_embeddings) for j in range(num_inner_steps): noise_pred_uncond = self.get_noise_pred_single(latent_cur, t, uncond_embeddings) noise_pred = noise_pred_uncond + 7.5 * (noise_pred_cond - noise_pred_uncond) latents_prev_rec = self.prev_step(noise_pred, t, latent_cur) loss = nnf.mse_loss(latents_prev_rec, latent_prev) optimizer.zero_grad() loss.backward() optimizer.step() loss_item = loss.item() bar.update() if loss_item < epsilon + i * 2e-5: break for j in range(j + 1, num_inner_steps): bar.update() uncond_embeddings_list.append(uncond_embeddings[:1].detach()) with torch.no_grad(): context = torch.cat([uncond_embeddings, cond_embeddings]) latent_cur = self.get_noise_pred(latent_cur, t, context) bar.close() return uncond_embeddings_list @torch.no_grad() def ddim_inversion_loop(self, latent, context): self.scheduler.set_timesteps(self.num_inference_steps) _, cond_embeddings = context.chunk(2) all_latent = [latent] latent = latent.clone().detach() with torch.no_grad(): for i in range(0, self.num_inference_steps): t = self.scheduler.timesteps[len(self.scheduler.timesteps) - i - 1] noise_pred = self.unet(latent, t, encoder_hidden_states=cond_embeddings)["sample"] latent = self.next_step(noise_pred, t, latent) all_latent.append(latent) return all_latent def get_context(self, prompt): uncond_input = self.tokenizer( [""], padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt" ) uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] text_input = self.tokenizer( [prompt], padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] context = torch.cat([uncond_embeddings, text_embeddings]) return context def invert( self, image_path: str, prompt: str, num_inner_steps=10, early_stop_epsilon=1e-6, num_inference_steps=50 ): self.num_inference_steps = num_inference_steps context = self.get_context(prompt) latent = self.image2latent(image_path) 
ddim_latents = self.ddim_inversion_loop(latent, context) if os.path.exists(image_path + ".pt"): uncond_embeddings = torch.load(image_path + ".pt") else: uncond_embeddings = self.null_optimization(ddim_latents, context, num_inner_steps, early_stop_epsilon) uncond_embeddings = torch.stack(uncond_embeddings, 0) torch.save(uncond_embeddings, image_path + ".pt") return ddim_latents[-1], uncond_embeddings @torch.no_grad() def __call__( self, prompt, uncond_embeddings, inverted_latent, num_inference_steps: int = 50, timesteps=None, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, generator=None, latents=None, prompt_embeds=None, negative_prompt_embeds=None, output_type="pil", ): self._guidance_scale = guidance_scale # 0. Default height and width to unet height = self.unet.config.sample_size * self.vae_scale_factor width = self.unet.config.sample_size * self.vae_scale_factor # to deal with lora scaling and other possible forward hook callback_steps = None # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ) # 2. Define call parameter device = self._execution_device # 3. Encode input prompt prompt_embeds, _ = self.encode_prompt( prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 4. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) latents = inverted_latent with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): noise_pred_uncond = self.unet(latents, t, encoder_hidden_states=uncond_embeddings[i])["sample"] noise_pred = self.unet(latents, t, encoder_hidden_states=prompt_embeds)["sample"] noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] progress_bar.update() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ 0 ] else: image = latents image = self.image_processor.postprocess( image, output_type=output_type, do_denormalize=[True] * image.shape[0] ) # Offload all models self.maybe_free_model_hooks() return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=False)
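

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): invert a real image
# with DDIM inversion + null-text optimization, then reconstruct it with the
# optimized unconditional embeddings. The checkpoint id and image path are
# assumptions; the scheduler must be a DDIMScheduler for the inversion math
# implemented above to hold.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from diffusers import DDIMScheduler

    model_id = "CompVis/stable-diffusion-v1-4"
    scheduler = DDIMScheduler(
        beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False
    )
    pipeline = NullTextPipeline.from_pretrained(model_id, scheduler=scheduler).to("cuda")

    prompt = "a photo of a dog"
    # The optimized unconditional embeddings are cached next to the image as "<image>.pt".
    inverted_latent, uncond_embeddings = pipeline.invert(
        "input.png", prompt, num_inner_steps=10, num_inference_steps=50
    )
    image = pipeline(
        prompt, uncond_embeddings, inverted_latent, guidance_scale=7.5, num_inference_steps=50
    ).images[0]
    image.save("reconstructed.png")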
diffusers/examples/community/pipeline_null_text_inversion.py/0
{ "file_path": "diffusers/examples/community/pipeline_null_text_inversion.py", "repo_id": "diffusers", "token_count": 5423 }
98
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class SpeechToImagePipeline(DiffusionPipeline): def __init__( self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ): super().__init__() if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, ) def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): if slice_size == "auto": slice_size = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(slice_size) def disable_attention_slicing(self): self.enable_attention_slicing(None) @torch.no_grad() def __call__( self, audio, sampling_rate=16_000, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ): inputs = self.speech_processor.feature_extractor( audio, return_tensors="pt", sampling_rate=sampling_rate ).input_features.to(self.device) predicted_ids = self.speech_model.generate(inputs, max_length=480_000) prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[ 0 ] if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( 
f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) latents_dtype = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( self.device ) else: latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(num_inference_steps) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand timesteps_tensor = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) latents = 1 / 0.18215 * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return image return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
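

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): transcribe a short
# speech clip with Whisper and feed the transcription into Stable Diffusion.
# The Whisper / Stable Diffusion checkpoints, the audio file name, and the use
# of librosa for loading are assumptions; any 16 kHz mono float waveform works.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import librosa

    from diffusers import StableDiffusionPipeline

    speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
    speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")

    base = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    pipeline = SpeechToImagePipeline(
        speech_model=speech_model,
        speech_processor=speech_processor,
        vae=base.vae,
        text_encoder=base.text_encoder,
        tokenizer=base.tokenizer,
        unet=base.unet,
        scheduler=base.scheduler,
        safety_checker=base.safety_checker,
        feature_extractor=base.feature_extractor,
    ).to("cuda")

    # Load a waveform at the 16 kHz sampling rate expected by Whisper.
    audio, sampling_rate = librosa.load("speech.wav", sr=16_000)
    image = pipeline(audio, sampling_rate=sampling_rate, num_inference_steps=50).images[0]
    image.save("speech_to_image.png")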
diffusers/examples/community/speech_to_image_diffusion.py/0
{ "file_path": "diffusers/examples/community/speech_to_image_diffusion.py", "repo_id": "diffusers", "token_count": 5146 }
99
# Copyright 2023 Peter Willemsen <[email protected]>. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def make_transparency_mask(size, overlap_pixels, remove_borders=[]): size_x = size[0] - overlap_pixels * 2 size_y = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels mask = np.ones((size_y, size_x), dtype=np.uint8) * 255 mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0) if "l" in remove_borders: mask = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: mask = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: mask = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: mask = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def clamp(n, smallest, largest): return max(smallest, min(n, largest)) def clamp_rect(rect: [int], min: [int], max: [int]): return ( clamp(rect[0], min[0], max[0]), clamp(rect[1], min[1], max[1]), clamp(rect[2], min[0], max[0]), clamp(rect[3], min[1], max[1]), ) def add_overlap_rect(rect: [int], overlap: int, image_size: [int]): rect = list(rect) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]]) return rect def squeeze_tile(tile, original_image, original_slice, slice_x): result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1])) result.paste( original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ), (0, 0), ) result.paste(tile, (original_slice, 0)) return result def unsqueeze_tile(tile, original_image_slice): crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) tile = tile.crop(crop_rect) return tile def next_divisible(n, d): divisor = n % d return n - divisor class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline): r""" Pipeline for tile-based text-guided image super-resolution using Stable Diffusion 2, trading memory for compute to create gigantic images. This model inherits from [`StableDiffusionUpscalePipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. 
Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. low_res_scheduler ([`SchedulerMixin`]): A scheduler used to add initial noise to the low res conditioning image. It must be an instance of [`DDPMScheduler`]. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], max_noise_level: int = 350, ): super().__init__( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, ) def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs): torch.manual_seed(0) crop_rect = ( min(image.size[0] - (tile_size + original_image_slice), x * tile_size), min(image.size[1] - (tile_size + original_image_slice), y * tile_size), min(image.size[0], (x + 1) * tile_size), min(image.size[1], (y + 1) * tile_size), ) crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size) tile = image.crop(crop_rect_with_overlap) translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] translated_slice_x = translated_slice_x - (original_image_slice / 2) translated_slice_x = max(0, translated_slice_x) to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x) orig_input_size = to_input.size to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC) upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0] upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC) upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice) upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC) remove_borders = [] if x == 0: remove_borders.append("l") elif crop_rect[2] == image.size[0]: remove_borders.append("r") if y == 0: remove_borders.append("t") elif crop_rect[3] == image.size[1]: remove_borders.append("b") transparency_mask = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders ), mode="L", ) final_image.paste( upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask ) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_inference_steps: int = 75, guidance_scale: float = 9.0, noise_level: int = 50, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, callback: Optional[Callable[[int, int, 
torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            image (`PIL.Image.Image` or List[`PIL.Image.Image`] or `torch.FloatTensor`):
                `Image`, or tensor representing an image batch which will be upscaled.
            num_inference_steps (`int`, *optional*, defaults to 75):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 9.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
                text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            tile_size (`int`, *optional*):
                The size of the tiles. Too large a value can result in an OOM error.
            tile_border (`int`, *optional*):
                The number of pixels around a tile to consider (bigger means fewer seams, but too big can lead to an
                OOM error).
            original_image_slice (`int`, *optional*):
                The amount of pixels of the original image to calculate with the current tile (bigger means more depth
                is preserved, less blur occurs in the final image, but too big can lead to an OOM error or loss in
                detail).
            callback (`Callable`, *optional*):
                A callback function that takes a single argument, a dict, which contains the (partially) processed
                image under "image", as well as the progress (0 to 1, where 1 is completed) under "progress".

        Returns: A PIL.Image that is 4 times larger than the original input image.
        """
""" final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4)) tcx = math.ceil(image.size[0] / tile_size) tcy = math.ceil(image.size[1] / tile_size) total_tile_count = tcx * tcy current_count = 0 for y in range(tcy): for x in range(tcx): self._process_tile( original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ) current_count += 1 if callback is not None: callback({"progress": current_count / total_tile_count, "image": final_image}) return final_image def main(): # Run a demo model_id = "stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16) pipe = pipe.to("cuda") image = Image.open("../../docs/source/imgs/diffusers_library.jpg") def callback(obj): print(f"progress: {obj['progress']:.4f}") obj["image"].save("diffusers_library_progress.jpg") final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback) final_image.save("diffusers_library.jpg") if __name__ == "__main__": main()
diffusers/examples/community/tiled_upscaling.py/0
{ "file_path": "diffusers/examples/community/tiled_upscaling.py", "repo_id": "diffusers", "token_count": 5905 }
100
# Kandinsky2.2 text-to-image fine-tuning

Kandinsky 2.2 includes a prior pipeline that generates image embeddings from text prompts, and a decoder pipeline that generates the output image based on the image embeddings. We provide `train_text_to_image_prior.py` and `train_text_to_image_decoder.py` scripts to show you how to fine-tune the Kandinsky prior and decoder models separately based on your own dataset. To achieve the best results, you should fine-tune **_both_** your prior and decoder models.

___Note___: ___This script is experimental. The script fine-tunes the whole model and oftentimes the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset.___

## Running locally with PyTorch

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd into the example folder and run

```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

For this example we want to directly store the trained weights on the Hub, so we need to be logged in and add the `--push_to_hub` flag.

___

### Pokemon example

For all our examples, we will directly store the trained weights on the Hub, so we need to be logged in and add the `--push_to_hub` flag. In order to do that, you have to be a registered user on the 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to the [User Access Tokens](https://huggingface.co/docs/hub/security-tokens) guide.

Run the following command to authenticate your token

```bash
huggingface-cli login
```

We also use [Weights and Biases](https://docs.wandb.ai/quickstart) logging by default, because it is really useful for monitoring the training progress by regularly generating sample images during training. To install wandb, run

```bash
pip install wandb
```

To disable wandb logging, remove the `--report_to="wandb"` and `--validation_prompts="A robot pokemon, 4k photo"` flags from the examples below.

#### Fine-tune decoder

<br>

<!-- accelerate_snippet_start -->
```bash
export DATASET_NAME="lambdalabs/pokemon-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
  --dataset_name=$DATASET_NAME \
  --resolution=768 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --checkpoints_total_limit=3 \
  --lr_scheduler="constant" --lr_warmup_steps=0 \
  --validation_prompts="A robot pokemon, 4k photo" \
  --report_to="wandb" \
  --push_to_hub \
  --output_dir="kandi2-decoder-pokemon-model"
```
<!-- accelerate_snippet_end -->

To train on your own training files, prepare the dataset according to the format required by `datasets`. You can find the instructions for how to do that in the [ImageFolder with metadata](https://huggingface.co/docs/datasets/en/image_load#imagefolder-with-metadata) guide.
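Before launching a long training run, it can be worth sanity-checking that your local dataset is picked up correctly by `datasets`. The snippet below is only an illustrative sketch, assuming an image folder with a `metadata.jsonl` file as described in the guide above; the folder name and the `text` caption column are placeholders that you should adapt to your own data.

```python
from datasets import load_dataset

# Assumed layout (placeholder names):
#   my_pokemon_dataset/
#     metadata.jsonl   # one JSON object per line with "file_name" and "text"
#     0001.png
#     0002.png
#     ...
dataset = load_dataset("imagefolder", data_dir="my_pokemon_dataset", split="train")

# The training script expects an image column and a caption column ("image" and "text" by default).
print(dataset)             # column names and number of examples
print(dataset[0]["text"])  # the caption read from metadata.jsonl
```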
If you wish to use custom loading logic, you should modify the script and we have left pointers for that in the training script. ```bash export TRAIN_DIR="path_to_your_dataset" accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \ --train_data_dir=$TRAIN_DIR \ --resolution=768 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --checkpoints_total_limit=3 \ --lr_scheduler="constant" --lr_warmup_steps=0 \ --validation_prompts="A robot pokemon, 4k photo" \ --report_to="wandb" \ --push_to_hub \ --output_dir="kandi22-decoder-pokemon-model" ``` Once the training is finished the model will be saved in the `output_dir` specified in the command. In this example it's `kandi22-decoder-pokemon-model`. To load the fine-tuned model for inference just pass that path to `AutoPipelineForText2Image` ```python from diffusers import AutoPipelineForText2Image import torch pipe = AutoPipelineForText2Image.from_pretrained(output_dir, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt='A robot pokemon, 4k photo' images = pipe(prompt=prompt).images images[0].save("robot-pokemon.png") ``` Checkpoints only save the unet, so to run inference from a checkpoint, just load the unet ```python from diffusers import AutoPipelineForText2Image, UNet2DConditionModel model_path = "path_to_saved_model" unet = UNet2DConditionModel.from_pretrained(model_path + "/checkpoint-<N>/unet") pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", unet=unet, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() image = pipe(prompt="A robot pokemon, 4k photo").images[0] image.save("robot-pokemon.png") ``` #### Fine-tune prior You can fine-tune the Kandinsky prior model with `train_text_to_image_prior.py` script. Note that we currently do not support `--gradient_checkpointing` for prior model fine-tuning. <br> <!-- accelerate_snippet_start --> ```bash export DATASET_NAME="lambdalabs/pokemon-blip-captions" accelerate launch --mixed_precision="fp16" train_text_to_image_prior.py \ --dataset_name=$DATASET_NAME \ --resolution=768 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --checkpoints_total_limit=3 \ --lr_scheduler="constant" --lr_warmup_steps=0 \ --validation_prompts="A robot pokemon, 4k photo" \ --report_to="wandb" \ --push_to_hub \ --output_dir="kandi2-prior-pokemon-model" ``` <!-- accelerate_snippet_end --> To perform inference with the fine-tuned prior model, you will need to first create a prior pipeline by passing the `output_dir` to `DiffusionPipeline`. Then create a `KandinskyV22CombinedPipeline` from a pretrained or fine-tuned decoder checkpoint along with all the modules of the prior pipeline you just created. 
```python
from diffusers import AutoPipelineForText2Image, DiffusionPipeline
import torch

pipe_prior = DiffusionPipeline.from_pretrained(output_dir, torch_dtype=torch.float16)
prior_components = {"prior_" + k: v for k, v in pipe_prior.components.items()}
pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", **prior_components, torch_dtype=torch.float16)

pipe.enable_model_cpu_offload()

prompt = "A robot pokemon, 4k photo"
negative_prompt = ""  # optionally describe what the image should not contain
images = pipe(prompt=prompt, negative_prompt=negative_prompt).images
images[0]
```

If you want to use a fine-tuned decoder checkpoint along with your fine-tuned prior checkpoint, you can simply replace "kandinsky-community/kandinsky-2-2-decoder" in the above code with your custom model repo name. Note that in order to be able to create a `KandinskyV22CombinedPipeline`, your model repository needs to have a prior tag. If you have created your model repo using our training script, the prior tag is automatically included.

#### Training with multiple GPUs

`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command:

```bash
export DATASET_NAME="lambdalabs/pokemon-blip-captions"

accelerate launch --mixed_precision="fp16" --multi_gpu train_text_to_image_decoder.py \
  --dataset_name=$DATASET_NAME \
  --resolution=768 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --checkpoints_total_limit=3 \
  --lr_scheduler="constant" --lr_warmup_steps=0 \
  --validation_prompts="A robot pokemon, 4k photo" \
  --report_to="wandb" \
  --push_to_hub \
  --output_dir="kandi2-decoder-pokemon-model"
```

#### Training with Min-SNR weighting

We support training with the Min-SNR weighting strategy proposed in [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://arxiv.org/abs/2303.09556), which helps achieve faster convergence by rebalancing the loss. To enable it, pass the `--snr_gamma` argument and set it to the recommended value of 5.0.

## Training with LoRA

Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:

- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
- LoRA attention layers allow you to control the extent to which the model is adapted to the new training images via a `scale` parameter.

[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository.

With LoRA, it's possible to fine-tune Kandinsky 2.2 on a custom image-caption pair dataset on consumer GPUs like a Tesla T4 or V100.

### Training

First, you need to set up your development environment as explained in the [installation](#running-locally-with-pytorch) section above.
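Before the training commands, here is a minimal, self-contained sketch of the LoRA idea described above, i.e. a frozen linear layer plus a trainable low-rank update. This is purely illustrative and is *not* the implementation used by diffusers or by these scripts (which inject LoRA through their attention processors); all names and dimensions below are made up for the example.

```python
import torch
import torch.nn as nn


class LoRALinear(nn.Module):
    """A frozen linear layer plus a trainable low-rank update: y = W x + scale * B(A(x))."""

    def __init__(self, base: nn.Linear, rank: int = 4, scale: float = 1.0):
        super().__init__()
        self.base = base
        self.base.requires_grad_(False)  # pretrained weights stay frozen
        self.lora_down = nn.Linear(base.in_features, rank, bias=False)
        self.lora_up = nn.Linear(rank, base.out_features, bias=False)
        nn.init.normal_(self.lora_down.weight, std=1.0 / rank)
        nn.init.zeros_(self.lora_up.weight)  # training starts from the unchanged base model
        self.scale = scale

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.base(x) + self.scale * self.lora_up(self.lora_down(x))


# Only the rank-decomposition matrices are trained: for a 1280x1280 layer and rank 4,
# that is 2 * 4 * 1280 = 10240 parameters instead of 1280 * 1280.
layer = LoRALinear(nn.Linear(1280, 1280), rank=4)
trainable = sum(p.numel() for p in layer.parameters() if p.requires_grad)
print(trainable)  # 10240
```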
Make sure to set the `DATASET_NAME` environment variable. Here, we will use [Kandinsky 2.2](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder) and the [Pokemons dataset](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions).

#### Train decoder

```bash
export DATASET_NAME="lambdalabs/pokemon-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image_decoder_lora.py \
  --dataset_name=$DATASET_NAME --caption_column="text" \
  --resolution=768 \
  --train_batch_size=1 \
  --num_train_epochs=100 --checkpointing_steps=5000 \
  --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
  --seed=42 \
  --rank=4 \
  --gradient_checkpointing \
  --output_dir="kandi22-decoder-pokemon-lora" \
  --validation_prompt="cute dragon creature" --report_to="wandb" \
  --push_to_hub
```

#### Train prior

```bash
export DATASET_NAME="lambdalabs/pokemon-blip-captions"

accelerate launch --mixed_precision="fp16" train_text_to_image_prior_lora.py \
  --dataset_name=$DATASET_NAME --caption_column="text" \
  --resolution=768 \
  --train_batch_size=1 \
  --num_train_epochs=100 --checkpointing_steps=5000 \
  --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
  --seed=42 \
  --rank=4 \
  --output_dir="kandi22-prior-pokemon-lora" \
  --validation_prompt="cute dragon creature" --report_to="wandb" \
  --push_to_hub
```

**___Note: When using LoRA we can use a much higher learning rate compared to non-LoRA fine-tuning. Here we use *1e-4* instead of the usual *1e-5*. Also, by using LoRA, it's possible to run the above scripts on consumer GPUs like a T4 or V100.___**

### Inference

#### Inference using fine-tuned LoRA checkpoint for decoder

Once you have trained a Kandinsky decoder model using the above command, inference can be done with `AutoPipelineForText2Image` after loading the trained LoRA weights. You need to pass the `output_dir` for loading the LoRA weights, which in this case is `kandi22-decoder-pokemon-lora`.

```python
from diffusers import AutoPipelineForText2Image
import torch

pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
pipe.unet.load_attn_procs(output_dir)
pipe.enable_model_cpu_offload()

prompt = "A robot pokemon, 4k photo"
image = pipe(prompt=prompt).images[0]
image.save("robot_pokemon.png")
```

#### Inference using fine-tuned LoRA checkpoint for prior

```python
from diffusers import AutoPipelineForText2Image
import torch

pipe = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
pipe.prior_prior.load_attn_procs(output_dir)
pipe.enable_model_cpu_offload()

prompt = "A robot pokemon, 4k photo"
image = pipe(prompt=prompt).images[0]
image.save("robot_pokemon.png")
```

### Training with xFormers

You can enable memory efficient attention by [installing xFormers](https://huggingface.co/docs/diffusers/main/en/optimization/xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script.

xFormers training is not available for fine-tuning the prior model.

**Note**: According to [this issue](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212), xFormers `v0.0.16` cannot be used for training in some GPUs. If you observe that problem, please install a development version as indicated in that comment.
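Finally, if you have fine-tuned LoRA weights for both the prior and the decoder, the two `load_attn_procs` calls shown above can be combined on a single pipeline. This is just a sketch; the two directories are placeholders for your own `--output_dir` values from the training commands above.

```python
from diffusers import AutoPipelineForText2Image
import torch

pipe = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
# LoRA weights from train_text_to_image_prior_lora.py ...
pipe.prior_prior.load_attn_procs("kandi22-prior-pokemon-lora")
# ... and from train_text_to_image_decoder_lora.py.
pipe.unet.load_attn_procs("kandi22-decoder-pokemon-lora")
pipe.enable_model_cpu_offload()

image = pipe(prompt="A robot pokemon, 4k photo").images[0]
image.save("robot_pokemon.png")
```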
diffusers/examples/kandinsky2_2/text_to_image/README.md/0
{ "file_path": "diffusers/examples/kandinsky2_2/text_to_image/README.md", "repo_id": "diffusers", "token_count": 4394 }
101
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import functools import gc import itertools import json import logging import math import os import random import shutil from pathlib import Path from typing import List, Optional, Union import accelerate import cv2 import numpy as np import torch import torch.utils.checkpoint import transformers import webdataset as wds from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from braceexpand import braceexpand from huggingface_hub import create_repo, upload_folder from packaging import version from PIL import Image from torch.utils.data import default_collate from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, DPTFeatureExtractor, DPTForDepthEstimation, PretrainedConfig from webdataset.tariterators import ( base_plus_ext, tar_file_expander, url_opener, valid_sample, ) import diffusers from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, StableDiffusionXLControlNetPipeline, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available MAX_SEQ_LENGTH = 77 if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.18.0.dev0") logger = get_logger(__name__) def filter_keys(key_set): def _f(dictionary): return {k: v for k, v in dictionary.items() if k in key_set} return _f def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None): """Return function over iterator that groups key, value pairs into samples. 
:param keys: function that splits the key into key and extension (base_plus_ext) :param lcase: convert suffixes to lower case (Default value = True) """ current_sample = None for filesample in data: assert isinstance(filesample, dict) fname, value = filesample["fname"], filesample["data"] prefix, suffix = keys(fname) if prefix is None: continue if lcase: suffix = suffix.lower() # FIXME webdataset version throws if suffix in current_sample, but we have a potential for # this happening in the current LAION400m dataset if a tar ends with same prefix as the next # begins, rare, but can happen since prefix aren't unique across tar files in that dataset if current_sample is None or prefix != current_sample["__key__"] or suffix in current_sample: if valid_sample(current_sample): yield current_sample current_sample = {"__key__": prefix, "__url__": filesample["__url__"]} if suffixes is None or suffix in suffixes: current_sample[suffix] = value if valid_sample(current_sample): yield current_sample def tarfile_to_samples_nothrow(src, handler=wds.warn_and_continue): # NOTE this is a re-impl of the webdataset impl with group_by_keys that doesn't throw streams = url_opener(src, handler=handler) files = tar_file_expander(streams, handler=handler) samples = group_by_keys_nothrow(files, handler=handler) return samples def control_transform(image): image = np.array(image) low_threshold = 100 high_threshold = 200 image = cv2.Canny(image, low_threshold, high_threshold) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) control_image = Image.fromarray(image) return control_image def canny_image_transform(example, resolution=1024): image = example["image"] image = transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BILINEAR)(image) # get crop coordinates c_top, c_left, _, _ = transforms.RandomCrop.get_params(image, output_size=(resolution, resolution)) image = transforms.functional.crop(image, c_top, c_left, resolution, resolution) control_image = control_transform(image) image = transforms.ToTensor()(image) image = transforms.Normalize([0.5], [0.5])(image) control_image = transforms.ToTensor()(control_image) example["image"] = image example["control_image"] = control_image example["crop_coords"] = (c_top, c_left) return example def depth_image_transform(example, feature_extractor, resolution=1024): image = example["image"] image = transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BILINEAR)(image) # get crop coordinates c_top, c_left, _, _ = transforms.RandomCrop.get_params(image, output_size=(resolution, resolution)) image = transforms.functional.crop(image, c_top, c_left, resolution, resolution) control_image = feature_extractor(images=image, return_tensors="pt").pixel_values.squeeze(0) image = transforms.ToTensor()(image) image = transforms.Normalize([0.5], [0.5])(image) example["image"] = image example["control_image"] = control_image example["crop_coords"] = (c_top, c_left) return example class WebdatasetFilter: def __init__(self, min_size=1024, max_pwatermark=0.5): self.min_size = min_size self.max_pwatermark = max_pwatermark def __call__(self, x): try: if "json" in x: x_json = json.loads(x["json"]) filter_size = (x_json.get("original_width", 0.0) or 0.0) >= self.min_size and x_json.get( "original_height", 0 ) >= self.min_size filter_watermark = (x_json.get("pwatermark", 1.0) or 1.0) <= self.max_pwatermark return filter_size and filter_watermark else: return False except Exception: return False class Text2ImageDataset: def __init__( 
self, train_shards_path_or_url: Union[str, List[str]], eval_shards_path_or_url: Union[str, List[str]], num_train_examples: int, per_gpu_batch_size: int, global_batch_size: int, num_workers: int, resolution: int = 256, center_crop: bool = True, random_flip: bool = False, shuffle_buffer_size: int = 1000, pin_memory: bool = False, persistent_workers: bool = False, control_type: str = "canny", feature_extractor: Optional[DPTFeatureExtractor] = None, ): if not isinstance(train_shards_path_or_url, str): train_shards_path_or_url = [list(braceexpand(urls)) for urls in train_shards_path_or_url] # flatten list using itertools train_shards_path_or_url = list(itertools.chain.from_iterable(train_shards_path_or_url)) if not isinstance(eval_shards_path_or_url, str): eval_shards_path_or_url = [list(braceexpand(urls)) for urls in eval_shards_path_or_url] # flatten list using itertools eval_shards_path_or_url = list(itertools.chain.from_iterable(eval_shards_path_or_url)) def get_orig_size(json): return (int(json.get("original_width", 0.0)), int(json.get("original_height", 0.0))) if control_type == "canny": image_transform = functools.partial(canny_image_transform, resolution=resolution) elif control_type == "depth": image_transform = functools.partial( depth_image_transform, feature_extractor=feature_extractor, resolution=resolution ) processing_pipeline = [ wds.decode("pil", handler=wds.ignore_and_continue), wds.rename( image="jpg;png;jpeg;webp", control_image="jpg;png;jpeg;webp", text="text;txt;caption", orig_size="json", handler=wds.warn_and_continue, ), wds.map(filter_keys({"image", "control_image", "text", "orig_size"})), wds.map_dict(orig_size=get_orig_size), wds.map(image_transform), wds.to_tuple("image", "control_image", "text", "orig_size", "crop_coords"), ] # Create train dataset and loader pipeline = [ wds.ResampledShards(train_shards_path_or_url), tarfile_to_samples_nothrow, wds.select(WebdatasetFilter(min_size=512)), wds.shuffle(shuffle_buffer_size), *processing_pipeline, wds.batched(per_gpu_batch_size, partial=False, collation_fn=default_collate), ] num_worker_batches = math.ceil(num_train_examples / (global_batch_size * num_workers)) # per dataloader worker num_batches = num_worker_batches * num_workers num_samples = num_batches * global_batch_size # each worker is iterating over this self._train_dataset = wds.DataPipeline(*pipeline).with_epoch(num_worker_batches) self._train_dataloader = wds.WebLoader( self._train_dataset, batch_size=None, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, persistent_workers=persistent_workers, ) # add meta-data to dataloader instance for convenience self._train_dataloader.num_batches = num_batches self._train_dataloader.num_samples = num_samples # Create eval dataset and loader pipeline = [ wds.SimpleShardList(eval_shards_path_or_url), wds.split_by_worker, wds.tarfile_to_samples(handler=wds.ignore_and_continue), *processing_pipeline, wds.batched(per_gpu_batch_size, partial=False, collation_fn=default_collate), ] self._eval_dataset = wds.DataPipeline(*pipeline) self._eval_dataloader = wds.WebLoader( self._eval_dataset, batch_size=None, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, persistent_workers=persistent_workers, ) @property def train_dataset(self): return self._train_dataset @property def train_dataloader(self): return self._train_dataloader @property def eval_dataset(self): return self._eval_dataset @property def eval_dataloader(self): return self._eval_dataloader def image_grid(imgs, rows, cols): assert len(imgs) == rows 
* cols w, h = imgs[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) for i, img in enumerate(imgs): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid def log_validation(vae, unet, controlnet, args, accelerator, weight_dtype, step): logger.info("Running validation... ") controlnet = accelerator.unwrap_model(controlnet) pipeline = StableDiffusionXLControlNetPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=vae, unet=unet, controlnet=controlnet, revision=args.revision, torch_dtype=weight_dtype, ) # pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if len(args.validation_image) == len(args.validation_prompt): validation_images = args.validation_image validation_prompts = args.validation_prompt elif len(args.validation_image) == 1: validation_images = args.validation_image * len(args.validation_prompt) validation_prompts = args.validation_prompt elif len(args.validation_prompt) == 1: validation_images = args.validation_image validation_prompts = args.validation_prompt * len(args.validation_image) else: raise ValueError( "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`" ) image_logs = [] for validation_prompt, validation_image in zip(validation_prompts, validation_images): validation_image = Image.open(validation_image).convert("RGB") validation_image = validation_image.resize((args.resolution, args.resolution)) images = [] for _ in range(args.num_validation_images): with torch.autocast("cuda"): image = pipeline( validation_prompt, image=validation_image, num_inference_steps=20, generator=generator ).images[0] images.append(image) image_logs.append( {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt} ) for tracker in accelerator.trackers: if tracker.name == "tensorboard": for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] validation_image = log["validation_image"] formatted_images = [] formatted_images.append(np.asarray(validation_image)) for image in images: formatted_images.append(np.asarray(image)) formatted_images = np.stack(formatted_images) tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC") elif tracker.name == "wandb": formatted_images = [] for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] validation_image = log["validation_image"] formatted_images.append(wandb.Image(validation_image, caption="Controlnet conditioning")) for image in images: image = wandb.Image(image, caption=validation_prompt) formatted_images.append(image) tracker.log({"validation": formatted_images}) else: logger.warn(f"image logging not implemented for {tracker.name}") del pipeline gc.collect() torch.cuda.empty_cache() return image_logs def import_model_class_from_model_name_or_path( pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" ): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder=subfolder, revision=revision ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return 
CLIPTextModel elif model_class == "CLIPTextModelWithProjection": from transformers import CLIPTextModelWithProjection return CLIPTextModelWithProjection else: raise ValueError(f"{model_class} is not supported.") def save_model_card(repo_id: str, image_logs=None, base_model=str, repo_folder=None): img_str = "" if image_logs is not None: img_str = "You can find some example images below.\n" for i, log in enumerate(image_logs): images = log["images"] validation_prompt = log["validation_prompt"] validation_image = log["validation_image"] validation_image.save(os.path.join(repo_folder, "image_control.png")) img_str += f"prompt: {validation_prompt}\n" images = [validation_image] + images image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f"images_{i}.png")) img_str += f"![images_{i})](./images_{i}.png)\n" yaml = f""" --- license: creativeml-openrail-m base_model: {base_model} tags: - stable-diffusion-xl - stable-diffusion-xl-diffusers - text-to-image - diffusers - controlnet inference: true --- """ model_card = f""" # controlnet-{repo_id} These are controlnet weights trained on {base_model} with new type of conditioning. {img_str} """ with open(os.path.join(repo_folder, "README.md"), "w") as f: f.write(yaml + model_card) def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a ControlNet training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_vae_model_name_or_path", type=str, default=None, help="Path to an improved VAE to stabilize training. For more details check out: https://github.com/huggingface/diffusers/pull/4038.", ) parser.add_argument( "--controlnet_model_name_or_path", type=str, default=None, help="Path to pretrained controlnet model or model identifier from huggingface.co/models." " If not specified controlnet weights are initialized from unet.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help=( "Revision of pretrained model identifier from huggingface.co/models. Trainable model components should be" " float32 precision." ), ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--output_dir", type=str, default="controlnet-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--crops_coords_top_left_h", type=int, default=0, help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."), ) parser.add_argument( "--crops_coords_top_left_w", type=int, default=0, help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."), ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." 
) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. " "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference." "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components." "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step" "instructions." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=3, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
) parser.add_argument( "--dataloader_num_workers", type=int, default=1, help=("Number of subprocesses to use for data loading."), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument( "--set_grads_to_none", action="store_true", help=( "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain" " behaviors, so disable this argument if it causes any problems. More info:" " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html" ), ) parser.add_argument( "--train_shards_path_or_url", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--eval_shards_path_or_url", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." 
), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing the target image." ) parser.add_argument( "--conditioning_image_column", type=str, default="conditioning_image", help="The column of the dataset containing the controlnet conditioning image.", ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--proportion_empty_prompts", type=float, default=0, help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).", ) parser.add_argument( "--validation_prompt", type=str, default=None, nargs="+", help=( "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`." " Provide either a matching number of `--validation_image`s, a single `--validation_image`" " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s." ), ) parser.add_argument( "--validation_image", type=str, default=None, nargs="+", help=( "A set of paths to the controlnet conditioning image be evaluated every `--validation_steps`" " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s, a" " a single `--validation_prompt` to be used with all `--validation_image`s, or a single" " `--validation_image` that will be used with all `--validation_prompt`s." ), ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images to be generated for each `--validation_image`, `--validation_prompt` pair", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run validation every X steps. Validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`" " and logging the images." ), ) parser.add_argument( "--tracker_project_name", type=str, default="sd_xl_train_controlnet", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) parser.add_argument( "--control_type", type=str, default="canny", help=("The type of controlnet conditioning image to use. One of `canny`, `depth`" " Defaults to `canny`."), ) parser.add_argument( "--transformer_layers_per_block", type=str, default=None, help=("The number of layers per block in the transformer. If None, defaults to" " `args.transformer_layers`."), ) parser.add_argument( "--old_style_controlnet", action="store_true", default=False, help=( "Use the old style controlnet, which is a single transformer layer with" " a single head. Defaults to False." 
), ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") if args.validation_prompt is not None and args.validation_image is None: raise ValueError("`--validation_image` must be set if `--validation_prompt` is set") if args.validation_prompt is None and args.validation_image is not None: raise ValueError("`--validation_prompt` must be set if `--validation_image` is set") if ( args.validation_image is not None and args.validation_prompt is not None and len(args.validation_image) != 1 and len(args.validation_prompt) != 1 and len(args.validation_image) != len(args.validation_prompt) ): raise ValueError( "Must provide either 1 `--validation_image`, 1 `--validation_prompt`," " or the same number of `--validation_prompt`s and `--validation_image`s" ) if args.resolution % 8 != 0: raise ValueError( "`--resolution` must be divisible by 8 for consistently sized encoded images between the VAE and the controlnet encoder." ) return args # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt def encode_prompt(prompt_batch, text_encoders, tokenizers, proportion_empty_prompts, is_train=True): prompt_embeds_list = [] captions = [] for caption in prompt_batch: if random.random() < proportion_empty_prompts: captions.append("") elif isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) with torch.no_grad(): for tokenizer, text_encoder in zip(tokenizers, text_encoders): text_inputs = tokenizer( captions, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_embeds = text_encoder( text_input_ids.to(text_encoder.device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1) return prompt_embeds, pooled_prompt_embeds def main(args): logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token, private=True, ).repo_id # Load the tokenizers tokenizer_one = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False ) tokenizer_two = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False ) # import correct text encoder classes text_encoder_cls_one = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision ) text_encoder_cls_two = import_model_class_from_model_name_or_path( args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2" ) # Load scheduler and models # noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") noise_scheduler = EulerDiscreteScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder_one = text_encoder_cls_one.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) text_encoder_two = text_encoder_cls_two.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision ) vae_path = ( args.pretrained_model_name_or_path if args.pretrained_vae_model_name_or_path is None else args.pretrained_vae_model_name_or_path ) vae = AutoencoderKL.from_pretrained( vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) if args.controlnet_model_name_or_path: logger.info("Loading existing controlnet weights") pre_controlnet = ControlNetModel.from_pretrained(args.controlnet_model_name_or_path) else: logger.info("Initializing controlnet weights from unet") pre_controlnet = ControlNetModel.from_unet(unet) if args.transformer_layers_per_block is not None: transformer_layers_per_block = [int(x) for x in args.transformer_layers_per_block.split(",")] down_block_types = ["DownBlock2D" if l == 0 else "CrossAttnDownBlock2D" for l in transformer_layers_per_block] controlnet = ControlNetModel.from_config( pre_controlnet.config, down_block_types=down_block_types, transformer_layers_per_block=transformer_layers_per_block, ) controlnet.load_state_dict(pre_controlnet.state_dict(), strict=False) del pre_controlnet else: controlnet = pre_controlnet if args.control_type == "depth": feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-hybrid-midas") depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas") depth_model.requires_grad_(False) else: feature_extractor = None depth_model = None # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: i = len(weights) - 1 while len(weights) > 0: weights.pop() model = models[i] sub_dir = "controlnet" model.save_pretrained(os.path.join(output_dir, sub_dir)) i -= 1 def load_model_hook(models, 
input_dir): while len(models) > 0: # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = ControlNetModel.from_pretrained(input_dir, subfolder="controlnet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) controlnet.train() if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warn( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: controlnet.enable_gradient_checkpointing() # Check that all trainable models are in full precision low_precision_error_string = ( " Please make sure to always have all model weights in full float32 precision when starting training - even if" " doing mixed precision training, copy of the weights should still be float32." ) if accelerator.unwrap_model(controlnet).dtype != torch.float32: raise ValueError( f"Controlnet loaded as datatype {accelerator.unwrap_model(controlnet).dtype}. {low_precision_error_string}" ) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = controlnet.parameters() optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae, unet and text_encoder to device and cast to weight_dtype # The VAE is in float32 to avoid NaN losses. 
if args.pretrained_vae_model_name_or_path is not None: vae.to(accelerator.device, dtype=weight_dtype) else: vae.to(accelerator.device, dtype=torch.float32) unet.to(accelerator.device, dtype=weight_dtype) text_encoder_one.to(accelerator.device, dtype=weight_dtype) text_encoder_two.to(accelerator.device, dtype=weight_dtype) if args.control_type == "depth": depth_model.to(accelerator.device, dtype=weight_dtype) # Here, we compute not just the text embeddings but also the additional embeddings # needed for the SD XL UNet to operate. def compute_embeddings( prompt_batch, original_sizes, crop_coords, proportion_empty_prompts, text_encoders, tokenizers, is_train=True ): target_size = (args.resolution, args.resolution) original_sizes = list(map(list, zip(*original_sizes))) crops_coords_top_left = list(map(list, zip(*crop_coords))) original_sizes = torch.tensor(original_sizes, dtype=torch.long) crops_coords_top_left = torch.tensor(crops_coords_top_left, dtype=torch.long) # crops_coords_top_left = (args.crops_coords_top_left_h, args.crops_coords_top_left_w) prompt_embeds, pooled_prompt_embeds = encode_prompt( prompt_batch, text_encoders, tokenizers, proportion_empty_prompts, is_train ) add_text_embeds = pooled_prompt_embeds # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids # add_time_ids = list(crops_coords_top_left + target_size) add_time_ids = list(target_size) add_time_ids = torch.tensor([add_time_ids]) add_time_ids = add_time_ids.repeat(len(prompt_batch), 1) # add_time_ids = torch.cat([torch.tensor(original_sizes, dtype=torch.long), add_time_ids], dim=-1) add_time_ids = torch.cat([original_sizes, crops_coords_top_left, add_time_ids], dim=-1) add_time_ids = add_time_ids.to(accelerator.device, dtype=prompt_embeds.dtype) prompt_embeds = prompt_embeds.to(accelerator.device) add_text_embeds = add_text_embeds.to(accelerator.device) unet_added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} return {"prompt_embeds": prompt_embeds, **unet_added_cond_kwargs} def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): sigmas = noise_scheduler.sigmas.to(device=accelerator.device, dtype=dtype) schedule_timesteps = noise_scheduler.timesteps.to(accelerator.device) timesteps = timesteps.to(accelerator.device) step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < n_dim: sigma = sigma.unsqueeze(-1) return sigma dataset = Text2ImageDataset( train_shards_path_or_url=args.train_shards_path_or_url, eval_shards_path_or_url=args.eval_shards_path_or_url, num_train_examples=args.max_train_samples, per_gpu_batch_size=args.train_batch_size, global_batch_size=args.train_batch_size * accelerator.num_processes, num_workers=args.dataloader_num_workers, resolution=args.resolution, center_crop=False, random_flip=False, shuffle_buffer_size=1000, pin_memory=True, persistent_workers=True, control_type=args.control_type, feature_extractor=feature_extractor, ) train_dataloader = dataset.train_dataloader # Let's first compute all the embeddings so that we can free up the text encoders # from memory. text_encoders = [text_encoder_one, text_encoder_two] tokenizers = [tokenizer_one, tokenizer_two] compute_embeddings_fn = functools.partial( compute_embeddings, proportion_empty_prompts=args.proportion_empty_prompts, text_encoders=text_encoders, tokenizers=tokenizers, ) # Scheduler and math around the number of training steps. 
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(train_dataloader.num_batches / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. controlnet, optimizer, lr_scheduler = accelerator.prepare(controlnet, optimizer, lr_scheduler) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(train_dataloader.num_batches / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) # tensorboard cannot handle list types for config tracker_config.pop("validation_prompt") tracker_config.pop("validation_image") accelerator.init_trackers(args.tracker_project_name, config=tracker_config) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num batches each epoch = {train_dataloader.num_batches}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) image_logs = None for epoch in range(first_epoch, args.num_train_epochs): for step, batch in enumerate(train_dataloader): with accelerator.accumulate(controlnet): image, control_image, text, orig_size, crop_coords = batch encoded_text = compute_embeddings_fn(text, orig_size, crop_coords) image = image.to(accelerator.device, non_blocking=True) control_image = control_image.to(accelerator.device, non_blocking=True) if args.pretrained_vae_model_name_or_path is not None: pixel_values = image.to(dtype=weight_dtype) if vae.dtype != weight_dtype: vae.to(dtype=weight_dtype) else: pixel_values = image # latents = vae.encode(pixel_values).latent_dist.sample() # encode pixel values with batch size of at most 8 latents = [] for i in range(0, pixel_values.shape[0], 8): latents.append(vae.encode(pixel_values[i : i + 8]).latent_dist.sample()) latents = torch.cat(latents, dim=0) latents = latents * vae.config.scaling_factor if args.pretrained_vae_model_name_or_path is None: latents = latents.to(weight_dtype) if args.control_type == "depth": control_image = control_image.to(weight_dtype) with torch.autocast("cuda"): depth_map = depth_model(control_image).predicted_depth depth_map = torch.nn.functional.interpolate( depth_map.unsqueeze(1), size=image.shape[2:], mode="bicubic", align_corners=False, ) depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) depth_map = (depth_map - depth_min) / (depth_max - depth_min) control_image = (depth_map * 255.0).to(torch.uint8).float() / 255.0 # hack to match inference control_image = torch.cat([control_image] * 3, dim=1) # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) sigmas = get_sigmas(timesteps, len(noisy_latents.shape), noisy_latents.dtype) inp_noisy_latents = noisy_latents / ((sigmas**2 + 1) ** 0.5) # ControlNet conditioning. 
controlnet_image = control_image.to(dtype=weight_dtype) prompt_embeds = encoded_text.pop("prompt_embeds") down_block_res_samples, mid_block_res_sample = controlnet( inp_noisy_latents, timesteps, encoder_hidden_states=prompt_embeds, added_cond_kwargs=encoded_text, controlnet_cond=controlnet_image, return_dict=False, ) # Predict the noise residual model_pred = unet( inp_noisy_latents, timesteps, encoder_hidden_states=prompt_embeds, added_cond_kwargs=encoded_text, down_block_additional_residuals=[ sample.to(dtype=weight_dtype) for sample in down_block_res_samples ], mid_block_additional_residual=mid_block_res_sample.to(dtype=weight_dtype), ).sample model_pred = model_pred * (-sigmas) + noisy_latents weighing = sigmas**-2.0 # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = latents # compute loss against the denoised latents elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = torch.mean( (weighing.float() * (model_pred.float() - target.float()) ** 2).reshape(target.shape[0], -1), 1 ) loss = loss.mean() accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = controlnet.parameters() accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=args.set_grads_to_none) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if args.validation_prompt is not None and global_step % args.validation_steps == 0: image_logs = log_validation( vae, unet, controlnet, args, accelerator, weight_dtype, global_step ) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Create the pipeline using using the trained modules and save it. 
accelerator.wait_for_everyone() if accelerator.is_main_process: controlnet = accelerator.unwrap_model(controlnet) controlnet.save_pretrained(args.output_dir) if args.push_to_hub: save_model_card( repo_id, image_logs=image_logs, base_model=args.pretrained_model_name_or_path, repo_folder=args.output_dir, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
diffusers/examples/research_projects/controlnet/train_controlnet_webdataset.py/0
{ "file_path": "diffusers/examples/research_projects/controlnet/train_controlnet_webdataset.py", "repo_id": "diffusers", "token_count": 25866 }
102
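For reference, a minimal inference sketch for a ControlNet checkpoint produced by the SDXL webdataset training script above. The checkpoint path and conditioning image are placeholder assumptions, and the required preprocessing depends on the `--control_type` used during training; treat this as an illustrative sketch, not part of the original script.

```python
# Illustrative sketch (not part of the training script above): loading a trained
# SDXL ControlNet checkpoint for inference. Paths and the conditioning image are
# placeholder assumptions.
import torch
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline
from diffusers.utils import load_image

# Directory written by `controlnet.save_pretrained(args.output_dir)` at the end of training.
controlnet = ControlNetModel.from_pretrained("path/to/output_dir", torch_dtype=torch.float16)

pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")

# The conditioning image must match the control type used during training
# (e.g. a Canny edge map or a depth map).
control_image = load_image("path/to/conditioning_image.png")

image = pipe(
    "a photo of a house in the mountains",
    image=control_image,
    num_inference_steps=30,
).images[0]
image.save("controlnet_sdxl_sample.png")
```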
# InstructPix2Pix text-to-edit-image fine-tuning

This extended LoRA training script was authored by [Aiden-Frost](https://github.com/Aiden-Frost).
This is an experimental LoRA extension of [this example](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py). The script adds support for LoRA layers on the UNet model.

## Training script example

```bash
export MODEL_ID="timbrooks/instruct-pix2pix"
export DATASET_ID="instruction-tuning-sd/cartoonization"
export OUTPUT_DIR="instructPix2Pix-cartoonization"

accelerate launch finetune_instruct_pix2pix.py \
  --pretrained_model_name_or_path=$MODEL_ID \
  --dataset_name=$DATASET_ID \
  --enable_xformers_memory_efficient_attention \
  --resolution=256 --random_flip \
  --train_batch_size=2 --gradient_accumulation_steps=4 --gradient_checkpointing \
  --max_train_steps=15000 \
  --checkpointing_steps=5000 --checkpoints_total_limit=1 \
  --learning_rate=5e-05 --lr_warmup_steps=0 \
  --val_image_url="https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" \
  --validation_prompt="Generate a cartoonized version of the natural image" \
  --seed=42 \
  --rank=4 \
  --output_dir=$OUTPUT_DIR \
  --report_to=wandb \
  --push_to_hub
```

## Inference

After training, the LoRA weights are stored in `$OUTPUT_DIR`. To run inference, load the base pipeline, attach the LoRA weights, and pass an input image together with an edit prompt:

```python
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

# load the base model pipeline
pipe_lora = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix")

# Load LoRA weights from the provided path
output_dir = "path/to/lora_weight_directory"
pipe_lora.unet.load_attn_procs(output_dir)

input_image = Image.open("/path/to/input_image")
edit_prompt = "Generate a cartoonized version of the natural image"
edited_images = pipe_lora(
    num_images_per_prompt=1, prompt=edit_prompt, image=input_image, num_inference_steps=1000
).images
edited_images[0].show()
```

## Results

Here is an example of using the script to train an InstructPix2Pix model, trained on a Google Colab T4 GPU:

```bash
MODEL_ID="timbrooks/instruct-pix2pix"
DATASET_ID="instruction-tuning-sd/cartoonization"
TRAIN_EPOCHS=100
```

Below are a few examples showing the input image, the edit prompt, and the edited image (the model output):

<p align="center">
    <img src="https://github.com/Aiden-Frost/Efficiently-teaching-counting-and-cartoonization-to-InstructPix2Pix.-/blob/main/diffusers_result_assets/edited_image_results.png?raw=true" alt="instructpix2pix-inputs" width=600/>
</p>

Here are some rough training statistics from running this script:

<p align="center">
    <img src="https://github.com/Aiden-Frost/Efficiently-teaching-counting-and-cartoonization-to-InstructPix2Pix.-/blob/main/diffusers_result_assets/results.png?raw=true" alt="instructpix2pix-inputs" width=600/>
</p>

## References

* InstructPix2Pix - https://github.com/timothybrooks/instruct-pix2pix
* Dataset and example training script - https://huggingface.co/blog/instruction-tuning-sd
* For more information about the project - https://github.com/Aiden-Frost/Efficiently-teaching-counting-and-cartoonization-to-InstructPix2Pix.-
diffusers/examples/research_projects/instructpix2pix_lora/README.md/0
{ "file_path": "diffusers/examples/research_projects/instructpix2pix_lora/README.md", "repo_id": "diffusers", "token_count": 1124 }
103
import argparse import itertools import json import logging import math import uuid import warnings from os import environ, listdir, makedirs from os.path import basename, join from pathlib import Path from typing import List import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from huggingface_hub.utils import insecure_hashlib from PIL import Image from torch import dtype from torch.nn import Module from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.13.0.dev0") logger = get_logger(__name__) def log_validation_images_to_tracker( images: List[np.array], label: str, validation_prompt: str, accelerator: Accelerator, epoch: int ): logger.info(f"Logging images to tracker for validation prompt: {validation_prompt}.") for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{label}_{epoch}_{i}: {validation_prompt}") for i, image in enumerate(images) ] } ) # TODO: Add `prompt_embeds` and `negative_prompt_embeds` parameters to the function when `pre_compute_text_embeddings` # argument is implemented. def generate_validation_images( text_encoder: Module, tokenizer: Module, unet: Module, vae: Module, arguments: argparse.Namespace, accelerator: Accelerator, weight_dtype: dtype, ): logger.info("Running validation images.") pipeline_args = {} if text_encoder is not None: pipeline_args["text_encoder"] = accelerator.unwrap_model(text_encoder) if vae is not None: pipeline_args["vae"] = vae # create pipeline (note: unet and vae are loaded again in float32) pipeline = DiffusionPipeline.from_pretrained( arguments.pretrained_model_name_or_path, tokenizer=tokenizer, unet=accelerator.unwrap_model(unet), revision=arguments.revision, torch_dtype=weight_dtype, **pipeline_args, ) # We train on the simplified learning objective. 
If we were previously predicting a variance, we need the # scheduler to ignore it scheduler_args = {} if "variance_type" in pipeline.scheduler.config: variance_type = pipeline.scheduler.config.variance_type if variance_type in ["learned", "learned_range"]: variance_type = "fixed_small" scheduler_args["variance_type"] = variance_type pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) generator = ( None if arguments.seed is None else torch.Generator(device=accelerator.device).manual_seed(arguments.seed) ) images_sets = [] for vp, nvi, vnp, vis, vgs in zip( arguments.validation_prompt, arguments.validation_number_images, arguments.validation_negative_prompt, arguments.validation_inference_steps, arguments.validation_guidance_scale, ): images = [] if vp is not None: logger.info( f"Generating {nvi} images with prompt: '{vp}', negative prompt: '{vnp}', inference steps: {vis}, " f"guidance scale: {vgs}." ) pipeline_args = {"prompt": vp, "negative_prompt": vnp, "num_inference_steps": vis, "guidance_scale": vgs} # run inference # TODO: it would be good to measure whether it's faster to run inference on all images at once, one at a # time or in small batches for _ in range(nvi): with torch.autocast("cuda"): image = pipeline(**pipeline_args, num_images_per_prompt=1, generator=generator).images[0] images.append(image) images_sets.append(images) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() return images_sets def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=False, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=False, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) 
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=( "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" " for more docs" ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. 
Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--validation_steps", type=int, default=None, help=( "Run validation every X steps. Validation consists of running the prompt(s) `validation_prompt` " "multiple times (`validation_number_images`) and logging the images." ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning. You can use commas to " "define multiple negative prompts. This parameter can be defined also within the file given by " "`concepts_list` parameter in the respective subject.", ) parser.add_argument( "--validation_number_images", type=int, default=4, help="Number of images that should be generated during validation with the validation parameters given. This " "can be defined within the file given by `concepts_list` parameter in the respective subject.", ) parser.add_argument( "--validation_negative_prompt", type=str, default=None, help="A negative prompt that is used during validation to verify that the model is learning. You can use commas" " to define multiple negative prompts, each one corresponding to a validation prompt. 
This parameter can " "be defined also within the file given by `concepts_list` parameter in the respective subject.", ) parser.add_argument( "--validation_inference_steps", type=int, default=25, help="Number of inference steps (denoising steps) to run during validation. This can be defined within the " "file given by `concepts_list` parameter in the respective subject.", ) parser.add_argument( "--validation_guidance_scale", type=float, default=7.5, help="To control how much the image generation process follows the text prompt. This can be defined within the " "file given by `concepts_list` parameter in the respective subject.", ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument( "--set_grads_to_none", action="store_true", help=( "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain" " behaviors, so disable this argument if it causes any problems. More info:" " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html" ), ) parser.add_argument( "--concepts_list", type=str, default=None, help="Path to json file containing a list of multiple concepts, will overwrite parameters like instance_prompt," " class_prompt, etc.", ) if input_args: args = parser.parse_args(input_args) else: args = parser.parse_args() if not args.concepts_list and (not args.instance_data_dir or not args.instance_prompt): raise ValueError( "You must specify either instance parameters (data directory, prompt, etc.) or use " "the `concept_list` parameter and specify them within the file." ) if args.concepts_list: if args.instance_prompt: raise ValueError("If you are using `concepts_list` parameter, define the instance prompt within the file.") if args.instance_data_dir: raise ValueError( "If you are using `concepts_list` parameter, define the instance data directory within the file." ) if args.validation_steps and (args.validation_prompt or args.validation_negative_prompt): raise ValueError( "If you are using `concepts_list` parameter, define validation parameters for " "each subject within the file:\n - `validation_prompt`." "\n - `validation_negative_prompt`.\n - `validation_guidance_scale`." "\n - `validation_number_images`.\n - `validation_prompt`." "\n - `validation_inference_steps`.\nThe `validation_steps` parameter is the only one " "that needs to be defined outside the file." 
) env_local_rank = int(environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if not args.concepts_list: if not args.class_data_dir: raise ValueError("You must specify a data directory for class images.") if not args.class_prompt: raise ValueError("You must specify prompt for class images.") else: if args.class_data_dir: raise ValueError( "If you are using `concepts_list` parameter, define the class data directory within the file." ) if args.class_prompt: raise ValueError( "If you are using `concepts_list` parameter, define the class prompt within the file." ) else: # logger is not available yet if not args.class_data_dir: warnings.warn( "Ignoring `class_data_dir` parameter, you need to use it together with `with_prior_preservation`." ) if not args.class_prompt: warnings.warn( "Ignoring `class_prompt` parameter, you need to use it together with `with_prior_preservation`." ) return args class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and then tokenizes prompts. """ def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = [] self.instance_images_path = [] self.num_instance_images = [] self.instance_prompt = [] self.class_data_root = [] if class_data_root is not None else None self.class_images_path = [] self.num_class_images = [] self.class_prompt = [] self._length = 0 for i in range(len(instance_data_root)): self.instance_data_root.append(Path(instance_data_root[i])) if not self.instance_data_root[i].exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path.append(list(Path(instance_data_root[i]).iterdir())) self.num_instance_images.append(len(self.instance_images_path[i])) self.instance_prompt.append(instance_prompt[i]) self._length += self.num_instance_images[i] if class_data_root is not None: self.class_data_root.append(Path(class_data_root[i])) self.class_data_root[i].mkdir(parents=True, exist_ok=True) self.class_images_path.append(list(self.class_data_root[i].iterdir())) self.num_class_images.append(len(self.class_images_path)) if self.num_class_images[i] > self.num_instance_images[i]: self._length -= self.num_instance_images[i] self._length += self.num_class_images[i] self.class_prompt.append(class_prompt[i]) self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} for i in range(len(self.instance_images_path)): instance_image = Image.open(self.instance_images_path[i][index % self.num_instance_images[i]]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") example[f"instance_images_{i}"] = self.image_transforms(instance_image) example[f"instance_prompt_ids_{i}"] = self.tokenizer( self.instance_prompt[i], truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids if self.class_data_root: for i in range(len(self.class_data_root)): class_image = Image.open(self.class_images_path[i][index 
% self.num_class_images[i]]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example[f"class_images_{i}"] = self.image_transforms(class_image) example[f"class_prompt_ids_{i}"] = self.tokenizer( self.class_prompt[i], truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids return example def collate_fn(num_instances, examples, with_prior_preservation=False): input_ids = [] pixel_values = [] for i in range(num_instances): input_ids += [example[f"instance_prompt_ids_{i}"] for example in examples] pixel_values += [example[f"instance_images_{i}"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if with_prior_preservation: for i in range(num_instances): input_ids += [example[f"class_prompt_ids_{i}"] for example in examples] pixel_values += [example[f"class_images_{i}"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.cat(input_ids, dim=0) batch = { "input_ids": input_ids, "pixel_values": pixel_values, } return batch class PromptDataset(Dataset): """A simple dataset to prepare the prompts to generate class images on multiple GPUs.""" def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def main(args): logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration( total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir ) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." 
) instance_data_dir = [] instance_prompt = [] class_data_dir = [] if args.with_prior_preservation else None class_prompt = [] if args.with_prior_preservation else None if args.concepts_list: with open(args.concepts_list, "r") as f: concepts_list = json.load(f) if args.validation_steps: args.validation_prompt = [] args.validation_number_images = [] args.validation_negative_prompt = [] args.validation_inference_steps = [] args.validation_guidance_scale = [] for concept in concepts_list: instance_data_dir.append(concept["instance_data_dir"]) instance_prompt.append(concept["instance_prompt"]) if args.with_prior_preservation: try: class_data_dir.append(concept["class_data_dir"]) class_prompt.append(concept["class_prompt"]) except KeyError: raise KeyError( "`class_data_dir` or `class_prompt` not found in concepts_list while using " "`with_prior_preservation`." ) else: if "class_data_dir" in concept: warnings.warn( "Ignoring `class_data_dir` key, to use it you need to enable `with_prior_preservation`." ) if "class_prompt" in concept: warnings.warn( "Ignoring `class_prompt` key, to use it you need to enable `with_prior_preservation`." ) if args.validation_steps: args.validation_prompt.append(concept.get("validation_prompt", None)) args.validation_number_images.append(concept.get("validation_number_images", 4)) args.validation_negative_prompt.append(concept.get("validation_negative_prompt", None)) args.validation_inference_steps.append(concept.get("validation_inference_steps", 25)) args.validation_guidance_scale.append(concept.get("validation_guidance_scale", 7.5)) else: # Parse instance and class inputs, and double check that lengths match instance_data_dir = args.instance_data_dir.split(",") instance_prompt = args.instance_prompt.split(",") assert all( x == len(instance_data_dir) for x in [len(instance_data_dir), len(instance_prompt)] ), "Instance data dir and prompt inputs are not of the same length." if args.with_prior_preservation: class_data_dir = args.class_data_dir.split(",") class_prompt = args.class_prompt.split(",") assert all( x == len(instance_data_dir) for x in [len(instance_data_dir), len(instance_prompt), len(class_data_dir), len(class_prompt)] ), "Instance & class data dir or prompt inputs are not of the same length." if args.validation_steps: validation_prompts = args.validation_prompt.split(",") num_of_validation_prompts = len(validation_prompts) args.validation_prompt = validation_prompts args.validation_number_images = [args.validation_number_images] * num_of_validation_prompts negative_validation_prompts = [None] * num_of_validation_prompts if args.validation_negative_prompt: negative_validation_prompts = args.validation_negative_prompt.split(",") while len(negative_validation_prompts) < num_of_validation_prompts: negative_validation_prompts.append(None) args.validation_negative_prompt = negative_validation_prompts assert num_of_validation_prompts == len( negative_validation_prompts ), "The length of negative prompts for validation is greater than the number of validation prompts." args.validation_inference_steps = [args.validation_inference_steps] * num_of_validation_prompts args.validation_guidance_scale = [args.validation_guidance_scale] * num_of_validation_prompts # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Generate class images if prior preservation is enabled. if args.with_prior_preservation: for i in range(len(class_data_dir)): class_images_dir = Path(class_data_dir[i]) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(class_prompt[i], num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images for ii, image in enumerate(images): hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() image_filename = ( class_images_dir / f"{example['index'][ii] + cur_class_images}-{hash_image}.jpg" ) image.save(image_filename) # Clean up the memory deleting one-time-use variables. 
del pipeline del sample_dataloader del sample_dataset if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer tokenizer = None if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) vae.requires_grad_(False) if not args.train_text_encoder: text_encoder.requires_grad_(False) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if args.train_text_encoder: text_encoder.gradient_checkpointing_enable() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=instance_data_dir, instance_prompt=instance_prompt, class_data_root=class_data_dir, class_prompt=class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(len(instance_data_dir), examples, args.with_prior_preservation), num_workers=1, ) # Scheduler and math around the number of training steps. 
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae and text_encoder to device and cast to weight_dtype vae.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder: text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initialize automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("dreambooth", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = basename(args.resume_from_checkpoint) else: # Get the mos recent checkpoint dirs = listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 
) args.resume_from_checkpoint = None else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = global_step // num_update_steps_per_epoch resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) # Only show the progress bar once on each machine. progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() if args.train_text_encoder: text_encoder.train() for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image time_steps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ) time_steps = time_steps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, time_steps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, time_steps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, time_steps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. 
loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=args.set_grads_to_none) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: save_path = join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if ( args.validation_steps and any(args.validation_prompt) and global_step % args.validation_steps == 0 ): images_set = generate_validation_images( text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype ) for images, validation_prompt in zip(images_set, args.validation_prompt): if len(images) > 0: label = str(uuid.uuid1())[:8] # generate an id for different set of images log_validation_images_to_tracker( images, label, validation_prompt, accelerator, global_step ) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), revision=args.revision, ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
diffusers/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py/0
{ "file_path": "diffusers/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py", "repo_id": "diffusers", "token_count": 21609 }
104
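A short, hedged example of using a model saved by the multi-subject DreamBooth script above; the output directory and the instance prompt are placeholders standing in for the values passed via the script's arguments or `concepts_list` file.

```python
# Illustrative sketch: loading a pipeline saved by the multi-subject DreamBooth
# script above. The output directory and prompt are placeholder assumptions.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "path/to/output_dir",  # same as --output_dir used for training
    torch_dtype=torch.float16,
).to("cuda")

# Use the instance identifiers that were passed via --instance_prompt
# or the concepts_list file during training.
prompt = "a photo of sks dog next to a qwe cat"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("multi_subject_sample.png")
```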
## Textual Inversion fine-tuning example [Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images using just 3-5 examples. The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion. ## Running on Colab Colab for training [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) Colab for inference [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) ## Running locally with PyTorch ### Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then cd in the example folder and run: ```bash pip install -r requirements.txt ``` And initialize an [🤗 Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` ### Cat toy example First, let's login so that we can upload the checkpoint to the Hub during training: ```bash huggingface-cli login ``` Now let's get our dataset. For this example we will use some cat images: https://huggingface.co/datasets/diffusers/cat_toy_example . Let's first download it locally: ```py from huggingface_hub import snapshot_download local_dir = "./cat" snapshot_download("diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes") ``` This will be our training data. Now we can launch the training using: **___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___** **___Note: Please follow the [README_sdxl.md](./README_sdxl.md) if you are using the [stable-diffusion-xl](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0).___** ```bash export MODEL_NAME="runwayml/stable-diffusion-v1-5" export DATA_DIR="./cat" accelerate launch textual_inversion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$DATA_DIR \ --learnable_property="object" \ --placeholder_token="<cat-toy>" \ --initializer_token="toy" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --max_train_steps=3000 \ --learning_rate=5.0e-04 \ --scale_lr \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --push_to_hub \ --output_dir="textual_inversion_cat" ``` A full training run takes ~1 hour on one V100 GPU. **Note**: As described in [the official paper](https://arxiv.org/abs/2208.01618) only one embedding vector is used for the placeholder token, *e.g.* `"<cat-toy>"`. However, one can also add multiple embedding vectors for the placeholder token to increase the number of fine-tuneable parameters. This can help the model to learn more complex details. 
To use multiple embedding vectors, you should define `--num_vectors` to a number larger than one, *e.g.*:

```bash
--num_vectors 5
```

The saved textual inversion vectors will then be larger in size compared to the default case.

### Inference

Once you have trained a model using the above command, inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `placeholder_token` in your prompt.

```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A <cat-toy> backpack"

image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("cat-backpack.png")
```

## Training with Flax/JAX

For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script.

Before running the scripts, make sure to install the library's training dependencies:

```bash
pip install -U -r requirements_flax.txt
```

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export DATA_DIR="path-to-dir-containing-images"

python textual_inversion_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" \
  --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 \
  --scale_lr \
  --output_dir="textual_inversion_cat"
```

It should be at least 70% faster than the PyTorch script with the same configuration.

### Training with xformers:

You can enable memory-efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.
diffusers/examples/textual_inversion/README.md/0
{ "file_path": "diffusers/examples/textual_inversion/README.md", "repo_id": "diffusers", "token_count": 1736 }
105
# Script for converting a Hugging Face Diffusers trained SDXL LoRAs to Kohya format # This means that you can input your diffusers-trained LoRAs and # Get the output to work with WebUIs such as AUTOMATIC1111, ComfyUI, SD.Next and others. # To get started you can find some cool `diffusers` trained LoRAs such as this cute Corgy # https://huggingface.co/ignasbud/corgy_dog_LoRA/, download its `pytorch_lora_weights.safetensors` file # and run the script: # python convert_diffusers_sdxl_lora_to_webui.py --input_lora pytorch_lora_weights.safetensors --output_lora corgy.safetensors # now you can use corgy.safetensors in your WebUI of choice! # To train your own, here are some diffusers training scripts and utils that you can use and then convert: # LoRA Ease - no code SDXL Dreambooth LoRA trainer: https://huggingface.co/spaces/multimodalart/lora-ease # Dreambooth Advanced Training Script - state of the art techniques such as pivotal tuning and prodigy optimizer: # - Script: https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py # - Colab (only on Pro): https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_Dreambooth_LoRA_advanced_example.ipynb # Canonical diffusers training scripts: # - Script: https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora_sdxl.py # - Colab (runs on free tier): https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_DreamBooth_LoRA_.ipynb import argparse import os from safetensors.torch import load_file, save_file from diffusers.utils import convert_all_state_dict_to_peft, convert_state_dict_to_kohya def convert_and_save(input_lora, output_lora=None): if output_lora is None: base_name = os.path.splitext(input_lora)[0] output_lora = f"{base_name}_webui.safetensors" diffusers_state_dict = load_file(input_lora) peft_state_dict = convert_all_state_dict_to_peft(diffusers_state_dict) kohya_state_dict = convert_state_dict_to_kohya(peft_state_dict) save_file(kohya_state_dict, output_lora) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Convert LoRA model to PEFT and then to Kohya format.") parser.add_argument( "input_lora", type=str, help="Path to the input LoRA model file in the diffusers format.", ) parser.add_argument( "output_lora", type=str, nargs="?", help="Path for the converted LoRA (safetensors format for AUTOMATIC1111, ComfyUI, etc.). Optional, defaults to input name with a _webui suffix.", ) args = parser.parse_args() convert_and_save(args.input_lora, args.output_lora)
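# Illustrative programmatic usage (a sketch; the file names below are placeholders,
# not files that ship with this script):
#
#     from convert_diffusers_sdxl_lora_to_webui import convert_and_save
#
#     convert_and_save("pytorch_lora_weights.safetensors", "corgy.safetensors")
#     # or rely on the default "<input>_webui.safetensors" output name:
#     convert_and_save("pytorch_lora_weights.safetensors")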
diffusers/scripts/convert_diffusers_sdxl_lora_to_webui.py/0
{ "file_path": "diffusers/scripts/convert_diffusers_sdxl_lora_to_webui.py", "repo_id": "diffusers", "token_count": 1013 }
106
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Conversion script for the NCSNPP checkpoints. """ import argparse import json import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel def convert_ncsnpp_checkpoint(checkpoint, config): """ Takes a state dict and the path to """ new_model_architecture = UNet2DModel(**config) new_model_architecture.time_proj.W.data = checkpoint["all_modules.0.W"].data new_model_architecture.time_proj.weight.data = checkpoint["all_modules.0.W"].data new_model_architecture.time_embedding.linear_1.weight.data = checkpoint["all_modules.1.weight"].data new_model_architecture.time_embedding.linear_1.bias.data = checkpoint["all_modules.1.bias"].data new_model_architecture.time_embedding.linear_2.weight.data = checkpoint["all_modules.2.weight"].data new_model_architecture.time_embedding.linear_2.bias.data = checkpoint["all_modules.2.bias"].data new_model_architecture.conv_in.weight.data = checkpoint["all_modules.3.weight"].data new_model_architecture.conv_in.bias.data = checkpoint["all_modules.3.bias"].data new_model_architecture.conv_norm_out.weight.data = checkpoint[list(checkpoint.keys())[-4]].data new_model_architecture.conv_norm_out.bias.data = checkpoint[list(checkpoint.keys())[-3]].data new_model_architecture.conv_out.weight.data = checkpoint[list(checkpoint.keys())[-2]].data new_model_architecture.conv_out.bias.data = checkpoint[list(checkpoint.keys())[-1]].data module_index = 4 def set_attention_weights(new_layer, old_checkpoint, index): new_layer.query.weight.data = old_checkpoint[f"all_modules.{index}.NIN_0.W"].data.T new_layer.key.weight.data = old_checkpoint[f"all_modules.{index}.NIN_1.W"].data.T new_layer.value.weight.data = old_checkpoint[f"all_modules.{index}.NIN_2.W"].data.T new_layer.query.bias.data = old_checkpoint[f"all_modules.{index}.NIN_0.b"].data new_layer.key.bias.data = old_checkpoint[f"all_modules.{index}.NIN_1.b"].data new_layer.value.bias.data = old_checkpoint[f"all_modules.{index}.NIN_2.b"].data new_layer.proj_attn.weight.data = old_checkpoint[f"all_modules.{index}.NIN_3.W"].data.T new_layer.proj_attn.bias.data = old_checkpoint[f"all_modules.{index}.NIN_3.b"].data new_layer.group_norm.weight.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.weight"].data new_layer.group_norm.bias.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.bias"].data def set_resnet_weights(new_layer, old_checkpoint, index): new_layer.conv1.weight.data = old_checkpoint[f"all_modules.{index}.Conv_0.weight"].data new_layer.conv1.bias.data = old_checkpoint[f"all_modules.{index}.Conv_0.bias"].data new_layer.norm1.weight.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.weight"].data new_layer.norm1.bias.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.bias"].data new_layer.conv2.weight.data = old_checkpoint[f"all_modules.{index}.Conv_1.weight"].data new_layer.conv2.bias.data = old_checkpoint[f"all_modules.{index}.Conv_1.bias"].data new_layer.norm2.weight.data = 
old_checkpoint[f"all_modules.{index}.GroupNorm_1.weight"].data new_layer.norm2.bias.data = old_checkpoint[f"all_modules.{index}.GroupNorm_1.bias"].data new_layer.time_emb_proj.weight.data = old_checkpoint[f"all_modules.{index}.Dense_0.weight"].data new_layer.time_emb_proj.bias.data = old_checkpoint[f"all_modules.{index}.Dense_0.bias"].data if new_layer.in_channels != new_layer.out_channels or new_layer.up or new_layer.down: new_layer.conv_shortcut.weight.data = old_checkpoint[f"all_modules.{index}.Conv_2.weight"].data new_layer.conv_shortcut.bias.data = old_checkpoint[f"all_modules.{index}.Conv_2.bias"].data for i, block in enumerate(new_model_architecture.downsample_blocks): has_attentions = hasattr(block, "attentions") for j in range(len(block.resnets)): set_resnet_weights(block.resnets[j], checkpoint, module_index) module_index += 1 if has_attentions: set_attention_weights(block.attentions[j], checkpoint, module_index) module_index += 1 if hasattr(block, "downsamplers") and block.downsamplers is not None: set_resnet_weights(block.resnet_down, checkpoint, module_index) module_index += 1 block.skip_conv.weight.data = checkpoint[f"all_modules.{module_index}.Conv_0.weight"].data block.skip_conv.bias.data = checkpoint[f"all_modules.{module_index}.Conv_0.bias"].data module_index += 1 set_resnet_weights(new_model_architecture.mid_block.resnets[0], checkpoint, module_index) module_index += 1 set_attention_weights(new_model_architecture.mid_block.attentions[0], checkpoint, module_index) module_index += 1 set_resnet_weights(new_model_architecture.mid_block.resnets[1], checkpoint, module_index) module_index += 1 for i, block in enumerate(new_model_architecture.up_blocks): has_attentions = hasattr(block, "attentions") for j in range(len(block.resnets)): set_resnet_weights(block.resnets[j], checkpoint, module_index) module_index += 1 if has_attentions: set_attention_weights( block.attentions[0], checkpoint, module_index ) # why can there only be a single attention layer for up? 
module_index += 1 if hasattr(block, "resnet_up") and block.resnet_up is not None: block.skip_norm.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data block.skip_norm.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data module_index += 1 block.skip_conv.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data block.skip_conv.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data module_index += 1 set_resnet_weights(block.resnet_up, checkpoint, module_index) module_index += 1 new_model_architecture.conv_norm_out.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data new_model_architecture.conv_norm_out.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data module_index += 1 new_model_architecture.conv_out.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data new_model_architecture.conv_out.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data return new_model_architecture.state_dict() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default="/Users/arthurzucker/Work/diffusers/ArthurZ/diffusion_pytorch_model.bin", type=str, required=False, help="Path to the checkpoint to convert.", ) parser.add_argument( "--config_file", default="/Users/arthurzucker/Work/diffusers/ArthurZ/config.json", type=str, required=False, help="The config json file corresponding to the architecture.", ) parser.add_argument( "--dump_path", default="/Users/arthurzucker/Work/diffusers/ArthurZ/diffusion_model_new.pt", type=str, required=False, help="Path to the output model.", ) args = parser.parse_args() checkpoint = torch.load(args.checkpoint_path, map_location="cpu") with open(args.config_file) as f: config = json.loads(f.read()) converted_checkpoint = convert_ncsnpp_checkpoint( checkpoint, config, ) if "sde" in config: del config["sde"] model = UNet2DModel(**config) model.load_state_dict(converted_checkpoint) try: scheduler = ScoreSdeVeScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) pipe = ScoreSdeVePipeline(unet=model, scheduler=scheduler) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
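# Rough way to sanity-check a converted checkpoint (the path below is a placeholder):
#
#     from diffusers import ScoreSdeVePipeline, UNet2DModel
#
#     unet = UNet2DModel.from_pretrained("path/to/dump_path")         # if only the model was saved
#     pipe = ScoreSdeVePipeline.from_pretrained("path/to/dump_path")  # if the full pipeline was saved
#     image = pipe(num_inference_steps=10).images[0]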
diffusers/scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py", "repo_id": "diffusers", "token_count": 3608 }
107
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def onnx_export( model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ): output_path.parent.mkdir(parents=True, exist_ok=True) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, ) else: export( model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, ) @torch.no_grad() def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False): dtype = torch.float16 if fp16 else torch.float32 if fp16 and torch.cuda.is_available(): device = "cuda" elif fp16 and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA") else: device = "cpu" output_path = Path(output_path) # VAE DECODER vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae") vae_latent_channels = vae_decoder.config.latent_channels # forward only through the decoder part vae_decoder.forward = vae_decoder.decode onnx_export( vae_decoder, model_args=( torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype), False, ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, }, opset=opset, ) del vae_decoder if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") args = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fp16) print("SD: Done: ONNX")
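# Rough sketch of running the exported decoder with onnxruntime (the onnxruntime
# dependency, output path and latent shape below are assumptions, not something this
# script installs or checks):
#
#     import numpy as np
#     import onnxruntime as ort
#
#     session = ort.InferenceSession("sd_onnx/vae_decoder/model.onnx")
#     latents = np.random.randn(1, 4, 64, 64).astype(np.float32)  # latent_channels is typically 4
#     (sample,) = session.run(None, {"latent_sample": latents})
#     print(sample.shape)  # e.g. (1, 3, 512, 512) for a standard SD VAE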
diffusers/scripts/convert_vae_diff_to_onnx.py/0
{ "file_path": "diffusers/scripts/convert_vae_diff_to_onnx.py", "repo_id": "diffusers", "token_count": 1684 }
108
# 🧨 Diffusers Experimental

We are adding experimental code to support novel applications and uses of the Diffusers library.
Currently, the following experiments are supported:

* Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model.
diffusers/src/diffusers/experimental/README.md/0
{ "file_path": "diffusers/src/diffusers/experimental/README.md", "repo_id": "diffusers", "token_count": 69 }
109
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict import torch class AttnProcsLayers(torch.nn.Module): def __init__(self, state_dict: Dict[str, torch.Tensor]): super().__init__() self.layers = torch.nn.ModuleList(state_dict.values()) self.mapping = dict(enumerate(state_dict.keys())) self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())} # .processor for unet, .self_attn for text encoder self.split_keys = [".processor", ".self_attn"] # we add a hook to state_dict() and load_state_dict() so that the # naming fits with `unet.attn_processors` def map_to(module, state_dict, *args, **kwargs): new_state_dict = {} for key, value in state_dict.items(): num = int(key.split(".")[1]) # 0 is always "layers" new_key = key.replace(f"layers.{num}", module.mapping[num]) new_state_dict[new_key] = value return new_state_dict def remap_key(key, state_dict): for k in self.split_keys: if k in key: return key.split(k)[0] + k raise ValueError( f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}." ) def map_from(module, state_dict, *args, **kwargs): all_keys = list(state_dict.keys()) for key in all_keys: replace_key = remap_key(key, state_dict) new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}") state_dict[new_key] = state_dict[key] del state_dict[key] self._register_state_dict_hook(map_to) self._register_load_state_dict_pre_hook(map_from, with_module=True)
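# Typical usage sketch (assumes `unet` is a UNet2DConditionModel whose attention
# processors have already been replaced with trainable ones, e.g. LoRA processors):
#
#     lora_layers = AttnProcsLayers(unet.attn_processors)
#     optimizer = torch.optim.AdamW(lora_layers.parameters(), lr=1e-4)
#
# Thanks to the hooks registered above, `lora_layers.state_dict()` reports its keys
# under the original processor names (e.g. "down_blocks.0.attentions.0....attn1.processor.*")
# instead of "layers.0.*", so the saved weights line up with `unet.attn_processors`.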
diffusers/src/diffusers/loaders/utils.py/0
{ "file_path": "diffusers/src/diffusers/loaders/utils.py", "repo_id": "diffusers", "token_count": 1032 }
110