Spaces:
Paused
Paused
File size: 2,083 Bytes
1c72248 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 |
---
# ai-toolkit (ostris) sd_trainer config: FLUX.1-dev LoRA fine-tune for the
# subject "rami murad". Indentation reconstructed to the canonical
# job -> config -> process[0] nesting the toolkit expects.
job: extension
config:
  # Run name: used for the output subfolder and checkpoint filenames.
  name: "flux_lora_rami_v1"
  process:
    - type: 'sd_trainer'
      training_folder: "output_flux_lora_rami"
      # Token injected wherever "[trigger]" appears in captions/prompts.
      trigger_word: "rami murad"
      # Quoted: contains a colon, which is a YAML indicator character.
      device: "cuda:0"
      network:
        type: "lora"
        # LoRA rank and alpha (alpha == rank -> effective scale of 1.0).
        linear: 16
        linear_alpha: 16
      save:
        dtype: float16
        save_every: 250
        max_step_saves_to_keep: 4
        push_to_hub: false
      datasets:
        - folder_path: "ai-toolkit/images"
          caption_ext: "txt"
          # 5% of steps train without captions to improve robustness.
          caption_dropout_rate: 0.05
          shuffle_tokens: false
          cache_latents_to_disk: true
          resolution: [1024]
      train:
        batch_size: 1
        bypass_guidance_embedding: true
        steps: 3000
        gradient_accumulation: 1
        train_unet: true
        train_text_encoder: false
        gradient_checkpointing: true
        noise_scheduler: "flowmatch"
        optimizer: "adamw8bit"
        lr: 1e-4
        # NOTE(review): ai-toolkit's FLUX examples use bf16; fp16 training on
        # FLUX can be numerically unstable — confirm this is intentional.
        dtype: fp16
        # Skips periodic sampling during training; the sample: block below is
        # retained but inactive while this is true.
        disable_sampling: true
        ema_config:
          use_ema: true
          ema_decay: 0.99
      model:
        name_or_path: "black-forest-labs/FLUX.1-dev"
        is_flux: true
        load_in_8bit: true
        quantize: true
        quantize_kwargs:
          # Keep the time/text embedding layers un-quantized.
          exclude:
            - "*time_text_embed*"
      sample:
        sampler: "flowmatch"
        sample_every: 250
        width: 1024
        height: 1024
        prompts:
          - "[trigger] smiling in front of a white background, headshot, studio lighting"
          - "[trigger] wearing a suit, standing in a futuristic city, cinematic lighting"
          - "[trigger] in a medieval outfit, standing in front of a castle"
          - "[trigger] sitting at a wooden desk, writing in a notebook"
          - "[trigger] relaxing at the beach during sunset, soft light"
          - "[trigger] on stage giving a TED talk, spotlight"
          - "[trigger] in a forest with sunbeams shining through the trees"
        neg: ""
        seed: 42
        walk_seed: true
        guidance_scale: 4
        sample_steps: 25
# "[name]" is substituted by the toolkit with config.name at runtime.
meta:
  name: "[name]"
  version: '1.0'
|