import ml_collections
from dataclasses import dataclass


@dataclass
class Args:
    """Simple attribute container: stores every keyword argument as an attribute."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

model = Args(
    latent_size=64,
    learn_sigma=False,  # different from DiT, we directly predict the noise here
    channels=4,
    block_grad_to_lowres=False,
    norm_type="TDRMSN",
    use_t2i=True,
    clip_dim=4096,
    num_clip_token=77,
    gradient_checking=True,  # for larger models
    cfg_indicator=0.10,
    textVAE=Args(
        num_blocks=11,
        hidden_dim=1024,
        hidden_token_length=256,
        num_attention_heads=8,
        dropout_prob=0.1,
    ),
)

def d(**kwargs):
    """Helper for creating a config dict."""
    return ml_collections.ConfigDict(initial_dictionary=kwargs)

def get_config():
    config = ml_collections.ConfigDict()
    config.seed = 1234
    config.z_shape = (4, 64, 64)

    config.autoencoder = d(
        pretrained_path='assets/stable-diffusion/autoencoder_kl.pth',
        scale_factor=0.23010
    )

    config.train = d(
        n_steps=1000000,
        batch_size=1024,
        mode='cond',
        log_interval=10,
        eval_interval=5000,
        save_interval=50000,
    )

    config.optimizer = d(
        name='adamw',
        lr=0.00002,
        weight_decay=0.03,
        betas=(0.9, 0.9),
    )

    config.lr_scheduler = d(
        name='customized',
        warmup_steps=5000
    )

    global model
    config.nnet = d(
        name='dit',
        model_args=model,
    )
    config.loss_coeffs = []

    config.dataset = d(
        name='JDB_demo_features',
        resolution=512,
        llm='t5',
        train_path='/data/qihao/dataset/JDB_demo_feature/',
        val_path='/data/qihao/dataset/coco_val_features/',
        cfg=False
    )

    config.sample = d(
        sample_steps=50,
        n_samples=30000,
        mini_batch_size=10,
        cfg=False,
        scale=7,
        path=''
    )

    return config
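

# The block below is an illustrative, optional entry point, not part of the
# original training pipeline: it simply builds the config and prints a few of
# the values defined above, which is handy for sanity-checking edits to this
# file before launching a run.
if __name__ == '__main__':
    cfg = get_config()
    print(cfg.z_shape)                             # (4, 64, 64)
    print(cfg.nnet.name)                           # 'dit'
    print(cfg.nnet.model_args.latent_size)         # 64
    print(cfg.nnet.model_args.textVAE.hidden_dim)  # 1024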