Update train.py
train.py CHANGED
@@ -5,97 +5,64 @@ from diffusers import (
     StableDiffusionPipeline,
     DPMSolverMultistepScheduler,
     AutoencoderKL,
-    UNet2DConditionModel
+    UNet2DConditionModel
 )
 from transformers import CLIPTextModel, CLIPTokenizer
 from peft import LoraConfig, get_peft_model
 
-REPO_ID = "HiDream-ai/HiDream-I1-Dev"
-
-print(f"Downloading full model snapshot to {MODEL_CACHE}")
-MODEL_ROOT = snapshot_download(
-    repo_id=REPO_ID,
-    local_dir=MODEL_CACHE,
-    local_dir_use_symlinks=False,  # force a copy so config.json ends up there
-)
-
-# ─── STEP 2: LOAD SCHEDULER ────────────────────────────────────────────────
-print("Loading scheduler")
+MODEL_ID = "black-forest-labs/FLUX.1-dev"
+dataset_path = "/workspace/data"
+output_dir = "/workspace/lora-trained"
+
+# 1) grab the model locally
+print("Downloading Flux-Dev model…")
+model_path = snapshot_download(MODEL_ID, local_dir="./fluxdev-model")
+
+# 2) load each piece with its correct subfolder
+print("Loading scheduler…")
 scheduler = DPMSolverMultistepScheduler.from_pretrained(
-    subfolder="scheduler",
+    model_path, subfolder="scheduler"
 )
 
-print("Loading VAE")
+print("Loading VAE…")
 vae = AutoencoderKL.from_pretrained(
-    torch_dtype=torch.float16,
-).to("cuda")
-
-# ─── STEP 4: LOAD TEXT ENCODER + TOKENIZER ─────────────────────────────────
-
-print("Loading text encoder + tokenizer")
+    model_path, subfolder="vae", torch_dtype=torch.float16
+)
+
+print("Loading text encoder + tokenizer…")
 text_encoder = CLIPTextModel.from_pretrained(
-    torch_dtype=torch.float16,
-).to("cuda")
+    model_path, subfolder="text_encoder", torch_dtype=torch.float16
+)
 tokenizer = CLIPTokenizer.from_pretrained(
-    subfolder="tokenizer",
+    model_path, subfolder="tokenizer"
 )
 
-print("Loading U-Net")
+print("Loading U-Net…")
 unet = UNet2DConditionModel.from_pretrained(
-    torch_dtype=torch.float16,
-).to("cuda")
-
-# ─── STEP 6: BUILD THE PIPELINE ────────────────────────────────────────────
-
+    model_path, subfolder="unet", torch_dtype=torch.float16
+)
+
+# 3) assemble the pipeline
+print("Assembling pipeline…")
 pipe = StableDiffusionPipeline(
     vae=vae,
     text_encoder=text_encoder,
     tokenizer=tokenizer,
     unet=unet,
-    scheduler=scheduler
+    scheduler=scheduler
 ).to("cuda")
 
-lora_config = LoraConfig(
-    r=16,
-    lora_alpha=16,
-    bias="none",
-    task_type="CAUSAL_LM",
-)
+# 4) apply LoRA
+print("Applying LoRA…")
+lora_config = LoraConfig(r=16, lora_alpha=16, bias="none", task_type="CAUSAL_LM")
 pipe.unet = get_peft_model(pipe.unet, lora_config)
 
-print(f"Loading dataset from: {DATA_DIR}")
+# 5) your training loop (or dummy loop for illustration)
+print("Starting fine-tuning…")
 for step in range(100):
-    # ── here's where you'd load your images, run forward/backward, optimizer, etc.
     print(f"Training step {step+1}/100")
+    # …insert your actual data-loader and loss/backprop here…
 
-pipe.save_pretrained(OUTPUT_DIR)
-print("Training complete. Saved to", OUTPUT_DIR)
+os.makedirs(output_dir, exist_ok=True)
+pipe.save_pretrained(output_dir)
+print("Done. LoRA weights in", output_dir)
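A note on the LoraConfig in the new version: task_type="CAUSAL_LM" is PEFT's preset for causal language models, not for a diffusion U-Net. A common alternative (a sketch, not part of this commit) is to drop the task type and name the attention projections of diffusers' UNet2DConditionModel as target_modules:

from peft import LoraConfig, get_peft_model

# Sketch: target the U-Net attention projections explicitly instead of using a
# language-model task preset. The module names below are the ones used by
# diffusers' UNet2DConditionModel attention blocks; adjust if your model differs.
lora_config = LoraConfig(
    r=16,
    lora_alpha=16,
    bias="none",
    target_modules=["to_q", "to_k", "to_v", "to_out.0"],
)
pipe.unet = get_peft_model(pipe.unet, lora_config)
pipe.unet.print_trainable_parameters()  # sanity check: only the LoRA layers should be trainable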
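The loop in the commit only prints a counter. Below is a minimal sketch of what one real fine-tuning step could look like with these components, assuming a DDPMScheduler for training-time noising and a hypothetical dataloader yielding (pixel_values, captions) batches built from dataset_path; mixed-precision and gradient-accumulation details are omitted:

import torch
import torch.nn.functional as F
from diffusers import DDPMScheduler

# Assumes pipe, vae, tokenizer, and text_encoder are the objects built above.
noise_scheduler = DDPMScheduler(num_train_timesteps=1000)
optimizer = torch.optim.AdamW(
    [p for p in pipe.unet.parameters() if p.requires_grad], lr=1e-4
)

for pixel_values, captions in dataloader:  # hypothetical data loader over dataset_path
    pixel_values = pixel_values.to("cuda", dtype=torch.float16)

    # Encode images to latents and add noise at a random timestep
    latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor
    noise = torch.randn_like(latents)
    timesteps = torch.randint(
        0, noise_scheduler.config.num_train_timesteps, (latents.shape[0],), device="cuda"
    )
    noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

    # Text conditioning from the captions
    tokens = tokenizer(
        list(captions), padding="max_length", truncation=True,
        max_length=tokenizer.model_max_length, return_tensors="pt",
    ).to("cuda")
    encoder_hidden_states = text_encoder(tokens.input_ids)[0]

    # Predict the noise, compute the loss, and step the optimizer
    noise_pred = pipe.unet(noisy_latents, timesteps, encoder_hidden_states).sample
    loss = F.mse_loss(noise_pred.float(), noise.float())
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()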
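save_pretrained on the whole pipeline writes every component back out. Because get_peft_model wraps the U-Net in a PeftModel, it is also possible to save just the small LoRA adapter; a sketch, with an arbitrary directory name:

import os

adapter_dir = os.path.join(output_dir, "unet_lora")  # illustrative path, not from the commit
pipe.unet.save_pretrained(adapter_dir)  # writes only adapter_config.json plus the LoRA weights

# Later, the adapter can be re-attached to a freshly loaded U-Net:
# from peft import PeftModel
# unet = PeftModel.from_pretrained(base_unet, adapter_dir)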