LPX55 committed
Commit 7fa6974 · Parent: 472ae73

Files changed (2):
  1. app.py +4 -1
  2. modelmod.py +7 -7
app.py CHANGED
@@ -29,7 +29,10 @@ if HF_TOKEN: login(token=HF_TOKEN)
 cp_dir = os.getenv('CHECKPOINT_DIR', 'checkpoints')
 snapshot_download("Djrango/Qwen2vl-Flux", local_dir=cp_dir)
 hf_hub_download(repo_id="TheMistoAI/MistoLine", filename="MTEED.pth", subfolder="Anyline", local_dir=f"{cp_dir}/anyline")
-shutil.move("checkpoints/anyline/Anyline/MTEED.pth", f"{cp_dir}/anyline")
+try:
+    shutil.move("checkpoints/anyline/Anyline/MTEED.pth", f"{cp_dir}/anyline")
+except:
+    print("anyline fail")
 snapshot_download("depth-anything/Depth-Anything-V2-Large", local_dir=f"{cp_dir}/depth-anything-v2")
 snapshot_download("facebook/sam2-hiera-large", local_dir=f"{cp_dir}/segment-anything-2")
 # https://github.com/facebookresearch/sam2/issues/26
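
Context for the hunk above: hf_hub_download with subfolder="Anyline" lands the file at checkpoints/anyline/Anyline/MTEED.pth, one level deeper than the loaders expect, so the move flattens it; the new try/except keeps a Space restart from crashing once the file has already been moved. A sketch of an idempotent variant (assumptions: same download layout; flatten_anyline is a hypothetical helper, not part of the commit, and it honors cp_dir instead of the hardcoded "checkpoints" prefix):

import shutil
from pathlib import Path

def flatten_anyline(cp_dir: str) -> None:
    # After hf_hub_download(..., subfolder="Anyline", local_dir=f"{cp_dir}/anyline"),
    # the checkpoint sits one directory deeper than callers expect.
    src = Path(cp_dir) / "anyline" / "Anyline" / "MTEED.pth"
    dst = Path(cp_dir) / "anyline" / "MTEED.pth"
    if src.exists():
        shutil.move(str(src), str(dst))  # flatten the nested Anyline/ folder
    elif not dst.exists():
        # Neither location has the file: the download itself failed.
        raise FileNotFoundError(f"MTEED.pth not found under {cp_dir}/anyline")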
modelmod.py CHANGED
@@ -122,10 +122,10 @@ class FluxModel:
         self.connector.to(self.dtype).to(self.device)

         # Text encoders initialization
-        self.tokenizer = CLIPTokenizer.from_pretrained(MODEL_PATHS['flux'], subfolder="tokenizer")
-        self.text_encoder = CLIPTextModel.from_pretrained(MODEL_PATHS['flux'], subfolder="text_encoder")
-        self.text_encoder_two = T5EncoderModel.from_pretrained(MODEL_PATHS['flux'], subfolder="text_encoder_2", **self.qkwargs)
-        self.tokenizer_two = T5TokenizerFast.from_pretrained(MODEL_PATHS['flux'], subfolder="tokenizer_2")
+        self.tokenizer = CLIPTokenizer.from_pretrained(MODEL_PATHS['flux'], subfolder="tokenizer", token=HF_TOKEN)
+        self.text_encoder = CLIPTextModel.from_pretrained(MODEL_PATHS['flux'], subfolder="text_encoder", token=HF_TOKEN)
+        self.text_encoder_two = T5EncoderModel.from_pretrained(MODEL_PATHS['flux'], subfolder="text_encoder_2", token=HF_TOKEN, **self.qkwargs)
+        self.tokenizer_two = T5TokenizerFast.from_pretrained(MODEL_PATHS['flux'], subfolder="tokenizer_2", token=HF_TOKEN)

         self.text_encoder.requires_grad_(False).to(self.dtype).to(self.device)
         #self.text_encoder_two.requires_grad_(False).to(self.dtype).to(self.device)
@@ -139,9 +139,9 @@ class FluxModel:
         self.t5_context_embedder.to(self.dtype).to(self.device)

         # Basic components
-        self.noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(MODEL_PATHS['flux'], subfolder="scheduler", shift=1)
-        self.vae = AutoencoderKL.from_pretrained(MODEL_PATHS['flux'], subfolder="vae")
-        self.transformer = FluxTransformer2DModel.from_pretrained(MODEL_PATHS['flux'], subfolder="transformer", **self.qkwargs)
+        self.noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(MODEL_PATHS['flux'], subfolder="scheduler", shift=1, token=HF_TOKEN)
+        self.vae = AutoencoderKL.from_pretrained(MODEL_PATHS['flux'], subfolder="vae", token=HF_TOKEN)
+        self.transformer = FluxTransformer2DModel.from_pretrained(MODEL_PATHS['flux'], subfolder="transformer", token=HF_TOKEN, **self.qkwargs)

         self.vae.requires_grad_(False).to(self.dtype).to(self.device)
         #self.transformer.requires_grad_(False).to(self.dtype).to(self.device)
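
The modelmod.py hunks thread HF_TOKEN through every from_pretrained call, so authentication travels with each load instead of depending solely on the login() call in app.py. A minimal sketch of the pattern in isolation (assumptions: MODEL_PATHS['flux'] resolves to a gated Hub repo such as black-forest-labs/FLUX.1-dev, and HF_TOKEN is read from the environment; this is not the committed code):

import os
from transformers import CLIPTokenizer

# token=None falls back to any cached `huggingface-cli login` credentials;
# passing it explicitly makes each load independent of that global state.
HF_TOKEN = os.getenv("HF_TOKEN")

tokenizer = CLIPTokenizer.from_pretrained(
    "black-forest-labs/FLUX.1-dev",  # assumed gated repo, standing in for MODEL_PATHS['flux']
    subfolder="tokenizer",
    token=HF_TOKEN,
)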