foivospar commited on
Commit
480ec1d
·
1 Parent(s): 9fe1f75

Change SD 1.5 base-model download source to ModelScope

Browse files
Files changed (2) hide show
  1. app.py +4 -1
  2. requirements.txt +1 -0
app.py CHANGED
@@ -31,6 +31,7 @@ else:
31
 
32
  # download models
33
  from huggingface_hub import hf_hub_download
 
34
 
35
  hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arc2face/config.json", local_dir="./models")
36
  hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arc2face/diffusion_pytorch_model.safetensors", local_dir="./models")
@@ -38,12 +39,14 @@ hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="encoder/config.json", lo
38
  hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="encoder/pytorch_model.bin", local_dir="./models")
39
  hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arcface.onnx", local_dir="./models/antelopev2")
40
 
 
 
41
  # Load face detection and recognition package
42
  app = FaceAnalysis(name='antelopev2', root='./', providers=['CPUExecutionProvider'])
43
  app.prepare(ctx_id=0, det_size=(640, 640))
44
 
45
  # Load pipeline
46
- base_model = 'runwayml/stable-diffusion-v1-5'
47
  encoder = CLIPTextModelWrapper.from_pretrained(
48
  'models', subfolder="encoder", torch_dtype=dtype
49
  )
 
31
 
32
  # download models
33
  from huggingface_hub import hf_hub_download
34
+ from modelscope import snapshot_download
35
 
36
  hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arc2face/config.json", local_dir="./models")
37
  hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arc2face/diffusion_pytorch_model.safetensors", local_dir="./models")
 
39
  hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="encoder/pytorch_model.bin", local_dir="./models")
40
  hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arcface.onnx", local_dir="./models/antelopev2")
41
 
42
+ base_model = snapshot_download('AI-ModelScope/stable-diffusion-v1-5', cache_dir='./models')
43
+
44
  # Load face detection and recognition package
45
  app = FaceAnalysis(name='antelopev2', root='./', providers=['CPUExecutionProvider'])
46
  app.prepare(ctx_id=0, det_size=(640, 640))
47
 
48
  # Load pipeline
49
+ #base_model = 'runwayml/stable-diffusion-v1-5'
50
  encoder = CLIPTextModelWrapper.from_pretrained(
51
  'models', subfolder="encoder", torch_dtype=dtype
52
  )
requirements.txt CHANGED
@@ -5,6 +5,7 @@ diffusers==0.23.0
5
  transformers==4.34.1
6
  peft
7
  accelerate
 
8
  insightface
9
  onnxruntime-gpu
10
  gradio
 
5
  transformers==4.34.1
6
  peft
7
  accelerate
8
+ modelscope
9
  insightface
10
  onnxruntime-gpu
11
  gradio