Spaces: Running on Zero
刘虹雨 committed · Commit 26d9215 · Parent(s): 101ef26
update code
app.py
CHANGED
@@ -1,7 +1,59 @@
 import os
+import subprocess
 import sys
 import warnings
 import logging
+# Configure logging settings
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s - %(levelname)s - %(message)s"
+)
+def _get_output(cmd):
+    try:
+        return subprocess.check_output(cmd).decode("utf-8")
+    except Exception as ex:
+        logging.exception(ex)
+
+        return None
+def install_cuda_toolkit():
+    CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.1.0/local_installers/cuda_12.1.0_530.30.02_linux.run"
+    CUDA_TOOLKIT_FILE = "/tmp/%s" % os.path.basename(CUDA_TOOLKIT_URL)
+
+    print(f"[INFO] Downloading CUDA Toolkit from {CUDA_TOOLKIT_URL} ...")
+    subprocess.call(["wget", "-q", CUDA_TOOLKIT_URL, "-O", CUDA_TOOLKIT_FILE])
+    subprocess.call(["chmod", "+x", CUDA_TOOLKIT_FILE])
+
+    print("[INFO] Installing CUDA Toolkit silently ...")
+    subprocess.call([CUDA_TOOLKIT_FILE, "--silent", "--toolkit"])
+
+    print("[INFO] Setting CUDA environment variables ...")
+    os.environ["CUDA_HOME"] = "/usr/local/cuda"
+    os.environ["PATH"] = "%s/bin:%s" % (os.environ["CUDA_HOME"], os.environ.get("PATH", ""))
+    os.environ["LD_LIBRARY_PATH"] = "%s/lib64:%s" % (
+        os.environ["CUDA_HOME"],
+        os.environ.get("LD_LIBRARY_PATH", "")
+    )
+
+    # Optional: set architecture list for compilation (Ampere and Ada)
+    os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6;8.9"
+    if os.path.exists(CUDA_TOOLKIT_FILE):
+        os.remove(CUDA_TOOLKIT_FILE)
+        print(f"[INFO] Removed installer file: {CUDA_TOOLKIT_FILE}")
+    else:
+        print(f"[WARN] Installer file not found: {CUDA_TOOLKIT_FILE}")
+    print(os.listdir("/usr/local/cuda"))
+    print("[INFO] CUDA 12.1 installation complete. CUDA_HOME set to /usr/local/cuda")
+logging.info("Environment Variables: %s" % os.environ)
+logging.info("Installing CUDA extensions...")
+if _get_output(["nvcc", "--version"]) is None:
+    logging.info("Installing CUDA toolkit...")
+    install_cuda_toolkit()
+    logging.info("installCUDA: %s" % _get_output(["nvcc", "--version"]))
+else:
+    logging.info("Detected CUDA: %s" % _get_output(["nvcc", "--version"]))
+print("CUDA_HOME =", os.environ.get("CUDA_HOME"))
+from torch.utils.cpp_extension import CUDA_HOME
+print("CUDA_HOME from PyTorch:", CUDA_HOME)
 import argparse
 import json
 import random
@@ -56,47 +108,14 @@ import shutil
 
 # Suppress warnings (especially for PyTorch)
 warnings.filterwarnings("ignore")
-import os
-import subprocess
-import sys
 
-os.environ["MEDIAPIPE_DISABLE_GPU"] = "1" # Disable GPU for MediaPipe
-def install_cuda_toolkit():
-    CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.1.0/local_installers/cuda_12.1.0_530.30.02_linux.run"
-    CUDA_TOOLKIT_FILE = "/tmp/%s" % os.path.basename(CUDA_TOOLKIT_URL)
 
-
-    subprocess.call(["wget", "-q", CUDA_TOOLKIT_URL, "-O", CUDA_TOOLKIT_FILE])
-    subprocess.call(["chmod", "+x", CUDA_TOOLKIT_FILE])
-
-    print("[INFO] Installing CUDA Toolkit silently ...")
-    subprocess.call([CUDA_TOOLKIT_FILE, "--silent", "--toolkit"])
-
-    print("[INFO] Setting CUDA environment variables ...")
-    os.environ["CUDA_HOME"] = "/usr/local/cuda"
-    os.environ["PATH"] = "%s/bin:%s" % (os.environ["CUDA_HOME"], os.environ.get("PATH", ""))
-    os.environ["LD_LIBRARY_PATH"] = "%s/lib64:%s" % (
-        os.environ["CUDA_HOME"],
-        os.environ.get("LD_LIBRARY_PATH", "")
-    )
+os.environ["MEDIAPIPE_DISABLE_GPU"] = "1" # Disable GPU for MediaPipe
 
-    # Optional: set architecture list for compilation (Ampere and Ada)
-    os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6;8.9"
-    if os.path.exists(CUDA_TOOLKIT_FILE):
-        os.remove(CUDA_TOOLKIT_FILE)
-        print(f"[INFO] Removed installer file: {CUDA_TOOLKIT_FILE}")
-    else:
-        print(f"[WARN] Installer file not found: {CUDA_TOOLKIT_FILE}")
-    print(os.listdir("/usr/local/cuda"))
-    print("[INFO] CUDA 12.1 installation complete. CUDA_HOME set to /usr/local/cuda")
 
 # 🔧 Set CUDA_HOME before anything else
 
-
-logging.basicConfig(
-    level=logging.INFO,
-    format="%(asctime)s - %(levelname)s - %(message)s"
-)
+
 from diffusers import (
     StableDiffusionControlNetImg2ImgPipeline,
    ControlNetModel,
@@ -946,33 +965,25 @@ def launch_gradio_app():
 
    demo.queue()
    demo.launch(server_name="0.0.0.0")
-def _get_output(cmd):
-    try:
-        return subprocess.check_output(cmd).decode("utf-8")
-    except Exception as ex:
-        logging.exception(ex)
 
-        return None
 
 if __name__ == '__main__':
    import torch.multiprocessing as mp
    mp.set_start_method('spawn', force=True)
-    logging.info("Environment Variables: %s" % os.environ)
-    logging.info("Installing CUDA extensions...")
-    if _get_output(["nvcc", "--version"]) is None:
-        logging.info("Installing CUDA toolkit...")
-        install_cuda_toolkit()
-        logging.info("installCUDA: %s" % _get_output(["nvcc", "--version"]))
-
-    else:
-        logging.info("Detected CUDA: %s" % _get_output(["nvcc", "--version"]))
-
-    print("CUDA_HOME =", os.environ.get("CUDA_HOME"))
-    from torch.utils.cpp_extension import CUDA_HOME
-    print("CUDA_HOME from PyTorch:", CUDA_HOME)
+    # logging.info("Environment Variables: %s" % os.environ)
+    # logging.info("Installing CUDA extensions...")
+    # if _get_output(["nvcc", "--version"]) is None:
+    #     logging.info("Installing CUDA toolkit...")
+    #     install_cuda_toolkit()
+    #     logging.info("installCUDA: %s" % _get_output(["nvcc", "--version"]))
+
+    # else:
+    #     logging.info("Detected CUDA: %s" % _get_output(["nvcc", "--version"]))
+
+    # print("CUDA_HOME =", os.environ.get("CUDA_HOME"))
+    # from torch.utils.cpp_extension import CUDA_HOME
+    # print("CUDA_HOME from PyTorch:", CUDA_HOME)
    launch_pretrained()
-
-
    image_folder = "./demo_data/source_img/img_generate_different_domain/images512x512/demo_imgs"
    example_img_names = os.listdir(image_folder)
    render_model, sample_steps, DiT_model, \