EuuIia committed
Commit d1276ae · verified · 1 Parent(s): c1a5a9b

Upload 6 files

Files changed (6)
  1. Dockerfile.txt +126 -0
  2. builder.sh +346 -0
  3. compose.yaml.txt +26 -0
  4. entrypoint.sh +21 -0
  5. info.sh +154 -0
  6. start.sh +54 -0
Dockerfile.txt ADDED
@@ -0,0 +1,126 @@
+ # =============================================================================
+ # ADUC-SDR Video Suite — High-Perf Diffusers for 8× L40S (SM 8.9)
+ # CUDA 12.8 | PyTorch 2.8.0+cu128 | Ubuntu 22.04
+ # =============================================================================
+ FROM nvidia/cuda:12.8.0-devel-ubuntu22.04
+
+ LABEL maintainer="Carlos Rodrigues dos Santos & Development Partner"
+ LABEL description="High-performance Diffusers stack with FA2/SDPA, 8×L40S"
+ LABEL version="4.4.0"
+ LABEL cuda_version="12.8.0"
+ LABEL python_version="3.10"
+ LABEL pytorch_version="2.8.0+cu128"
+ LABEL gpu_optimized_for="8x_NVIDIA_L40S"
+
+ # ---------------- Core env & caches ----------------
+ ENV DEBIAN_FRONTEND=noninteractive TZ=UTC LANG=C.UTF-8 LC_ALL=C.UTF-8 \
+     PYTHONUNBUFFERED=1 PYTHONDONTWRITEBYTECODE=1 \
+     PIP_NO_CACHE_DIR=1 PIP_DISABLE_PIP_VERSION_CHECK=1
+
+ # GPU/Compute
+ ENV NVIDIA_VISIBLE_DEVICES=all
+ ENV TORCH_CUDA_ARCH_LIST="8.9"
+ ENV CUDA_DEVICE_ORDER=PCI_BUS_ID
+ ENV CUDA_DEVICE_MAX_CONNECTIONS=32
+
+ # Threads
+ ENV OMP_NUM_THREADS=8 MKL_NUM_THREADS=8 MAX_JOBS=160
+
+ # Allocator/caches
+ ENV PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512,garbage_collection_threshold:0.8
+ ENV CUDA_LAUNCH_BLOCKING=0 CUDA_CACHE_MAXSIZE=2147483648 CUDA_CACHE_DISABLE=0
+
+ # App paths
+ ENV APP_HOME=/app
+ WORKDIR $APP_HOME
+
+ # Persistent data and caches in /data
+ ENV HF_HOME=/data/.cache/huggingface
+ ENV TORCH_HOME=/data/.cache/torch
+ ENV HF_DATASETS_CACHE=/data/.cache/datasets
+ ENV TRANSFORMERS_CACHE=/data/.cache/transformers
+ ENV DIFFUSERS_CACHE=/data/.cache/diffusers
+ ENV HF_HUB_ENABLE_HF_TRANSFER=1
+ ENV TOKENIZERS_PARALLELISM=false
+
+ # Create non-root user and data dirs early; fix ownership
+ RUN useradd -m -u 1000 -s /bin/bash appuser && \
+     mkdir -p /data /data/models \
+       /data/.cache/huggingface /data/.cache/torch \
+       /data/.cache/datasets /data/.cache/transformers /data/.cache/diffusers && \
+     chown -R appuser:appuser /data
+
+ # Models live in /data/models and are visible at /app/models
+ ENV MODELS_DIR=/data/models
+ RUN ln -sf /data/models /app/models
+
+ # ---------------- System & Python ----------------
+ RUN apt-get update && apt-get install -y --no-install-recommends \
+     build-essential gosu tree cmake git git-lfs curl wget ffmpeg ninja-build \
+     python3.10 python3.10-dev python3.10-distutils python3-pip \
+     ca-certificates libglib2.0-0 libgl1 \
+     && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+ RUN ln -sf /usr/bin/python3.10 /usr/bin/python3 && \
+     ln -sf /usr/bin/python3.10 /usr/bin/python && \
+     python3 -m pip install --upgrade pip
+
+ # ---------------- PyTorch cu128 (pinned index) ----------------
+ RUN pip install --index-url https://download.pytorch.org/whl/cu128 \
+     "torch>=2.8.0+cu128" "torchvision>=0.23.0+cu128" "torchaudio>=2.8.0+cu128"
+
+ # ---------------- Toolchain, Triton, FA2 (no bnb build) ----------------
+ RUN pip install packaging ninja cmake pybind11 scikit-build cython hf_transfer "numpy>=1.24.4"
+
+ # Triton 3.x (no triton.ops)
+ RUN pip uninstall -y triton || true && \
+     pip install -v --no-build-isolation triton==3.4.0
+
+ # FlashAttention 2.8.x
+ RUN pip install flash-attn==2.8.3 --no-build-isolation || \
+     pip install flash-attn==2.8.2 --no-build-isolation || \
+     pip install flash-attn==2.8.1 --no-build-isolation || \
+     pip install flash-attn==2.8.0.post2 --no-build-isolation
+
+ # ---------------- App dependencies ----------------
+ COPY requirements.txt ./requirements.txt
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # bitsandbytes: installs the latest release; pin an exact version here if you
+ # need to avoid surprise CUDA/PTX mismatches
+ RUN pip install --upgrade bitsandbytes
+
+ # Custom .whl (Apex + dropout_layer_norm)
+ RUN echo "Installing custom wheels..." && \
+     pip install --no-cache-dir \
+     "https://huggingface.co/euIaxs22/Aduc-sdr/resolve/main/apex-0.1-cp310-cp310-linux_x86_64.whl" \
+     "https://huggingface.co/euIaxs22/Aduc-sdr/resolve/main/dropout_layer_norm-0.1-cp310-cp310-linux_x86_64.whl"
+
+ # ====================================================================
+ # Optional: q8_kernels + LTX-Video (enable if needed; ensure wheel ABI)
+ RUN pip install --no-cache-dir \
+     "https://huggingface.co/euIaxs22/Aduc-sdr/resolve/main/q8_kernels-0.0.5-cp310-cp310-linux_x86_64.whl"
+ # RUN git clone https://github.com/Lightricks/LTX-Video.git /data/LTX-Video && \
+ #     cd /data/LTX-Video && python -m pip install -e .[inference]
+ # ====================================================================
+
+ # Scripts and app (WORKDIR is /app, so these land at /app/*.sh)
+ COPY info.sh ./info.sh
+ COPY builder.sh ./builder.sh
+ COPY start.sh ./start.sh
+ COPY entrypoint.sh ./entrypoint.sh
+
+ # Copy the rest of the source last for better caching
+ COPY . .
+
+ # Permissions on app tree
+ RUN chown -R appuser:appuser /app /data && \
+     chmod 0755 /app/entrypoint.sh /app/start.sh /app/info.sh /app/builder.sh
+
+ VOLUME /data
+
+ ENTRYPOINT ["/app/entrypoint.sh"]
+ USER appuser
+
+ # ---------------- Entry ----------------
+ CMD ["/app/start.sh"]
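
A minimal local smoke test for this image, as a sketch: the tag, host data path, and port mapping below are illustrative and not part of the commit; only Dockerfile.txt, requirements.txt, and the scripts above are assumed to be in the build context.

    # Build from this context (the Dockerfile is committed as Dockerfile.txt)
    docker build -f Dockerfile.txt -t aduc-sdr:4.4.0 .
    # Run with all GPUs, the Gradio port exposed, and a persistent /data volume
    docker run --rm -it --gpus all -p 7860:7860 -v /srv/aduc-data:/data aduc-sdr:4.4.0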
builder.sh ADDED
@@ -0,0 +1,346 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ echo "🚀 Builder (FlashAttn LayerNorm extra + Apex + Q8) — runtime with a visible GPU"
+
+ # ===== Config and directories =====
+ # Wheel-cache repo on the HF Hub (same default as the Python helpers below)
+ export SELF_HF_REPO_ID="${SELF_HF_REPO_ID:-euIaxs22/Aduc-sdr}"
+
+ mkdir -p /app/wheels /app/cuda_cache /app/wheels/src
+ chmod -R 777 /app/wheels || true
+ export CUDA_CACHE_PATH="/app/cuda_cache"
+
+ # Preserve the NGC license (if present)
+ if [ -f "/NGC-DL-CONTAINER-LICENSE" ]; then
+   cp -f /NGC-DL-CONTAINER-LICENSE /app/wheels/NGC-DL-CONTAINER-LICENSE || true
+ fi
+
+ # ===== Minimal dependencies =====
+ python -m pip install -v -U pip build setuptools wheel hatchling hatch-vcs scikit-build-core cmake ninja packaging "huggingface_hub[hf_transfer]" || true
+
+ # ===== Environment tags (Python/CUDA/Torch) =====
+ PY_TAG="$(python -c 'import sys; print(f"cp{sys.version_info[0]}{sys.version_info[1]}")' 2>/dev/null || echo cp310)"
+ TORCH_VER="$(python - <<'PY'
+ try:
+     import torch, re
+     v = torch.__version__
+     print(re.sub(r'\+.*$', '', v))
+ except Exception:
+     print("unknown")
+ PY
+ )"
+ CU_TAG="$(python - <<'PY'
+ try:
+     import torch
+     cu = getattr(torch.version, "cuda", None)
+     print("cu"+cu.replace(".","")) if cu else print("")
+ except Exception:
+     print("")
+ PY
+ )"
+ echo "[env] PY_TAG=${PY_TAG} TORCH_VER=${TORCH_VER} CU_TAG=${CU_TAG}"
+
+ # ============================================================================
+ # CHECKERS
+ # ============================================================================
+
+ # Checks specifically for the native module required by layer_norm (without checking 'flash-attn' as a whole)
+ check_flash_layer_norm_bin () {
+ python - <<'PY'
+ import importlib
+ ok = False
+ # known extensions produced by csrc/layer_norm
+ for name in [
+     "dropout_layer_norm",         # name of the native module
+     "flash_attn.ops.layer_norm",  # Python wrapper that uses the native module
+     "flash_attn.ops.rms_norm",    # may depend on the same backend in some packagings
+ ]:
+     try:
+         importlib.import_module(name)
+         ok = True
+         break
+     except Exception:
+         pass
+ raise SystemExit(0 if ok else 1)
+ PY
+ }
+
+ check_apex () {
+ python - <<'PY'
+ try:
+     from apex.normalization import FusedLayerNorm
+     import importlib; importlib.import_module("fused_layer_norm_cuda")
+     ok = True
+ except Exception:
+     ok = False
+ raise SystemExit(0 if ok else 1)
+ PY
+ }
+
+ check_q8 () {
+ python - <<'PY'
+ import importlib.util
+ spec = importlib.util.find_spec("ltx_q8_kernels") or importlib.util.find_spec("q8_kernels")
+ raise SystemExit(0 if spec else 1)
+ PY
+ }
+
+ # ============================================================================
+ # HUB DOWNLOADS (GENERIC)
+ # ============================================================================
+
+ # Fetches a wheel from the HF Hub by simple prefix (e.g. apex-, q8_kernels-)
+ install_from_hf_by_prefix () {
+   local PREFIX="$1"
+   echo "[hub] Looking for wheels '${PREFIX}-*.whl' in ${SELF_HF_REPO_ID} with tags ${PY_TAG}/${CU_TAG}"
+   python - "$PREFIX" "$PY_TAG" "$CU_TAG" <<'PY' || exit 0
+ import os, sys
+ from huggingface_hub import HfApi, hf_hub_download, HfFolder
+
+ prefix, py_tag, cu_tag = sys.argv[1], sys.argv[2], sys.argv[3]
+ repo = os.environ.get("SELF_HF_REPO_ID","euIaxs22/Aduc-sdr")
+ api = HfApi(token=os.getenv("HF_TOKEN") or HfFolder.get_token())
+ try:
+     files = api.list_repo_files(repo_id=repo, repo_type="model")
+ except Exception:
+     raise SystemExit(0)
+
+ def match(name: str) -> bool:
+     return name.endswith(".whl") and name.rsplit("/",1)[-1].startswith(prefix + "-") and (py_tag in name)
+
+ cands = [f for f in files if match(f)]
+ pref = [f for f in cands if cu_tag and cu_tag in f] or cands
+ if not pref:
+     raise SystemExit(0)
+
+ target = sorted(pref, reverse=True)[0]
+ print(target)
+ path = hf_hub_download(repo_id=repo, filename=target, repo_type="model", local_dir="/app/wheels")
+ print(path)
+ PY
+ }
+
+ # Fetches layer_norm submodule wheels, accepting name variants
+ install_flash_layer_norm_from_hf () {
+   echo "[hub] Looking for FlashAttention LayerNorm wheels in ${SELF_HF_REPO_ID}"
+   python - "$PY_TAG" "$CU_TAG" <<'PY' || exit 0
+ import os, sys, re
+ from huggingface_hub import HfApi, hf_hub_download, HfFolder
+
+ py_tag, cu_tag = sys.argv[1], sys.argv[2]
+ repo = os.environ.get("SELF_HF_REPO_ID","euIaxs22/Aduc-sdr")
+ api = HfApi(token=os.getenv("HF_TOKEN") or HfFolder.get_token())
+ try:
+     files = api.list_repo_files(repo_id=repo, repo_type="model")
+ except Exception:
+     raise SystemExit(0)
+
+ pats = [
+     r"^flash[_-]?attn[_-]?.*layer[_-]?norm-.*\.whl$",
+     r"^dropout[_-]?layer[_-]?norm-.*\.whl$",
+ ]
+ def ok(fn: str) -> bool:
+     name = fn.rsplit("/",1)[-1]
+     if py_tag not in name: return False
+     return any(re.search(p, name, flags=re.I) for p in pats)
+
+ cands = [f for f in files if ok(f)]
+ pref = [f for f in cands if cu_tag and cu_tag in f] or cands
+ if not pref:
+     raise SystemExit(0)
+
+ target = sorted(pref, reverse=True)[0]
+ print(target)
+ path = hf_hub_download(repo_id=repo, filename=target, repo_type="model", local_dir="/app/wheels")
+ print(path)
+ PY
+ }
+
+ # ============================================================================
+ # BUILDERS
+ # ============================================================================
+
+ # Extra step: ALWAYS try to install the layer_norm submodule from an HF wheel first;
+ # if no compatible wheel exists, build from csrc/layer_norm and produce a wheel.
+ build_or_install_flash_layer_norm () {
+   echo "[flow] === FlashAttn LayerNorm (extra step) ==="
+
+   # 1) Try an HF wheel first (avoids recompiling)
+   HF_OUT="$(install_flash_layer_norm_from_hf || true)"
+   if [ -n "${HF_OUT:-}" ]; then
+     WHEEL_PATH="$(printf "%s\n" "${HF_OUT}" | tail -n1)"
+     echo "[hub] Downloaded: ${WHEEL_PATH}"
+     python -m pip install -v -U --no-build-isolation --no-deps "${WHEEL_PATH}" || true
+     if check_flash_layer_norm_bin; then
+       echo "[flow] FlashAttn LayerNorm: OK via Hub wheel"
+       return 0
+     fi
+     echo "[flow] Hub wheel did not fix the import; proceeding with a build"
+   else
+     echo "[hub] No compatible FlashAttn LayerNorm wheel found"
+   fi
+
+   # 2) Build the csrc/layer_norm submodule from source -> wheel
+   local SRC="/app/wheels/src/flash-attn"
+   echo "[build] Preparing FlashAttention (layer_norm) source in ${SRC}"
+   if [ -d "$SRC/.git" ]; then
+     git -C "$SRC" fetch --all -p || true
+     git -C "$SRC" reset --hard origin/main || true
+     git -C "$SRC" clean -fdx || true
+   else
+     rm -rf "$SRC"
+     git clone --depth 1 https://github.com/Dao-AILab/flash-attention "$SRC"
+   fi
+
+   # Pick the target compute capability from the active GPU (cuts build time/noise)
+   export TORCH_CUDA_ARCH_LIST="$(python - <<'PY'
+ import torch
+ try:
+     cc = "%d.%d" % torch.cuda.get_device_capability(0)
+     print(cc)
+ except Exception:
+     print("8.9")  # fallback for Ada (L40S) when building without a visible GPU
+ PY
+ )"
+   echo "[build] TORCH_CUDA_ARCH_LIST=${TORCH_CUDA_ARCH_LIST}"
+
+   pushd "$SRC/csrc/layer_norm" >/dev/null
+   export MAX_JOBS="${MAX_JOBS:-90}"
+   # Produce a reusable wheel
+   python -m pip wheel -v --no-build-isolation --no-deps . -w /app/wheels || true
+   popd >/dev/null
+
+   # Install the generated wheel
+   local W="$(ls -t /app/wheels/*flash*attn*layer*norm*-*.whl 2>/dev/null | head -n1 || true)"
+   if [ -z "${W}" ]; then
+     W="$(ls -t /app/wheels/*dropout*layer*norm*-*.whl 2>/dev/null | head -n1 || true)"
+   fi
+   if [ -z "${W}" ]; then
+     # fallback: any freshly generated .whl
+     W="$(ls -t /app/wheels/*.whl 2>/dev/null | head -n1 || true)"
+   fi
+
+   if [ -n "${W}" ]; then
+     python -m pip install -v -U --no-deps "${W}" || true
+     echo "[build] FlashAttn LayerNorm installed from wheel: ${W}"
+   else
+     echo "[build] No wheel generated; installing straight from source (last resort)"
+     python -m pip install -v --no-build-isolation "$SRC/csrc/layer_norm" || true
+   fi
+
+   # Final check of the native binary
+   if check_flash_layer_norm_bin; then
+     echo "[flow] FlashAttn LayerNorm: import OK after build"
+     return 0
+   fi
+   echo "[flow] FlashAttn LayerNorm: import failed after build"
+   return 1
+ }
+
+ build_apex () {
+   local SRC="/app/wheels/src/apex"
+   echo "[build] Preparing Apex source in ${SRC}"
+   if [ -d "$SRC/.git" ]; then
+     git -C "$SRC" fetch --all -p || true
+     git -C "$SRC" reset --hard HEAD || true
+     git -C "$SRC" clean -fdx || true
+   else
+     rm -rf "$SRC"
+     git clone --depth 1 https://github.com/NVIDIA/apex "$SRC"
+   fi
+   echo "[build] Building Apex -> wheel"
+   export APEX_CPP_EXT=1 APEX_CUDA_EXT=1 APEX_ALL_CONTRIB_EXT=0
+   python -m pip wheel -v --no-build-isolation --no-deps "$SRC" -w /app/wheels || true
+   local W="$(ls -t /app/wheels/apex-*.whl 2>/dev/null | head -n1 || true)"
+   if [ -n "${W}" ]; then
+     python -m pip install -v -U --no-deps "${W}" || true
+     echo "[build] Apex installed from the freshly built wheel: ${W}"
+   else
+     echo "[build] No Apex wheel generated; installing from source"
+     python -m pip install -v --no-build-isolation "$SRC" || true
+   fi
+ }
+
+ Q8_REPO="${Q8_REPO:-https://github.com/Lightricks/LTX-Video-Q8-Kernels}"
+ Q8_COMMIT="${Q8_COMMIT:-f3066edea210082799ca5a2bbf9ef0321c5dd8fc}"
+ build_q8 () {
+   local SRC="/app/wheels/src/q8_kernels"
+   rm -rf "$SRC"
+   git clone --filter=blob:none "$Q8_REPO" "$SRC"
+   git -C "$SRC" checkout "$Q8_COMMIT"
+   git -C "$SRC" submodule update --init --recursive
+   echo "[build] Building Q8 Kernels -> wheel"
+   python -m pip wheel -v --no-build-isolation "$SRC" -w /app/wheels || true
+   local W="$(ls -t /app/wheels/q8_kernels-*.whl 2>/dev/null | head -n1 || true)"
+   if [ -n "${W}" ]; then
+     python -m pip install -v -U --no-deps "${W}" || true
+     echo "[build] Q8 installed from the freshly built wheel: ${W}"
+   else
+     echo "[build] No q8_kernels wheel generated; installing from source"
+     python -m pip install -v --no-build-isolation "$SRC" || true
+   fi
+ }
+
+ # ============================================================================
+ # EXECUTION
+ # ============================================================================
+
+ # Extra step that does NOT depend on "flash-attn" being installed: handles only layer_norm
+ #build_or_install_flash_layer_norm || true
+
+ # Apex (kept)
+ # Try an HF wheel first; if none is available, build and install as a wheel
+ #echo "[flow] === apex ==="
+ #HF_OUT="$(install_from_hf_by_prefix "apex" || true)"
+ #if [ -n "${HF_OUT:-}" ]; then
+ #  WHEEL_PATH="$(printf "%s\n" "${HF_OUT}" | tail -n1)"
+ #  echo "[hub] Downloaded: ${WHEEL_PATH}"
+ #  python -m pip install -v -U --no-build-isolation "${WHEEL_PATH}" || true
+ #  if ! check_apex; then
+ #    echo "[flow] apex: import failed after wheel; building"
+ #    #build_apex || true
+ #  fi
+ #else
+ #  echo "[hub] No compatible apex wheel; building"
+ #  build_apex || true
+ #fi
+
+ # Q8 (optional)
+ echo "[flow] === q8_kernels ==="
+ HF_OUT="$(install_from_hf_by_prefix "q8_kernels" || true)"
+ if [ -n "${HF_OUT:-}" ]; then
+   WHEEL_PATH="$(printf "%s\n" "${HF_OUT}" | tail -n1)"
+   echo "[hub] Downloaded: ${WHEEL_PATH}"
+   python -m pip install -v -U --no-build-isolation "${WHEEL_PATH}" || true
+   if ! check_q8; then
+     echo "[flow] q8_kernels: import failed after wheel; building"
+     build_q8 || true
+   fi
+ else
+   echo "[hub] No compatible q8_kernels wheel; building"
+   build_q8 || true
+ fi
+
+ # Upload produced wheels to the HF Hub (cache across restarts)
+ python - <<'PY' || true
+ import os
+ from huggingface_hub import HfApi, HfFolder
+
+ repo = os.environ.get("SELF_HF_REPO_ID","euIaxs22/Aduc-sdr")
+ token = os.getenv("HF_TOKEN") or HfFolder.get_token()
+ if not token:
+     raise SystemExit("HF_TOKEN missing; upload disabled")
+
+ api = HfApi(token=token)
+ api.upload_folder(
+     folder_path="/app/wheels",
+     repo_id=repo,
+     repo_type="model",
+     allow_patterns=["*.whl","NGC-DL-CONTAINER-LICENSE"],
+     ignore_patterns=["**/src/**","**/*.log","**/logs/**",".git/**"],
+ )
+ print("Upload complete (wheels + license).")
+ PY
+
+ chmod -R 777 /app/wheels || true
+ echo "✅ Builder finished."
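
A sketch of how builder.sh would typically be invoked inside the running container; HF_TOKEN, SELF_HF_REPO_ID, Q8_REPO, and Q8_COMMIT are the only knobs it reads, and the token value below is a placeholder.

    export HF_TOKEN=hf_xxx                    # placeholder; needed only for the wheel upload step
    export SELF_HF_REPO_ID=euIaxs22/Aduc-sdr  # Hub repo used as the wheel cache (default shown)
    bash /app/builder.sh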
compose.yaml.txt ADDED
@@ -0,0 +1,26 @@
+ services:
+   vincie:
+     image: img2img:edit
+     deploy:
+       resources:
+         reservations:
+           devices:
+             - capabilities: [gpu]
+     ports:
+       - "7860:7860"
+     environment:
+       GRADIO_SERVER_PORT: "7860"
+       HF_HUB_CACHE: "/data/.cache/huggingface/hub"
+       CKPT_ROOT: "/data/ckpt/VINCIE-3B"
+       VINCIE_ROOT: "/data/VINCIE"
+     volumes:
+       - vincie_hub:/data/.cache/huggingface/hub
+       - vincie_ckpt:/data/ckpt/VINCIE-3B
+       - vincie_out:/app/outputs
+       - vincie_repo:/data/VINCIE
+ volumes:
+   vincie_hub: {}
+   vincie_ckpt: {}
+   vincie_out: {}
+   vincie_repo: {}
+
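
Assuming Docker Compose v2 with the NVIDIA container toolkit installed, the service above would be brought up roughly like this (the file keeps its committed .txt name, so -f is passed explicitly):

    docker compose -f compose.yaml.txt up -d vincie
    docker compose -f compose.yaml.txt logs -f vincie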
entrypoint.sh ADDED
@@ -0,0 +1,21 @@
+ #!/bin/sh
+ # entrypoint.sh - Intended to run as root to fix permissions.
+ set -e
+
+ echo "🔐 ENTRYPOINT (root): Fixing permissions on the data and output directories..."
+
+ # Directories to create and adjust ownership on.
+ # Defaults are hard-coded because the environment variables may not be available here.
+ DIRS_TO_OWN="/app/outputs /app/inputs"
+
+ # Make sure the directories exist
+ mkdir -p $DIRS_TO_OWN
+
+ # Change ownership to UID/GID 1000, which corresponds to 'appuser'.
+ # Using the numeric UID/GID is more robust in container environments.
+ chown -R 1000:1000 $DIRS_TO_OWN
+
+ echo "✅ ENTRYPOINT (root): Permissions fixed."
+
+ # Hand execution over to the main command (CMD) defined in the Dockerfile.
+ exec "$@"
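
Note that the Dockerfile above switches to USER appuser before the entrypoint runs, so despite its comment this script does not actually execute as root, and the chown can fail under set -e. A minimal sketch of the privilege-drop variant, assuming the USER appuser line is removed from the Dockerfile and the gosu binary it already installs is used instead:

    #!/bin/sh
    set -e
    mkdir -p /app/outputs /app/inputs
    chown -R 1000:1000 /app/outputs /app/inputs   # appuser is UID/GID 1000
    exec gosu 1000:1000 "$@"                      # drop privileges, then run CMD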
info.sh ADDED
@@ -0,0 +1,154 @@
+ #!/usr/bin/env bash
+
+ set -euo pipefail
+
+ echo "================= RUNTIME CAPABILITIES ================="
+ date
+
+ echo
+ if command -v nvidia-smi >/dev/null 2>&1; then
+   nvidia-smi
+ else
+   echo "nvidia-smi: not available"
+ fi
+ echo
+
+ echo "CUDA_HOME: ${CUDA_HOME:-/usr/local/cuda}"
+ if command -v nvcc >/dev/null 2>&1; then
+   nvcc --version || true
+ else
+   echo "nvcc: not available"
+ fi
+ echo
+
+ echo "[PyTorch / CUDA backend]"
+ python3 - <<'PY'
+ import json, os, torch, inspect
+
+ def to_bool(x):
+     try:
+         if callable(x):
+             try:
+                 sig = inspect.signature(x)
+                 if len(sig.parameters)==0:
+                     return bool(x())
+             except Exception:
+                 pass
+             return True
+         return bool(x)
+     except Exception:
+         return None
+
+ info = {
+     "torch": getattr(torch, "__version__", None),
+     "cuda_available": torch.cuda.is_available(),
+     "cuda_device_count": torch.cuda.device_count(),
+     "cuda_runtime_version": getattr(torch.version, "cuda", None),
+     "cudnn_version": torch.backends.cudnn.version() if torch.backends.cudnn.is_available() else None,
+     "tf32": (torch.backends.cuda.matmul.allow_tf32 if torch.cuda.is_available() else None),
+     "flash_sdp": (to_bool(getattr(torch.backends.cuda, "enable_flash_sdp", None)) if torch.cuda.is_available() else None),
+     "mem_efficient_sdp": (to_bool(getattr(torch.backends.cuda, "enable_mem_efficient_sdp", None)) if torch.cuda.is_available() else None),
+     "math_sdp": (to_bool(getattr(torch.backends.cuda, "enable_math_sdp", None)) if torch.cuda.is_available() else None),
+ }
+ print(json.dumps(info, indent=2))
+ for i in range(min(torch.cuda.device_count(), 16)):
+     print(f"GPU {i}: {torch.cuda.get_device_name(i)}")
+ PY
+ echo
+
+ echo "[Apex (FusedLayerNorm/RMSNorm)]"
+ python3 - <<'PY'
+ try:
+     from apex.normalization import FusedLayerNorm, FusedRMSNorm
+     import importlib; importlib.import_module("fused_layer_norm_cuda")
+     print("apex.normalization: OK")
+ except Exception as e:
+     print("apex.normalization: FAIL ->", e)
+ PY
+ echo
+
+ echo "[FlashAttention (CUDA/Triton/RMSNorm)]"
+ python3 - <<'PY'
+ import importlib
+ mods = [
+     'flash_attn', 'flash_attn_2_cuda',
+     'flash_attn.ops.rms_norm', 'flash_attn.ops.layer_norm',
+     'flash_attn.layers.layer_norm'
+ ]
+ for m in mods:
+     try:
+         importlib.import_module(m)
+         print(f"{m}: OK")
+     except Exception as e:
+         print(f"{m}: FAIL -> {e}")
+ PY
+ echo
+
+ echo "[FlashAttention version/details]"
+ python3 - <<'PY'
+ try:
+     import flash_attn
+     fa_ver = getattr(flash_attn, "__version__", None)
+     print(f"flash_attn: {fa_ver}")
+ except Exception:
+     print("flash_attn: not importable.")
+ try:
+     import torch
+     print(f"torch: {torch.__version__} | cuda: {getattr(torch.version, 'cuda', None)}")
+ except Exception:
+     pass
+ PY
+ echo
+
+ echo "[Triton]"
+ python3 - <<'PY'
+ try:
+     import triton
+     print("triton:", triton.__version__)
+     try:
+         import triton.ops as _; print("triton.ops: OK")
+     except Exception:
+         print("triton.ops: not present (ok on Triton>=3.x)")
+ except Exception as e:
+     print("triton: FAIL ->", e)
+ PY
+ echo
+
+ echo "[BitsAndBytes (Q8/Q4)]"
+ python3 - <<'PY'
+ try:
+     import bitsandbytes as bnb
+     print("bitsandbytes:", bnb.__version__)
+     try:
+         from bitsandbytes.triton import _custom_ops as _; print("bnb.triton._custom_ops: OK")
+     except Exception as e:
+         print("bnb.triton: partial ->", e)
+ except Exception as e:
+     print("bitsandbytes: FAIL ->", e)
+ PY
+ echo
+
+ echo "[Transformers / Diffusers / XFormers / EcoML]"
+ python3 - <<'PY'
+ def _v(m):
+     try:
+         mod = __import__(m)
+         print(f"{m}: {getattr(mod, '__version__', 'unknown')}")
+     except Exception as e:
+         print(f"{m}: FAIL -> {e}")
+ for m in ("transformers", "diffusers", "xformers", "ecuml", "mlx", "ecobase"):
+     _v(m)
+ PY
+ echo
+
+ echo "[Distributed / NCCL Env]"
+ env | grep -E '^(CUDA_VISIBLE_DEVICES|NCCL_|TORCH_|ENABLE_.*SDP|HF_HUB_.*|CUDA_|NV_.*NCCL.*|PYTORCH_CUDA_ALLOC_CONF)=' | sort || true
+ echo
+
+ echo "[Output dir/perms]"
+ OUT="/app/outputs"
+ echo "OUT dir: $OUT"
+ mkdir -p "$OUT"
+ ls -la "$OUT" || true
+
+ echo "================= END CAPABILITIES ================="
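
info.sh is not called from the entrypoint or CMD; a typical way to run the capability report against a live container (the container name is a placeholder):

    docker exec -it aduc-sdr /app/info.sh | tee capabilities.log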
start.sh ADDED
@@ -0,0 +1,54 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ tree -L 4 /app
+ tree -L 4 /data
+
+ echo "🚀 Starting the LTX-Video setup and launch script..."
+ echo "Current user: $(whoami)"
+
+ # Environment variables consumed by the LTXServer
+ export HF_HOME="${HF_HOME:-/data/.cache/huggingface}"
+ export OUTPUT_ROOT="${OUTPUT_ROOT:-/app/outputs/ltx}"
+ export LTXV_FRAME_LOG_EVERY=8
+ export LTXV_DEBUG=1
+
+ # --- Make sure the directories exist ---
+ mkdir -p "$OUTPUT_ROOT" "$HF_HOME"
+
+ # 1) Builder (ensures Apex/Flash and CUDA deps)
+ #echo "🛠️ Running builder.sh to build/install CUDA dependencies..."
+ #if [ -f "/app/builder.sh" ]; then
+ #  /bin/bash /app/builder.sh
+ #  echo "✅ Builder finished."
+ #else
+ #  echo "⚠️ Warning: builder.sh not found. Skipping the dependency build step."
+ #fi
+
+ #python3 - <<'PY'
+ #import os
+ #from services.ltx_server import ltx_server_singleton as server
+ #try:
+ #    server.setup_dependencies()
+ #except Exception as e:
+ #    print("Setup error:", e)
+ #PY
+
+ cp -rfv /app/LTX-Video/ /data/
+
+ export OUTPUT_ROOT="${OUTPUT_ROOT:-/app/outputs}"
+ export INPUT_ROOT="${INPUT_ROOT:-/app/inputs}"
+
+ mkdir -p "$OUTPUT_ROOT" "$INPUT_ROOT"
+ echo "[aduc][start] Checking environment as user: $(whoami)"
+
+ # UI env
+ export GRADIO_SERVER_NAME="0.0.0.0"
+ export GRADIO_SERVER_PORT="${PORT:-7860}"
+ export GRADIO_ENABLE_QUEUE="True"
+
+ echo "[ltx][start] Launching app.py..."
+ # Run python directly.
+ exec python /app/app.py
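
Since GRADIO_SERVER_PORT is derived from PORT above, the UI port can be overridden at run time as long as the published port matches; a sketch using the illustrative image tag from earlier:

    docker run --rm -it --gpus all -e PORT=8080 -p 8080:8080 -v /srv/aduc-data:/data aduc-sdr:4.4.0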