Kremon96 committed
Commit 756091b · verified · 1 Parent(s): 62b2710

Update app.py

Files changed (1): app.py +23 -17
app.py CHANGED
@@ -67,13 +67,18 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
 # Check if running on Zero GPU (compile incompatible with Zero GPU)
 IS_ZEROGPU = os.environ.get("SPACES_ZERO_GPU") is not None
 
+# Determine the available device
+def get_available_device():
+    """Determines the available device (GPU or CPU)"""
+    if torch.cuda.is_available() and not IS_ZEROGPU:
+        return 'cuda'
+    else:
+        return 'cpu'
 
 # print("FISH_AE_DTYPE:", FISH_AE_DTYPE)
 # print("IS_ZEROGPU:", IS_ZEROGPU)
-# if IS_ZEROGPU:
-#     print("Running on Zero GPU - model compilation disabled")
-# else:
-#     print("Not on Zero GPU - model compilation available")
+# device = get_available_device()
+# print(f"Using device: {device}")
 
 def _safe_members(tf, prefix):
     if not prefix.endswith('/'):
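
The new get_available_device() helper replaces the removed status-print comments and gives the app one place to decide between GPU and CPU. A minimal self-contained sketch of the same pattern, assuming only the SPACES_ZERO_GPU variable and the function name from this diff (everything else is illustrative):

import os
import torch

# Zero GPU Spaces attach a CUDA device per request, so the process should not
# claim the GPU at startup; treat that environment as CPU for loading purposes.
IS_ZEROGPU = os.environ.get("SPACES_ZERO_GPU") is not None

def get_available_device() -> str:
    # Return 'cuda' only when a GPU is visible and we are not on Zero GPU.
    if torch.cuda.is_available() and not IS_ZEROGPU:
        return 'cuda'
    return 'cpu'

print(f"Using device: {get_available_device()}")  # 'cuda' on a plain GPU box, else 'cpu'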
@@ -170,11 +175,14 @@ def load_models():
     """Lazy load models on first use (required for Zero GPU)."""
     global model, model_compiled, fish_ae, pca_state, silentcipher_model
     if model is None:
-        # print("Loading models from HuggingFace...")
-        model = load_model_from_hf(dtype=MODEL_DTYPE, compile=False, token=HF_TOKEN)
-        fish_ae = load_fish_ae_from_hf(compile=(COMPILE_FISH_IF_NOT_ON_ZERO_GPU and not IS_ZEROGPU), dtype=FISH_AE_DTYPE, token=HF_TOKEN)
+        # Determine the available device
+        device = get_available_device()
+        # print(f"Loading models to device: {device}")
+
+        model = load_model_from_hf(dtype=MODEL_DTYPE, compile=False, token=HF_TOKEN, device=device)
+        fish_ae = load_fish_ae_from_hf(compile=(COMPILE_FISH_IF_NOT_ON_ZERO_GPU and not IS_ZEROGPU), dtype=FISH_AE_DTYPE, token=HF_TOKEN, device=device)
 
-        pca_state = load_pca_state_from_hf(token=HF_TOKEN)
+        pca_state = load_pca_state_from_hf(token=HF_TOKEN, device=device)
 
     # Load silentcipher model if enabled
     if USE_SILENTCIPHER:
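
load_models() keeps its lazy-load shape but now threads the selected device through each loader's new device= keyword. A rough sketch of that pattern, with a hypothetical _loader() standing in for load_model_from_hf and the other loaders from the diff:

import torch

model = None  # module-level cache, filled on first call (lazy load for Zero GPU)

def _loader(device: str) -> torch.nn.Module:
    # Hypothetical stand-in for load_model_from_hf(..., device=device).
    return torch.nn.Linear(8, 8).to(device)

def load_models() -> torch.nn.Module:
    # Load once, on whatever device is currently available.
    global model
    if model is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = _loader(device)
    return model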
@@ -188,10 +196,6 @@ def load_models():
             print("Continuing without watermarking...")
 
     # print("Models loaded successfully!")
-    # if not IS_ZEROGPU:
-    #     print("Note: model_compiled will be created when you check 'Compile Model'")
-
-
 
 def compile_model(should_compile):
     """Compile the model for faster inference."""
@@ -226,7 +230,6 @@ def do_compile():
 
     try:
         # Load models first if not already loaded (needed for compilation)
-        # Since Zero GPU can't compile, we can safely load eagerly here
         load_models()
 
         # print("Compiling model... This will take 1-3 minutes on first run.")
@@ -434,7 +437,9 @@ def generate_audio(
 
     # Load speaker audio if provided
     if speaker_audio_path is not None:
-        speaker_audio = load_audio(speaker_audio_path).cuda()
+        speaker_audio = load_audio(speaker_audio_path)
+        # Move to the same device as the model
+        speaker_audio = speaker_audio.to(active_model.device)
     else:
         speaker_audio = None
 
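
Dropping the hard-coded .cuda() in favor of .to(active_model.device) is what makes the CPU fallback work: the audio tensor follows the model rather than assuming a GPU exists. One caveat: a plain torch.nn.Module has no .device attribute, so the diff presumably relies on the model class exposing one. A generic equivalent infers the device from the parameters:

import torch

def move_to_model_device(x: torch.Tensor, net: torch.nn.Module) -> torch.Tensor:
    # nn.Module has no .device attribute; read it off the first parameter.
    return x.to(next(net.parameters()).device)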
@@ -633,7 +638,9 @@ def generate_audio_simple(
 
     # Load speaker audio if provided
     if speaker_audio_path is not None:
-        speaker_audio = load_audio(speaker_audio_path).cuda()
+        speaker_audio = load_audio(speaker_audio_path)
+        # Move to the same device as the model
+        speaker_audio = speaker_audio.to(active_model.device)
     else:
         speaker_audio = None
 
@@ -2859,5 +2866,4 @@ if __name__ == "__main__":
 
     # Enable queue for better handling of concurrent requests on HF Spaces
     demo.queue(max_size=20)
-    demo.launch(allowed_paths=allowed_paths)
-
+    demo.launch(allowed_paths=allowed_paths)
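
The final hunk just removes a stray blank line after demo.launch(...). For reference, the queue-then-launch tail follows the standard Gradio pattern for Spaces; a minimal skeleton, with a placeholder allowed_paths value:

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("Hello")

demo.queue(max_size=20)               # cap pending requests on Spaces
demo.launch(allowed_paths=["/data"])  # placeholder; the app passes its own list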