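Full training of the high-noise expert of Wan-AI/Wan2.2-I2V-A14B. Only the DiT is trainable; the conditioning image is passed via `--extra_inputs "input_image"`, and training is restricted to this expert's normalized timestep range [0, 0.358]: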
```shell
accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
  --dataset_base_path data/example_video_dataset \
  --dataset_metadata_path data/example_video_dataset/metadata.csv \
  --height 480 \
  --width 832 \
  --num_frames 49 \
  --dataset_repeat 100 \
  --model_id_with_origin_paths "Wan-AI/Wan2.2-I2V-A14B:high_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-I2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-I2V-A14B:Wan2.1_VAE.pth" \
  --learning_rate 1e-5 \
  --num_epochs 2 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/Wan2.2-I2V-A14B_high_noise_full" \
  --trainable_models "dit" \
  --extra_inputs "input_image" \
  --use_gradient_checkpointing_offload \
  --max_timestep_boundary 0.358 \
  --min_timestep_boundary 0
```
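
Full training of the low-noise expert uses the same dataset and hyperparameters, but loads the `low_noise_model` weights and covers the complementary timestep range [0.358, 1]: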
```shell
accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
  --dataset_base_path data/example_video_dataset \
  --dataset_metadata_path data/example_video_dataset/metadata.csv \
  --height 480 \
  --width 832 \
  --num_frames 49 \
  --dataset_repeat 100 \
  --model_id_with_origin_paths "Wan-AI/Wan2.2-I2V-A14B:low_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-I2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-I2V-A14B:Wan2.1_VAE.pth" \
  --learning_rate 1e-5 \
  --num_epochs 2 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/Wan2.2-I2V-A14B_low_noise_full" \
  --trainable_models "dit" \
  --extra_inputs "input_image" \
  --use_gradient_checkpointing_offload \
  --max_timestep_boundary 1 \
  --min_timestep_boundary 0.358
```
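
Wan2.2-A14B is a two-expert architecture: a high-noise DiT and a low-noise DiT are swapped at a timestep boundary during denoising, which is why each expert is trained in a separate run, with `--min_timestep_boundary`/`--max_timestep_boundary` limiting it to its own share of the normalized timestep range. The sketch below illustrates this partitioning; it is not the repository's exact sampling code, and `num_train_timesteps = 1000` is an assumption:

```python
import torch

# Illustrative sketch of how a (min, max) timestep-boundary pair restricts
# which diffusion timesteps an expert is trained on. Assumed convention:
# 1000 training timesteps, boundaries given as fractions of that range.
def sample_timesteps(batch_size: int, min_boundary: float, max_boundary: float,
                     num_train_timesteps: int = 1000) -> torch.Tensor:
    lo = int(min_boundary * num_train_timesteps)
    hi = int(max_boundary * num_train_timesteps)
    return torch.randint(lo, hi, (batch_size,))

# The high-noise run above draws from [0, 358), the low-noise run from [358, 1000).
print(sample_timesteps(4, 0.0, 0.358))
print(sample_timesteps(4, 0.358, 1.0))
```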