v2 on laionhr 1024

Patrick Esser 2022-07-14 23:36:08 +00:00 committed by pesser
parent 55bf957260
commit 1754106b19
3 changed files with 207 additions and 0 deletions

configs/stable-diffusion/v2_laionhr1024.yaml

@@ -0,0 +1,132 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.001
    linear_end: 0.015
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 16
    cond_stage_trainable: false # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.22765929 # magic number
    # NOTE disabled for resuming
    #scheduler_config: # 10000 warmup steps
    #  target: ldm.lr_scheduler.LambdaLinearScheduler
    #  params:
    #    warm_up_steps: [ 10000 ]
    #    cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
    #    f_start: [ 1.e-6 ]
    #    f_max: [ 1. ]
    #    f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64 # not really needed
        in_channels: 16
        out_channels: 16
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 16
        monitor: val/rec_loss
        ddconfig:
          double_z: True
          z_channels: 16
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [ 1,1,2,2,4 ] # num_down = len(ch_mult)-1
          num_res_blocks: 2
          attn_resolutions: [ 16 ]
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
    batch_size: 3
    num_workers: 4
    multinode: True
    train:
      shards: '{00000..17279}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 1024
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 1024

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{17280..17535}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 1024
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 1024

lightning:
  find_unused_parameters: False

  modelcheckpoint:
    params:
      every_n_train_steps: 2000

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 2000
        max_images: 2
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 2
          unconditional_guidance_scale: 5.0
          unconditional_guidance_label: [""]

  trainer:
    benchmark: True
    val_check_interval: 5000000
    num_sanity_val_steps: 0
    accumulate_grad_batches: 4
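
A minimal sketch of how a config in this format is consumed, assuming the repo's usual OmegaConf-based entry point (ldm.util.instantiate_from_config builds the object named under "target" with the keyword arguments under "params"); the path below is the one used by the launcher script, everything else is illustrative:

    # sketch: build the LatentDiffusion model described by the YAML above
    from omegaconf import OmegaConf
    from ldm.util import instantiate_from_config

    config = OmegaConf.load("configs/stable-diffusion/v2_laionhr1024.yaml")
    model = instantiate_from_config(config.model)  # -> ldm.models.diffusion.ddpm.LatentDiffusion
    print(type(model).__name__)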

scripts/slurm/v2_laionhr1024/launcher.sh

@@ -0,0 +1,33 @@
#!/bin/bash
# mpi version for node rank
H=`hostname`
THEID=`echo -e $HOSTNAMES | python3 -c "import sys;[sys.stdout.write(str(i)) for i,line in enumerate(next(sys.stdin).split(' ')) if line.strip() == '$H'.strip()]"`
export NODE_RANK=${THEID}
echo THEID=$THEID
echo "##########################################"
echo MASTER_ADDR=${MASTER_ADDR}
echo MASTER_PORT=${MASTER_PORT}
echo NODE_RANK=${NODE_RANK}
echo WORLD_SIZE=${WORLD_SIZE}
echo "##########################################"
# debug environment worked great so we stick with it
# no magic there, just a miniconda python=3.9, pytorch=1.12, cudatoolkit=11.3
# env with pip dependencies from stable diffusion's requirements.txt
eval "$(/fsx/stable-diffusion/debug/miniconda3/bin/conda shell.bash hook)"
conda activate stable
cd /fsx/stable-diffusion/stable-diffusion
CONFIG="/fsx/stable-diffusion/stable-diffusion/configs/stable-diffusion/v2_laionhr1024.yaml"
# resume and set new seed to reshuffle data
EXTRA="--seed 714 model.params.ckpt_path=/fsx/stable-diffusion/stable-diffusion/logs/2022-07-12T00-50-44_txt2img-multinode-clip-encoder-f16-1024-laion-hr/checkpoints/last.ckpt"
# custom logdir
#EXTRA="${EXTRA} --logdir rlogs"
# debugging
#EXTRA="${EXTRA} -d True lightning.callbacks.image_logger.params.batch_frequency=50"
python main.py --base $CONFIG --gpus 0,1,2,3,4,5,6,7 -t --num_nodes ${WORLD_SIZE} --scale_lr False $EXTRA
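
The NODE_RANK one-liner above is dense; what it computes is just the index of the local hostname in the space-separated HOSTNAMES list exported by the sbatch script. A readable Python equivalent (a sketch only, assuming each hostname appears exactly once):

    # sketch: NODE_RANK = position of this machine's hostname in the HOSTNAMES list
    import os
    import socket

    hostnames = os.environ["HOSTNAMES"].split(" ")     # space-separated list from scontrol
    node_rank = hostnames.index(socket.gethostname())  # index of the local host
    print(node_rank)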

Slurm submission script

@@ -0,0 +1,42 @@
#!/bin/bash
#SBATCH --partition=compute-od-gpu
#SBATCH --job-name=stable-diffusion-v2-laionhr1024
#SBATCH --nodes 20
#SBATCH --ntasks-per-node 1
#SBATCH --cpus-per-gpu=4
#SBATCH --gres=gpu:8
#SBATCH --exclusive
#SBATCH --output=%x_%j.out
#SBATCH --comment "Key=Monitoring,Value=ON"
module load intelmpi
source /opt/intel/mpi/latest/env/vars.sh
export LD_LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib64:/usr/local/cuda-11.0/efa/lib:/usr/local/cuda-11.0/lib:/usr/local/cuda-11.0/lib64:/usr/local/cuda-11.0:/opt/nccl/build/lib:/opt/aws-ofi-nccl-install/lib:/opt/aws-ofi-nccl/lib:$LD_LIBRARY_PATH
export NCCL_PROTO=simple
export PATH=/opt/amazon/efa/bin:$PATH
export LD_PRELOAD="/opt/nccl/build/lib/libnccl.so"
export FI_EFA_FORK_SAFE=1
export FI_LOG_LEVEL=1
export FI_EFA_USE_DEVICE_RDMA=1 # use for p4dn
export NCCL_DEBUG=info
export PYTHONFAULTHANDLER=1
export CUDA_LAUNCH_BLOCKING=0
export OMPI_MCA_mtl_base_verbose=1
export FI_EFA_ENABLE_SHM_TRANSFER=0
export FI_PROVIDER=efa
export FI_EFA_TX_MIN_CREDITS=64
export NCCL_TREE_THRESHOLD=0
# sent to sub script
export HOSTNAMES=`scontrol show hostnames "$SLURM_JOB_NODELIST"`
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=12802
export COUNT_NODE=`scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l`
export WORLD_SIZE=$COUNT_NODE
echo go $COUNT_NODE
echo $HOSTNAMES
echo $WORLD_SIZE
mpirun -n $COUNT_NODE -perhost 1 /fsx/stable-diffusion/stable-diffusion/scripts/slurm/v2_laionhr1024/launcher.sh
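
Putting the config and the Slurm allocation together, the effective batch size per optimizer step is easy to check by hand (a back-of-the-envelope sketch; it assumes the usual DDP setup where every GPU sees its own batch and gradients are accumulated over 4 steps):

    # sketch: effective global batch size for this run
    batch_size_per_gpu = 3   # data.params.batch_size in the YAML
    gpus_per_node = 8        # --gres=gpu:8 and --gpus 0,...,7
    num_nodes = 20           # #SBATCH --nodes 20
    grad_accum = 4           # lightning.trainer.accumulate_grad_batches
    print(batch_size_per_gpu * gpus_per_node * num_nodes * grad_accum)  # 1920 images per optimizer step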