Merge remote-tracking branch 'origin/main'

commit 9fd981d790
Patrick Esser, 2022-07-28 22:46:22 +00:00

10 changed files with 537 additions and 18 deletions

configs/stable-diffusion/inpainting/v1-finetune-for-inpainting-laion-aesthetic-larger-masks.yaml

@@ -0,0 +1,149 @@
model:
  base_learning_rate: 7.5e-05
  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false  # Note: different from the one we trained before
    conditioning_key: hybrid  # important
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    ckpt_path: "/fsx/stable-diffusion/stable-diffusion/checkpoints/v1pp/v1pp-flatlined-hr.ckpt"

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 9 # 4 data + 4 downscaled image + 1 mask
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "__improvedaesthetic__"
    batch_size: 2
    num_workers: 4
    multinode: True
    min_size: 512
    max_pwatermark: 0.8
    train:
      shards: '{00000..17279}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddMask
        params:
          mode: "512train-large"

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{17280..17535}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddMask
        params:
          mode: "512train-large"

lightning:
  find_unused_parameters: False

  modelcheckpoint:
    params:
      every_n_train_steps: 2000

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        batch_frequency: 1000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]
          ddim_steps: 100  # todo check these out for inpainting,
          ddim_eta: 1.0  # todo check these out for inpainting,

  trainer:
    benchmark: True
    val_check_interval: 5000000 # really sorry
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
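
As a reading aid for the config above: a minimal sketch (hypothetical tensors, not code from this commit) of how the UNet's in_channels: 9 decomposes under conditioning_key: hybrid — the noised image latent is concatenated channel-wise with the masked-image latent and the downscaled mask.

import torch
import torch.nn.functional as F

B = 2
z_noisy = torch.randn(B, 4, 64, 64)        # "4 data": noised latent of the 512px image (f=8 autoencoder)
z_masked = torch.randn(B, 4, 64, 64)       # "4 downscaled image": latent of the masked input image
mask = torch.rand(B, 1, 512, 512).round()  # binary inpainting mask at pixel resolution
mask_lr = F.interpolate(mask, size=(64, 64), mode="nearest")  # "1 mask", brought to latent resolution

c_concat = torch.cat([z_masked, mask_lr], dim=1)  # the concat branch of the hybrid conditioning
unet_in = torch.cat([z_noisy, c_concat], dim=1)
print(unet_in.shape)  # torch.Size([2, 9, 64, 64]) -> matches in_channels: 9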

configs/stable-diffusion/upscaling/upscale-v1-with-f16.yaml

@@ -0,0 +1,214 @@
model:
  base_learning_rate: 5.0e-05
  target: ldm.models.diffusion.ddpm.LatentUpscaleDiffusion
  params:
    low_scale_key: "lr"
    linear_start: 0.001
    linear_end: 0.015
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 32
    channels: 16
    cond_stage_trainable: false
    conditioning_key: "hybrid-adm"
    monitor: val/loss_simple_ema
    scale_factor: 0.22765929 # magic number

    low_scale_config:
      target: ldm.modules.encoders.modules.LowScaleEncoder
      params:
        scale_factor: 0.18215
        linear_start: 0.00085
        linear_end: 0.0120
        timesteps: 1000
        max_noise_level: 250
        output_size: null
        model_config:
          target: ldm.models.autoencoder.AutoencoderKL
          params:
            embed_dim: 4
            monitor: val/rec_loss
            ckpt_path: "/fsx/stable-diffusion/stable-diffusion/models/first_stage_models/kl-f8/model.ckpt"
            ddconfig:
              double_z: true
              z_channels: 4
              resolution: 256
              in_channels: 3
              out_ch: 3
              ch: 128
              ch_mult:
              - 1
              - 2
              - 4
              - 4
              num_res_blocks: 2
              attn_resolutions: [ ]
              dropout: 0.0
            lossconfig:
              target: torch.nn.Identity

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        num_classes: 251 # timesteps for noise conditioning
        image_size: 64 # not really needed
        in_channels: 20
        out_channels: 16
        model_channels: 128
        attention_resolutions: [ 8, 4, 2 ] # -> at 32, 16, 8
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 6, 8 ]
        # -> res, ds: (64, 1), (32, 2), (16, 4), (8, 8), (4, 16)
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 16
        monitor: val/rec_loss
        ckpt_path: "/fsx/stable-diffusion/stable-diffusion/models/first_stage_models/kl-f16/model.ckpt"
        ddconfig:
          double_z: True
          z_channels: 16
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [ 1,1,2,2,4 ] # num_down = len(ch_mult)-1
          num_res_blocks: 2
          attn_resolutions: [ 16 ]
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

#data: # TODO: finetune here later
#  target: ldm.data.laion.WebDataModuleFromConfig
#  params:
#    tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
#    batch_size: 10
#    num_workers: 4
#    train:
#      shards: '{00000..17279}.tar -'
#      shuffle: 10000
#      image_key: jpg
#      image_transforms:
#      - target: torchvision.transforms.Resize
#        params:
#          size: 1024
#          interpolation: 3
#      - target: torchvision.transforms.RandomCrop
#        params:
#          size: 1024
#      postprocess:
#        target: ldm.data.laion.AddLR
#        params:
#          factor: 2
#
#    # NOTE use enough shards to avoid empty validation loops in workers
#    validation:
#      shards: '{17280..17535}.tar -'
#      shuffle: 0
#      image_key: jpg
#      image_transforms:
#      - target: torchvision.transforms.Resize
#        params:
#          size: 1024
#          interpolation: 3
#      - target: torchvision.transforms.CenterCrop
#        params:
#          size: 1024
#      postprocess:
#        target: ldm.data.laion.AddLR
#        params:
#          factor: 2

data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "__improvedaesthetic__"
    batch_size: 28
    num_workers: 4
    multinode: True
    min_size: 512
    train:
      shards: '{00000..17279}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddLR
        params:
          factor: 2

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{17280..17535}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddLR
        params:
          factor: 2

lightning:
  find_unused_parameters: False

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]

  trainer:
    benchmark: True
    val_check_interval: 5000000 # really sorry
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
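
As a reading aid (hypothetical tensors, not code from this commit): under conditioning_key: "hybrid-adm", the low-res latent joins the model input by concatenation, while the augmentation noise level is routed through the UNet's class-conditioning path.

import torch

B = 4
z = torch.randn(B, 16, 32, 32)   # f16 latent being denoised (channels: 16, image_size: 32)
zx = torch.randn(B, 4, 32, 32)   # noised f8 latent of the low-res input (LowScaleEncoder output)
noise_level = torch.randint(0, 250, (B,))  # c_adm, cf. max_noise_level: 250

unet_in = torch.cat([z, zx], dim=1)
print(unet_in.shape)  # torch.Size([4, 20, 32, 32]) -> matches in_channels: 20
# The text embedding enters via cross-attention (context_dim: 768); noise_level enters
# via the class-embedding path (num_classes: 251 leaves room for levels 0..250).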

ldm/data/inpainting/synthetic_mask.py

@@ -38,6 +38,18 @@ settings = {
         "max_s_box": 300,
         "marg": 10,
     },
+    "512train-large": {  # TODO: experimental
+        "p_irr": 0.5,
+        "min_n_irr": 1,
+        "max_n_irr": 5,
+        "max_l_irr": 450,
+        "max_w_irr": 400,
+        "min_n_box": 1,
+        "max_n_box": 4,
+        "min_s_box": 75,
+        "max_s_box": 450,
+        "marg": 10,
+    },
 }
@@ -128,14 +140,18 @@ def gen_large_mask(prng, img_h, img_w,
     return mask
 
-make_lama_mask = lambda prng, h, w: gen_large_mask(prng, h, w,
-                                                   **settings["256train"])
-
-make_narrow_lama_mask = lambda prng, h, w: gen_large_mask(prng, h, w,
-                                                          **settings["256narrow"])
-
-make_512_lama_mask = lambda prng, h, w: gen_large_mask(prng, h, w,
-                                                       **settings["512train"])
+make_lama_mask = lambda prng, h, w: gen_large_mask(prng, h, w, **settings["256train"])
+make_narrow_lama_mask = lambda prng, h, w: gen_large_mask(prng, h, w, **settings["256narrow"])
+make_512_lama_mask = lambda prng, h, w: gen_large_mask(prng, h, w, **settings["512train"])
+make_512_lama_mask_large = lambda prng, h, w: gen_large_mask(prng, h, w, **settings["512train-large"])
+
+MASK_MODES = {
+    "256train": make_lama_mask,
+    "256narrow": make_narrow_lama_mask,
+    "512train": make_512_lama_mask,
+    "512train-large": make_512_lama_mask_large
+}
 
 if __name__ == "__main__":
     import sys
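
A hedged usage sketch for the new MASK_MODES registry (assuming gen_large_mask takes a numpy RandomState and returns an H-by-W mask array, as its call sites in this file suggest):

import numpy as np
from ldm.data.inpainting.synthetic_mask import MASK_MODES

prng = np.random.RandomState(42)
mask = MASK_MODES["512train-large"](prng, 512, 512)  # larger strokes/boxes per the new settings
print(mask.shape)  # (512, 512)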

ldm/data/laion.py

@@ -16,7 +16,7 @@ from webdataset.handlers import warn_and_continue
 
 from ldm.util import instantiate_from_config
-from ldm.data.inpainting.synthetic_mask import gen_large_mask, make_lama_mask, make_narrow_lama_mask, make_512_lama_mask
+from ldm.data.inpainting.synthetic_mask import gen_large_mask, MASK_MODES
 from ldm.data.base import PRNGMixin
@@ -232,9 +232,10 @@ class AddLR(object):
 
 class AddMask(PRNGMixin):
-    def __init__(self, size=512):
+    def __init__(self, mode="512train"):
         super().__init__()
-        self.make_mask = make_512_lama_mask if size == 512 else make_lama_mask
+        assert mode in list(MASK_MODES.keys()), f'unknown mask generation mode "{mode}"'
+        self.make_mask = MASK_MODES[mode]
 
     def __call__(self, sample):
         # sample['jpg'] is tensor hwc in [-1, 1] at this point
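
The mode string now selects any registered generator, so the data configs above can opt into the larger masks by name. A sketch (illustrative sample dict; the exact keys AddMask writes come from its __call__, which this hunk does not show):

from ldm.data.laion import AddMask

add_mask = AddMask(mode="512train-large")  # unknown modes fail fast via the assert
# sample = {"jpg": image_tensor_hwc_in_minus1_1, "txt": "a caption"}
# sample = add_mask(sample)  # adds the synthetic-mask fields consumed by LatentInpaintDiffusion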

ldm/models/diffusion/ddpm.py

@@ -1487,9 +1487,10 @@ class LatentUpscaleDiffusion(LatentDiffusion):
         x_low = x_low.to(memory_format=torch.contiguous_format).float()
         zx, noise_level = self.low_scale_model(x_low)
         all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level}
         #import pudb; pu.db
         if log_mode:
-            interpretability = True
+            # TODO: maybe disable if too expensive
+            interpretability = False
             if interpretability:
                 zx = zx[:, :, ::2, ::2]
                 x_low_rec = self.low_scale_model.decode(zx)
@@ -1567,13 +1568,13 @@ class LatentUpscaleDiffusion(LatentDiffusion):
                 if k == "c_crossattn":
                     assert isinstance(c[k], list) and len(c[k]) == 1
                     uc[k] = [uc_tmp]
-                elif k == "c_adm":
+                elif k == "c_adm":  # todo: only run with text-based guidance?
                     assert isinstance(c[k], torch.Tensor)
-                    uc[k] = torch.ones_like(c[k]) * (self.low_scale_model.max_noise_level-1)
+                    uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level
                 elif isinstance(c[k], list):
-                    uc[k] = [torch.zeros_like(c[k][i]) for i in range(len(c[k]))]
+                    uc[k] = [c[k][i] for i in range(len(c[k]))]
                 else:
-                    uc[k] = torch.zeros_like(c[k])
+                    uc[k] = c[k]
 
             with ema_scope("Sampling with classifier-free guidance"):
                 samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
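
Net effect of this hunk: the "unconditional" branch used for classifier-free guidance now keeps the image conditioning and differs from the conditional branch only in the text embedding (with c_adm pinned to the maximum noise level), instead of zeroing every conditioning tensor, so guidance pushes only along the text direction. A self-contained sketch of the combination step this uc feeds into (illustrative names, not the repo's API):

import torch

def cfg_combine(eps_cond: torch.Tensor, eps_uncond: torch.Tensor, scale: float = 3.0) -> torch.Tensor:
    # scale matches unconditional_guidance_scale: 3.0 in the logging configs above
    return eps_uncond + scale * (eps_cond - eps_uncond)

eps_cond, eps_uncond = torch.randn(2, 16, 32, 32), torch.randn(2, 16, 32, 32)
print(cfg_combine(eps_cond, eps_uncond).shape)  # torch.Size([2, 16, 32, 32])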
@@ -1642,8 +1643,7 @@ class LatentInpaintDiffusion(LatentDiffusion):
                 new_entry[:, :self.keep_dims, ...] = sd[k]
                 sd[k] = new_entry
-        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
-            sd, strict=False)
+        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False)
         print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
         if len(missing) > 0:
             print(f"Missing Keys: {missing}")

ldm/modules/encoders/modules.py

@@ -255,8 +255,9 @@ class LowScaleEncoder(nn.Module):
         z = z * self.scale_factor
         noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
         z = self.q_sample(z, noise_level)
-        #z = torch.nn.functional.interpolate(z, size=self.out_size, mode="nearest")  # TODO: experiment with mode
-        z = z.repeat_interleave(2, -2).repeat_interleave(2, -1)
+        if self.out_size is not None:
+            z = torch.nn.functional.interpolate(z, size=self.out_size, mode="nearest")  # TODO: experiment with mode
+        # z = z.repeat_interleave(2, -2).repeat_interleave(2, -1)
         return z, noise_level
 
     def decode(self, z):
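
For intuition on the replaced line: nearest-neighbor interpolation to twice the input size reproduces the old hard-coded repeat_interleave upsampling exactly, while out_size=None (cf. output_size: null in the upscaling config above) now skips upsampling altogether. A quick check:

import torch
import torch.nn.functional as F

z = torch.randn(1, 4, 32, 32)
a = F.interpolate(z, size=(64, 64), mode="nearest")
b = z.repeat_interleave(2, -2).repeat_interleave(2, -1)
print(torch.equal(a, b))  # True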

scripts/slurm/v1-upscaling-f16-pretraining-512-aesthetics/launcher.sh

@@ -0,0 +1,27 @@
#!/bin/bash
# mpi version for node rank
H=`hostname`
THEID=`echo -e $HOSTNAMES | python3 -c "import sys;[sys.stdout.write(str(i)) for i,line in enumerate(next(sys.stdin).split(' ')) if line.strip() == '$H'.strip()]"`
export NODE_RANK=${THEID}
echo THEID=$THEID
echo "##########################################"
echo MASTER_ADDR=${MASTER_ADDR}
echo MASTER_PORT=${MASTER_PORT}
echo NODE_RANK=${NODE_RANK}
echo WORLD_SIZE=${WORLD_SIZE}
echo "##########################################"
# debug environment worked great so we stick with it
# no magic there, just a miniconda python=3.9, pytorch=1.12, cudatoolkit=11.3
# env with pip dependencies from stable diffusion's requirements.txt
eval "$(/fsx/stable-diffusion/debug/miniconda3/bin/conda shell.bash hook)"
conda activate torch111
cd /fsx/robin/stable-diffusion/stable-diffusion
CONFIG="/fsx/robin/stable-diffusion/stable-diffusion/configs/stable-diffusion/upscaling/upscale-v1-with-f16.yaml"
# debugging
#EXTRA="${EXTRA} -d True lightning.callbacks.image_logger.params.batch_frequency=50"
python main.py --base $CONFIG --gpus 0,1,2,3,4,5,6,7 -t --num_nodes ${WORLD_SIZE} --scale_lr False #$EXTRA
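
The NODE_RANK one-liner above is dense; unrolled in plain Python (illustrative host names standing in for $HOSTNAMES and $H):

hostnames = "gpu-node-0 gpu-node-1 gpu-node-2".split(' ')  # $HOSTNAMES from the sbatch script
h = "gpu-node-1"                                           # $H, this machine's hostname
node_rank = next(i for i, name in enumerate(hostnames) if name.strip() == h.strip())
print(node_rank)  # 1 -> exported as NODE_RANK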

scripts/slurm/v1-upscaling-f16-pretraining-512-aesthetics/sbatch.sh

@@ -0,0 +1,42 @@
#!/bin/bash
#SBATCH --partition=compute-od-gpu
#SBATCH --job-name=stable-diffusion-v1-upscaling-f16-pretraining-512-aesthetics
#SBATCH --nodes 8
#SBATCH --ntasks-per-node 1
#SBATCH --cpus-per-gpu=4
#SBATCH --gres=gpu:8
#SBATCH --exclusive
#SBATCH --output=%x_%j.out
#SBATCH --comment "Key=Monitoring,Value=ON"
module load intelmpi
source /opt/intel/mpi/latest/env/vars.sh
export LD_LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib64:/usr/local/cuda-11.0/efa/lib:/usr/local/cuda-11.0/lib:/usr/local/cuda-11.0/lib64:/usr/local/cuda-11.0:/opt/nccl/build/lib:/opt/aws-ofi-nccl-install/lib:/opt/aws-ofi-nccl/lib:$LD_LIBRARY_PATH
export NCCL_PROTO=simple
export PATH=/opt/amazon/efa/bin:$PATH
export LD_PRELOAD="/opt/nccl/build/lib/libnccl.so"
export FI_EFA_FORK_SAFE=1
export FI_LOG_LEVEL=1
export FI_EFA_USE_DEVICE_RDMA=1 # use for p4dn
export NCCL_DEBUG=info
export PYTHONFAULTHANDLER=1
export CUDA_LAUNCH_BLOCKING=0
export OMPI_MCA_mtl_base_verbose=1
export FI_EFA_ENABLE_SHM_TRANSFER=0
export FI_PROVIDER=efa
export FI_EFA_TX_MIN_CREDITS=64
export NCCL_TREE_THRESHOLD=0
# sent to sub script
export HOSTNAMES=`scontrol show hostnames "$SLURM_JOB_NODELIST"`
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=12802
export COUNT_NODE=`scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l`
export WORLD_SIZE=$COUNT_NODE
echo go $COUNT_NODE
echo $HOSTNAMES
echo $WORLD_SIZE
mpirun -n $COUNT_NODE -perhost 1 /fsx/robin/stable-diffusion/stable-diffusion/scripts/slurm/v1-upscaling-f16-pretraining-512-aesthetics/launcher.sh

scripts/slurm/v1_inpainting_aesthetics-larger-masks/launcher.sh

@@ -0,0 +1,27 @@
#!/bin/bash
# mpi version for node rank
H=`hostname`
THEID=`echo -e $HOSTNAMES | python3 -c "import sys;[sys.stdout.write(str(i)) for i,line in enumerate(next(sys.stdin).split(' ')) if line.strip() == '$H'.strip()]"`
export NODE_RANK=${THEID}
echo THEID=$THEID
echo "##########################################"
echo MASTER_ADDR=${MASTER_ADDR}
echo MASTER_PORT=${MASTER_PORT}
echo NODE_RANK=${NODE_RANK}
echo WORLD_SIZE=${WORLD_SIZE}
echo "##########################################"
# debug environment worked great so we stick with it
# no magic there, just a miniconda python=3.9, pytorch=1.12, cudatoolkit=11.3
# env with pip dependencies from stable diffusion's requirements.txt
eval "$(/fsx/stable-diffusion/debug/miniconda3/bin/conda shell.bash hook)"
conda activate torch111
cd /fsx/robin/stable-diffusion/stable-diffusion
CONFIG="/fsx/robin/stable-diffusion/stable-diffusion/configs/stable-diffusion/inpainting/v1-finetune-for-inpainting-laion-aesthetic-larger-masks.yaml"
# debugging
#EXTRA="${EXTRA} -d True lightning.callbacks.image_logger.params.batch_frequency=50"
python main.py --base $CONFIG --gpus 0,1,2,3,4,5,6,7 -t --num_nodes ${WORLD_SIZE} --scale_lr False

scripts/slurm/v1_inpainting_aesthetics-larger-masks/sbatch.sh

@@ -0,0 +1,42 @@
#!/bin/bash
#SBATCH --partition=compute-od-gpu
#SBATCH --job-name=stable-diffusion-v1-v1_inpainting_aesthetics-larger-masks
#SBATCH --nodes 24
#SBATCH --ntasks-per-node 1
#SBATCH --cpus-per-gpu=4
#SBATCH --gres=gpu:8
#SBATCH --exclusive
#SBATCH --output=%x_%j.out
#SBATCH --comment "Key=Monitoring,Value=ON"
module load intelmpi
source /opt/intel/mpi/latest/env/vars.sh
export LD_LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib64:/usr/local/cuda-11.0/efa/lib:/usr/local/cuda-11.0/lib:/usr/local/cuda-11.0/lib64:/usr/local/cuda-11.0:/opt/nccl/build/lib:/opt/aws-ofi-nccl-install/lib:/opt/aws-ofi-nccl/lib:$LD_LIBRARY_PATH
export NCCL_PROTO=simple
export PATH=/opt/amazon/efa/bin:$PATH
export LD_PRELOAD="/opt/nccl/build/lib/libnccl.so"
export FI_EFA_FORK_SAFE=1
export FI_LOG_LEVEL=1
export FI_EFA_USE_DEVICE_RDMA=1 # use for p4dn
export NCCL_DEBUG=info
export PYTHONFAULTHANDLER=1
export CUDA_LAUNCH_BLOCKING=0
export OMPI_MCA_mtl_base_verbose=1
export FI_EFA_ENABLE_SHM_TRANSFER=0
export FI_PROVIDER=efa
export FI_EFA_TX_MIN_CREDITS=64
export NCCL_TREE_THRESHOLD=0
# sent to sub script
export HOSTNAMES=`scontrol show hostnames "$SLURM_JOB_NODELIST"`
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=12802
export COUNT_NODE=`scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l`
export WORLD_SIZE=$COUNT_NODE
echo go $COUNT_NODE
echo $HOSTNAMES
echo $WORLD_SIZE
mpirun -n $COUNT_NODE -perhost 1 /fsx/robin/stable-diffusion/stable-diffusion/scripts/slurm/v1_inpainting_aesthetics-larger-masks/launcher.sh