From 30af1b9828c89d479ef49eac97fca8a5c5314bba Mon Sep 17 00:00:00 2001
From: Patrick Esser
Date: Sat, 23 Jul 2022 09:45:27 +0000
Subject: [PATCH] misc configs

---
 ...tinode-clip-encoder-f16-1024-laion-hr.yaml | 135 +++++++++++++++++
 ...-clip-encoder-improved_aesthetics-256.yaml | 140 ++++++++++++++++++
 ...-clip-encoder-improved_aesthetics-512.yaml | 135 +++++++++++++++++
 .../stable-diffusion/v2_laionhr1024_2.yaml    | 132 +++++++++++++++++
 4 files changed, 542 insertions(+)
 create mode 100644 configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-1024-laion-hr.yaml
 create mode 100644 configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-256.yaml
 create mode 100644 configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-512.yaml
 create mode 100644 configs/stable-diffusion/v2_laionhr1024_2.yaml

diff --git a/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-1024-laion-hr.yaml b/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-1024-laion-hr.yaml
new file mode 100644
index 0000000..a3acd6a
--- /dev/null
+++ b/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-1024-laion-hr.yaml
@@ -0,0 +1,135 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.001
+    linear_end: 0.015
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 16
+    cond_stage_trainable: false # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.22765929 # magic number
+
+    #ckpt_path: "/home/mchorse/stable-diffusion-ckpts/768f16-2022-06-23-pruned.ckpt"
+
+    #scheduler_config: # 10000 warmup steps
+    #  target: ldm.lr_scheduler.LambdaLinearScheduler
+    #  params:
+    #    warm_up_steps: [ 10000 ]
+    #    cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+    #    f_start: [ 1.e-6 ]
+    #    f_max: [ 1. ]
+    #    f_min: [ 1. ]
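+    # (without a scheduler_config the optimizer runs at a constant learning
+    # rate, i.e. no warmup or decay)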
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 64 # not really needed
+        in_channels: 16
+        out_channels: 16
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 16
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: True
+          z_channels: 16
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult: [ 1,1,2,2,4 ] # num_down = len(ch_mult)-1
+          num_res_blocks: 2
+          attn_resolutions: [ 16 ]
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+
+
+data:
+  target: ldm.data.laion.WebDataModuleFromConfig
+  params:
+    tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
+    batch_size: 3
+    num_workers: 4
+    multinode: True
+    train:
+      shards: '{00000..17279}.tar -'
+      shuffle: 10000
+      image_key: jpg
+      image_transforms:
+      - target: torchvision.transforms.Resize
+        params:
+          size: 1024
+          interpolation: 3
+      - target: torchvision.transforms.RandomCrop
+        params:
+          size: 1024
+
+    # NOTE use enough shards to avoid empty validation loops in workers
+    validation:
+      shards: '{17280..17535}.tar -'
+      shuffle: 0
+      image_key: jpg
+      image_transforms:
+      - target: torchvision.transforms.Resize
+        params:
+          size: 1024
+          interpolation: 3
+      - target: torchvision.transforms.CenterCrop
+        params:
+          size: 1024
+
+
+lightning:
+  find_unused_parameters: False
+
+  modelcheckpoint:
+    params:
+      every_n_train_steps: 2000
+
+  callbacks:
+    image_logger:
+      target: main.ImageLogger
+      params:
+        batch_frequency: 2000
+        max_images: 2
+        increase_log_steps: False
+        log_first_step: False
+        log_images_kwargs:
+          use_ema_scope: False
+          inpaint: False
+          plot_progressive_rows: False
+          plot_diffusion_rows: False
+          N: 2
+          unconditional_guidance_scale: 5.0
+          unconditional_guidance_label: [""]
+
+  trainer:
+    benchmark: True
+    val_check_interval: 5000000
+    num_sanity_val_steps: 0
+    accumulate_grad_batches: 4
diff --git a/configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-256.yaml b/configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-256.yaml
new file mode 100644
index 0000000..2aa78d4
--- /dev/null
+++ b/configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-256.yaml
@@ -0,0 +1,140 @@
+model:
+  base_learning_rate: 8.e-05
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 32
+    channels: 4
+    cond_stage_trainable: false # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
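+        # (LambdaLinearScheduler ramps the lr multiplier linearly from f_start
+        # to f_max over warm_up_steps; with f_max = f_min = 1. the rate then
+        # simply stays at base_learning_rate for the rest of the cycle)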
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 416
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: [ 2, 2, 2, 2 ]
+        channel_mult: [ 1, 2, 4, 4 ]
+        disable_self_attentions: [ False, False, False, False ] # converts the self-attention to a cross-attention layer if true
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ckpt_path: "/fsx/stable-diffusion/stable-diffusion/models/first_stage_models/kl-f8/model.ckpt"
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+
+
+data:
+  target: ldm.data.laion.WebDataModuleFromConfig
+  params:
+    tar_base: "__improvedaesthetic__"
+    batch_size: 8
+    num_workers: 4
+    multinode: True
+    train:
+      shards: '{00000..17279}.tar -'
+      shuffle: 10000
+      image_key: jpg
+      image_transforms:
+      - target: torchvision.transforms.Resize
+        params:
+          size: 256
+          interpolation: 3
+      - target: torchvision.transforms.RandomCrop
+        params:
+          size: 256
+
+    # NOTE use enough shards to avoid empty validation loops in workers
+    validation:
+      shards: '{17280..17535}.tar -'
+      shuffle: 0
+      image_key: jpg
+      image_transforms:
+      - target: torchvision.transforms.Resize
+        params:
+          size: 256
+          interpolation: 3
+      - target: torchvision.transforms.CenterCrop
+        params:
+          size: 256
+
+
+lightning:
+  find_unused_parameters: false
+  modelcheckpoint:
+    params:
+      every_n_train_steps: 5000
+  callbacks:
+    image_logger:
+      target: main.ImageLogger
+      params:
+        disabled: True
+        batch_frequency: 2500
+        max_images: 4
+        increase_log_steps: False
+        log_first_step: False
+        log_images_kwargs:
+          use_ema_scope: False
+          inpaint: False
+          plot_progressive_rows: False
+          plot_diffusion_rows: False
+          N: 4
+          unconditional_guidance_scale: 3.0
+          unconditional_guidance_label: [""]
+
+  trainer:
+    #replace_sampler_ddp: False
+    benchmark: True
+    val_check_interval: 5000000 # really sorry
+    num_sanity_val_steps: 0
+    accumulate_grad_batches: 1
diff --git a/configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-512.yaml b/configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-512.yaml
new file mode 100644
index 0000000..0fe26f5
--- /dev/null
+++ b/configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-512.yaml
@@ -0,0 +1,135 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 32
+    channels: 4
+    cond_stage_trainable: false # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 416
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: [ 2, 2, 2, 2 ]
+        channel_mult: [ 1, 2, 4, 4 ]
+        disable_self_attentions: [ False, False, False, False ] # converts the self-attention to a cross-attention layer if true
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+
+
+data:
+  target: ldm.data.laion.WebDataModuleFromConfig
+  params:
+    tar_base: "__improvedaesthetic__"
+    batch_size: 1
+    num_workers: 4
+    multinode: True
+    train:
+      shards: '{00000..17279}.tar -'
+      shuffle: 10000
+      image_key: jpg
+      image_transforms:
+      - target: torchvision.transforms.Resize
+        params:
+          size: 512
+          interpolation: 3
+      - target: torchvision.transforms.RandomCrop
+        params:
+          size: 512
+
+    # NOTE use enough shards to avoid empty validation loops in workers
+    validation:
+      shards: '{17280..17535}.tar -'
+      shuffle: 0
+      image_key: jpg
+      image_transforms:
+      - target: torchvision.transforms.Resize
+        params:
+          size: 512
+          interpolation: 3
+      - target: torchvision.transforms.CenterCrop
+        params:
+          size: 512
+
+
+lightning:
+  find_unused_parameters: false
+  modelcheckpoint:
+    params:
+      every_n_train_steps: 5000
+  callbacks:
+    image_logger:
+      target: main.ImageLogger
+      params:
+        batch_frequency: 2500
+        max_images: 2
+        increase_log_steps: False
+        log_first_step: False
+        log_images_kwargs:
+          use_ema_scope: False
+          inpaint: False
+          plot_progressive_rows: False
+          plot_diffusion_rows: False
+          N: 2
+          unconditional_guidance_scale: 3.0
+          unconditional_guidance_label: [""]
+
+  trainer:
+    #replace_sampler_ddp: False
+    benchmark: True
+    val_check_interval: 5000000 # really sorry
+    num_sanity_val_steps: 0
+    accumulate_grad_batches: 2
diff --git a/configs/stable-diffusion/v2_laionhr1024_2.yaml b/configs/stable-diffusion/v2_laionhr1024_2.yaml
new file mode 100644
index 0000000..f2c721d
--- /dev/null
+++ b/configs/stable-diffusion/v2_laionhr1024_2.yaml
@@ -0,0 +1,132 @@
+model:
+  base_learning_rate: 7.5e-05
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.001
+    linear_end: 0.015
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 16
+    cond_stage_trainable: false # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.22765929 # magic number
+
+    # NOTE disabled for resuming
+    #scheduler_config: # 10000 warmup steps
+    #  target: ldm.lr_scheduler.LambdaLinearScheduler
+    #  params:
+    #    warm_up_steps: [ 10000 ]
+    #    cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+    #    f_start: [ 1.e-6 ]
+    #    f_max: [ 1. ]
+    #    f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 64 # not really needed
+        in_channels: 16
+        out_channels: 16
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 16
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: True
+          z_channels: 16
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult: [ 1,1,2,2,4 ] # num_down = len(ch_mult)-1
+          num_res_blocks: 2
+          attn_resolutions: [ 16 ]
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+
+
+data:
+  target: ldm.data.laion.WebDataModuleFromConfig
+  params:
+    tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
+    batch_size: 3
+    num_workers: 4
+    multinode: True
+    train:
+      shards: '{00000..17279}.tar -'
+      shuffle: 10000
+      image_key: jpg
+      image_transforms:
+      - target: torchvision.transforms.Resize
+        params:
+          size: 1024
+          interpolation: 3
+      - target: torchvision.transforms.RandomCrop
+        params:
+          size: 1024
+
+    # NOTE use enough shards to avoid empty validation loops in workers
+    validation:
+      shards: '{17280..17535}.tar -'
+      shuffle: 0
+      image_key: jpg
+      image_transforms:
+      - target: torchvision.transforms.Resize
+        params:
+          size: 1024
+          interpolation: 3
+      - target: torchvision.transforms.CenterCrop
+        params:
+          size: 1024
+
+
+lightning:
+  find_unused_parameters: False
+
+  modelcheckpoint:
+    params:
+      every_n_train_steps: 2000
+
+  callbacks:
+    image_logger:
+      target: main.ImageLogger
+      params:
+        batch_frequency: 2000
+        max_images: 2
+        increase_log_steps: False
+        log_first_step: False
+        log_images_kwargs:
+          use_ema_scope: False
+          inpaint: False
+          plot_progressive_rows: False
+          plot_diffusion_rows: False
+          N: 2
+          unconditional_guidance_scale: 5.0
+          unconditional_guidance_label: [""]
+
+  trainer:
+    benchmark: True
+    val_check_interval: 5000000
+    num_sanity_val_steps: 0
+    accumulate_grad_batches: 2
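
For readers unfamiliar with the config layout: every `target`/`params` block above is resolved through the `instantiate_from_config` convention used throughout the latent-diffusion codebase. The sketch below paraphrases that convention (names follow `ldm.util`; treat it as an illustration, not the verbatim implementation):

    import importlib

    from omegaconf import OmegaConf


    def get_obj_from_str(string):
        # "ldm.models.diffusion.ddpm.LatentDiffusion" -> the LatentDiffusion class
        module, cls = string.rsplit(".", 1)
        return getattr(importlib.import_module(module), cls)


    def instantiate_from_config(config):
        # each block has the shape {target: <import path>, params: {...}};
        # nested sub-configs (unet_config, first_stage_config, ...) are
        # instantiated recursively by the constructors themselves
        if "target" not in config:
            raise KeyError("expected a `target` key to instantiate")
        return get_obj_from_str(config["target"])(**config.get("params", dict()))


    # e.g. build the model roughly the way main.py does:
    config = OmegaConf.load("configs/stable-diffusion/v2_laionhr1024_2.yaml")
    model = instantiate_from_config(config.model)

On the `scale_factor` values: 0.18215 is the standard 1/std rescaling of the kl-f8 autoencoder's latents, chosen so the diffusion model sees roughly unit-variance inputs; the f16 "magic number" 0.22765929 presumably comes from the same estimate on that autoencoder's latents.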