From 58f2ba8a4451c20a1b68f557cc32686207ee8837 Mon Sep 17 00:00:00 2001
From: root
Date: Wed, 8 Jun 2022 21:00:53 +0000
Subject: [PATCH 1/2] 512 config

---
 ...B-multinode-clip-encoder-high-res-512.yaml | 131 ++++++++++++++++++
 ldm/data/laion.py                             |  17 ++-
 2 files changed, 147 insertions(+), 1 deletion(-)
 create mode 100644 configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml

diff --git a/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml b/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml
new file mode 100644
index 0000000..0f03920
--- /dev/null
+++ b/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml
@@ -0,0 +1,131 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 32
+    channels: 4
+    cond_stage_trainable: false # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+
+
+data:
+  target: ldm.data.laion.WebDataModuleFromConfig
+  params:
+    tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
+    batch_size: 50
+    num_workers: 4
+    multinode: True
+    train:
+      shards: '{00000..17279}.tar -'
+      shuffle: 10000
+      image_key: jpg
+      image_transforms:
+      - target: torchvision.transforms.Resize
+        params:
+          size: 512
+          interpolation: 3
+      - target: torchvision.transforms.RandomCrop
+        params:
+          size: 512
+
+    # NOTE use enough shards to avoid empty validation loops in workers
+    validation:
+      shards: '{17280..17535}.tar -'
+      shuffle: 0
+      image_key: jpg
+      image_transforms:
+      - target: torchvision.transforms.Resize
+        params:
+          size: 512
+          interpolation: 3
+      - target: torchvision.transforms.CenterCrop
+        params:
+          size: 512
+
+
+lightning:
+  callbacks:
+    image_logger:
+      target: main.ImageLogger
+      params:
+        batch_frequency: 5000
+        max_images: 4
+        increase_log_steps: False
+        log_first_step: False
+        log_images_kwargs:
+          use_ema_scope: False
+          inpaint: False
+          plot_progressive_rows: False
+          plot_diffusion_rows: False
+          N: 4
+          unconditional_guidance_scale: 3.0
+          unconditional_guidance_label: [""]
+
+  trainer:
+    #replace_sampler_ddp: False
+    benchmark: True
+    val_check_interval: 5000000 # really sorry
+    num_sanity_val_steps: 0
+    accumulate_grad_batches: 2
diff --git a/ldm/data/laion.py b/ldm/data/laion.py
index d80434b..b63c5b8 100644
--- a/ldm/data/laion.py
+++ b/ldm/data/laion.py
@@ -253,4 +253,19 @@ def example01():


 if __name__ == "__main__":
-    example01()
+    #example01()
+    from omegaconf import OmegaConf
+    from torch.utils.data.distributed import DistributedSampler
+    from torch.utils.data import IterableDataset
+    from torch.utils.data import DataLoader, RandomSampler, Sampler, SequentialSampler
+    from pytorch_lightning.trainer.supporters import CombinedLoader, CycleIterator
+
+
+    config = OmegaConf.load("configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml")
+    datamod = WebDataModuleFromConfig(**config["data"]["params"])
+    dataloader = datamod.train_dataloader()
+
+    for batch in dataloader:
+        print(batch.keys())
+        print(batch["jpg"].shape)
+        break

From 676e6acfef4bfcc47797ce5da6bd4cab00ac41c9 Mon Sep 17 00:00:00 2001
From: root
Date: Wed, 8 Jun 2022 21:15:19 +0000
Subject: [PATCH 2/2] up

---
 .../txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml b/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml
index 0f03920..f97af3e 100644
--- a/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml
+++ b/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml
@@ -9,7 +9,7 @@ model:
     timesteps: 1000
     first_stage_key: "jpg"
     cond_stage_key: "txt"
-    image_size: 32
+    image_size: 64
     channels: 4
     cond_stage_trainable: false # Note: different from the one we trained before
     conditioning_key: crossattn
@@ -28,7 +28,7 @@ model:
     unet_config:
       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
       params:
-        image_size: 32
+        image_size: 32 # unused
         in_channels: 4
         out_channels: 4
         model_channels: 320
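
Note on the resolution bookkeeping in PATCH 2/2: the kl-f8 first stage downsamples pixels by a factor of 8, so the 512-px crops configured in the data section correspond to a 64x64 latent grid, which is why model.params.image_size moves from 32 to 64; unet_config.params.image_size stays at 32 because UNetModel never reads it, as the added "# unused" comment records. A minimal sanity-check sketch of that relationship, assuming only that omegaconf is installed and the config path from the patch exists (the factor-of-8 constant here is an assumption tied to the kl-f8 checkpoint, not something read from the file):

    # Check that the latent grid implied by model.params.image_size matches
    # the pixel crop size divided by the kl-f8 downsampling factor (2**3 = 8).
    from omegaconf import OmegaConf

    config = OmegaConf.load(
        "configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml"
    )

    latent_size = config.model.params.image_size                          # 64 after PATCH 2/2
    crop_size = config.data.params.train.image_transforms[1].params.size  # RandomCrop -> 512

    assert crop_size == 8 * latent_size, (crop_size, latent_size)
    print(f"{crop_size}x{crop_size} px crop -> {latent_size}x{latent_size} latent grid")

Before the fix, code that builds sample shapes from model.params.image_size (e.g. image logging during training) would have produced 32x32 latents, i.e. 256-px-equivalent samples, even though training batches arrive at 64x64.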