diff --git a/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512-inference.yaml b/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512-inference.yaml
new file mode 100644
index 0000000..aa9e5e7
--- /dev/null
+++ b/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512-inference.yaml
@@ -0,0 +1,69 @@
+model:
+  base_learning_rate: 1.0e-04
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false  # Note: different from the one we trained before
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+
+    scheduler_config: # 10000 warmup steps
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps: [ 10000 ]
+        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+        f_start: [ 1.e-6 ]
+        f_max: [ 1. ]
+        f_min: [ 1. ]
+
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
diff --git a/configs/stable-diffusion/txt2img-upscale-clip-encoder-f16-1024.yaml b/configs/stable-diffusion/txt2img-upscale-clip-encoder-f16-1024.yaml
index 325abc2..053238d 100644
--- a/configs/stable-diffusion/txt2img-upscale-clip-encoder-f16-1024.yaml
+++ b/configs/stable-diffusion/txt2img-upscale-clip-encoder-f16-1024.yaml
@@ -1,5 +1,5 @@
 model:
-  base_learning_rate: 1.0e-04
+  base_learning_rate: 5.0e-05
   target: ldm.models.diffusion.ddpm.LatentUpscaleDiffusion
   params:
     low_scale_key: "lr"
@@ -66,10 +66,11 @@ model:
         image_size: 64 # not really needed
         in_channels: 20
         out_channels: 16
-        model_channels: 192
-        attention_resolutions: [ 4, 2, 1 ]
+        model_channels: 96
+        attention_resolutions: [ 8, 4, 2 ]  # -> at 32, 16, 8
         num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
+        channel_mult: [ 1, 2, 4, 8, 8 ]
+        # -> res, ds: (64, 1), (32, 2), (16, 4), (8, 8), (4, 16)
         num_heads: 8
         use_spatial_transformer: True
         transformer_depth: 1
@@ -105,7 +106,7 @@ data:
   target: ldm.data.laion.WebDataModuleFromConfig
   params:
     tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
-    batch_size: 8
+    batch_size: 10
     num_workers: 4
     train:
       shards: '{00000..17279}.tar -'
@@ -143,6 +144,8 @@ data:
         factor: 4
 
 lightning:
+  find_unused_parameters: False
+
   callbacks:
     image_logger:
       target: main.ImageLogger
diff --git a/ldm/models/diffusion/ddpm.py b/ldm/models/diffusion/ddpm.py
index 0c5a2f4..8d53d4c 100644
--- a/ldm/models/diffusion/ddpm.py
+++ b/ldm/models/diffusion/ddpm.py
@@ -1556,7 +1556,7 @@ class LatentUpscaleDiffusion(LatentDiffusion):
                     uc[k] = [uc_tmp]
                 elif k == "c_adm":
                     assert isinstance(c[k], torch.Tensor)
-                    uc[k] = torch.ones_like(c[k]) * (self.low_scale_model.max_max_noise_level-1)
+                    uc[k] = torch.ones_like(c[k]) * (self.low_scale_model.max_noise_level-1)
                 elif isinstance(c[k], list):
                     uc[k] = [torch.zeros_like(c[k][i]) for i in range(len(c[k]))]
                 else:
diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py
index f4eff39..f0f99c4 100644
--- a/ldm/modules/attention.py
+++ b/ldm/modules/attention.py
@@ -253,9 +253,9 @@ class SpatialTransformer(nn.Module):
         x_in = x
         x = self.norm(x)
         x = self.proj_in(x)
-        x = rearrange(x, 'b c h w -> b (h w) c')
+        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
         for block in self.transformer_blocks:
             x = block(x, context=context)
-        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)
+        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
         x = self.proj_out(x)
-        return x + x_in
\ No newline at end of file
+        return x + x_in
diff --git a/main.py b/main.py
index 10624d6..e8946a5 100644
--- a/main.py
+++ b/main.py
@@ -759,6 +759,9 @@ if __name__ == "__main__":
             del callbacks_cfg['ignore_keys_callback']
 
         trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg]
+        if not lightning_config.get("find_unused_parameters", True):
+            from pytorch_lightning.plugins import DDPPlugin
+            trainer_kwargs["plugins"] = DDPPlugin(find_unused_parameters=False)
 
         trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs)
         trainer.logdir = logdir  ###