model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0205
    log_every_t: 100
    timesteps: 1000
    loss_type: l1
    first_stage_key: image
    cond_stage_key: coordinates_bbox
    image_size: 64
    channels: 3
    conditioning_key: crossattn
    cond_stage_trainable: true
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 128
        attention_resolutions:
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 4
        num_head_channels: 32
        use_spatial_transformer: true
        transformer_depth: 3
        context_dim: 512
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        monitor: val/rec_loss
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: ldm.modules.encoders.modules.BERTEmbedder
      params:
        n_embed: 512
        n_layer: 16
        vocab_size: 8192
        max_seq_len: 92
        use_tokenizer: false
    monitor: val/loss_simple_ema

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 24
    wrap: false
    num_workers: 10
    train:
      target: ldm.data.openimages.OpenImagesBBoxTrain
      params:
        size: 256
    validation:
      target: ldm.data.openimages.OpenImagesBBoxValidation
      params:
        size: 256
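
# Usage sketch: this looks like a layout-to-image (bounding-box conditioned)
# LatentDiffusion config for the CompVis latent-diffusion repo, whose main.py
# takes configs via --base and starts training with -t. The exact file path
# below is an assumption; substitute wherever this config actually lives.
#
#   python main.py --base configs/latent-diffusion/layout2img-openimages256.yaml -t --gpus 0,
#
# main.py instantiates the classes named in each `target` field with the
# corresponding `params`, so the module paths above must be importable.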