model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentUpscaleDiffusion
  params:
    low_scale_key: "LR_image" # TODO: adapt
    linear_start: 0.001
    linear_end: 0.015
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "image"
    #first_stage_key: "jpg"  # TODO: use this later
    cond_stage_key: "caption"
    #cond_stage_key: "txt" # TODO: use this later
    image_size: 64
    channels: 16
    cond_stage_trainable: false
    conditioning_key: "hybrid-adm"
    monitor: val/loss_simple_ema
    scale_factor: 0.22765929   # "magic number": presumably ~1/std of the kl-f16 latents, rescaling them to roughly unit variance
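
    # Big picture, inferred from this config: the model diffuses 64x64x16 kl-f16
    # latents (1024px images at f=16, matching image_size/channels above), and
    # "hybrid-adm" conditioning presumably combines three signals: caption
    # cross-attention, channel-wise concatenation of the encoded low-res image,
    # and an ADM-style embedding of its noise-augmentation level.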

    low_scale_config:
      target: ldm.modules.encoders.modules.LowScaleEncoder
      params:
        scale_factor: 0.18215
        linear_start: 0.00085
        linear_end: 0.0120
        timesteps: 1000
        max_noise_level: 100
        output_size: 64
        model_config:
          target: ldm.models.autoencoder.AutoencoderKL
          params:
            embed_dim: 4
            monitor: val/rec_loss
            ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
            ddconfig:
              double_z: true
              z_channels: 4
              resolution: 256
              in_channels: 3
              out_ch: 3
              ch: 128
              ch_mult:
                - 1
                - 2
                - 4
                - 4
              num_res_blocks: 2
              attn_resolutions: [ ]
              dropout: 0.0
            lossconfig:
              target: torch.nn.Identity
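    # LowScaleEncoder's presumed role: encode the LR image with the frozen kl-f8
    # autoencoder above (lossconfig is Identity since it is never trained here),
    # add t ~ Uniform(0, max_noise_level) steps of diffusion noise as augmentation,
    # resize the 4-channel latent to output_size 64 to match the model latent, and
    # return the sampled noise level for the ADM conditioning.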

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]
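        # In effect: the LR multiplier ramps linearly from f_start (1e-6) to
        # f_max (1.0) over the 10k warm-up steps, then stays at f_min (1.0);
        # the effective LR is base_learning_rate times this multiplier (and,
        # if main.py is used unmodified, presumably also times batch size,
        # GPU count and accumulate_grad_batches).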

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        num_classes: 1000  # timesteps for noise conditioning
        image_size: 64    # not really needed
        in_channels: 20
        out_channels: 16
        model_channels: 32 # TODO: more
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False
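        # Channel bookkeeping, derivable from this config: in_channels 20 =
        # 16 latent channels + 4 concatenated channels from the low-scale
        # encoder; out_channels 16 stays in latent space. num_classes: 1000
        # sizes the ADM noise-level embedding for the full 1000-step schedule,
        # even though max_noise_level caps augmentation at 100. context_dim 768
        # matches the hidden size of the CLIP text encoder configured below.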

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 16
        monitor: val/rec_loss
        ckpt_path: "models/first_stage_models/kl-f16/model.ckpt"
        ddconfig:
          double_z: True
          z_channels: 16
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [ 1,1,2,2,4 ]  # num_down = len(ch_mult)-1
          num_res_blocks: 2
          attn_resolutions: [ 16 ]
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
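        # The kl-f16 autoencoder (4 downsampling stages, per the ch_mult note)
        # maps 1024px images to the 64x64x16 latents the diffusion model works
        # on; lossconfig is Identity because it stays frozen during training.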

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
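    # FrozenCLIPEmbedder wraps the frozen CLIP ViT-L/14 text encoder
    # ("openai/clip-vit-large-patch14" by default); its 768-dim token
    # embeddings are what context_dim: 768 in the UNet refers to.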


#data:
#  target: ldm.data.laion.WebDataModuleFromConfig
#  params:
#    tar_base: "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/"
#    batch_size: 4
#    num_workers: 4
#    multinode: True
#    min_size: 256   # TODO: experiment. Note: for 2B, images are stored at max 384 resolution
#    train:
#      shards: '{000000..231317}.tar -'
#      shuffle: 10000
#      image_key: jpg
#      image_transforms:
#      - target: torchvision.transforms.Resize
#        params:
#          size: 1024
#          interpolation: 3
#      - target: torchvision.transforms.RandomCrop
#        params:
#          size: 1024
#
#    # NOTE use enough shards to avoid empty validation loops in workers
#    validation:
#      shards: '{231318..231349}.tar -'
#      shuffle: 0
#      image_key: jpg
#      image_transforms:
#      - target: torchvision.transforms.Resize
#        params:
#          size: 1024
#          interpolation: 3
#      - target: torchvision.transforms.CenterCrop
#        params:
#          size: 1024

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 8
    num_workers: 7
    wrap: false
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 1024
        downscale_f: 4
        degradation: "cv_nearest"
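
    # As configured, ImageNetSRTrain should yield 1024px crops plus a
    # 4x-downscaled (256px) counterpart degraded with OpenCV nearest-neighbor
    # interpolation; the batch keys must line up with first_stage_key ("image")
    # and low_scale_key ("LR_image") above, hence the "TODO: adapt" note.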

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 10
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          sample: False
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          #unconditional_guidance_scale: 3.0
          #unconditional_guidance_label: [""]
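        # With sampling, inpainting and the progressive/diffusion rows disabled,
        # the logger presumably only writes inputs, reconstructions and
        # conditioning, every batch_frequency (=10) batches, which suits debugging.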

  trainer:
    benchmark: True
    # val_check_interval: 5000000  # really sorry # TODO: bring back in
    num_sanity_val_steps: 0
    accumulate_grad_batches: 1
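    # benchmark: True enables the cuDNN autotuner (a good fit for the fixed
    # input shapes here); num_sanity_val_steps: 0 skips Lightning's pre-fit
    # validation sanity check.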