name: TAHG
engine: TAHG
result_dir: ./result
max_pairs: 1000000

distributed:
  model:
    # broadcast_buffers: False

misc:
  random_seed: 324
  checkpoint:
    epoch_interval: 1  # one checkpoint every epoch
    n_saved: 2
  interval:
    print_per_iteration: 10  # print once every 10 iterations
    tensorboard:
      scalar: 10
      image: 500

model:
  generator:
    _type: TAHG-Generator
    style_in_channels: 3
    content_in_channels: 23
  discriminator:
    _type: TAHG-Discriminator
    in_channels: 3

loss:
  gan:
    loss_type: lsgan
    real_label_val: 1.0
    fake_label_val: 0.0
    weight: 1.0
  edge:
    criterion: 'L1'
    hed_pretrained_model_path: "/root/network-bsds500.pytorch"
    weight: 2
  perceptual:
    layer_weights:
      # "3": 1.0
      "0": 1.0
      "5": 1.0
      "10": 1.0
      "19": 1.0
    criterion: 'L2'
    style_loss: True
    perceptual_loss: False
    weight: 100.0
  recon:
    level: 1
    weight: 2

optimizers:
  generator:
    _type: Adam
    lr: 0.0001
    betas: [ 0.5, 0.999 ]
    weight_decay: 0.0001
  discriminator:
    _type: Adam
    lr: 1e-4
    betas: [ 0.5, 0.999 ]
    weight_decay: 0.0001

data:
  train:
    scheduler:
      start_proportion: 0.5
      target_lr: 0
    buffer_size: 50
    dataloader:
      batch_size: 4
      shuffle: True
      num_workers: 2
      pin_memory: True
      drop_last: True
    dataset:
      _type: GenerationUnpairedDatasetWithEdge
      root_a: "/data/i2i/VoxCeleb2Anime/trainA"
      root_b: "/data/i2i/VoxCeleb2Anime/trainB"
      edge_type: "hed_landmark"
      random_pair: True
      pipeline:
        - Load
        - Resize:
            size: [ 256, 256 ]
        - ToTensor
        - Normalize:
            mean: [ 0.5, 0.5, 0.5 ]
            std: [ 0.5, 0.5, 0.5 ]
  test:
    dataloader:
      batch_size: 8
      shuffle: False
      num_workers: 1
      pin_memory: False
      drop_last: False
    dataset:
      _type: GenerationUnpairedDatasetWithEdge
      root_a: "/data/i2i/VoxCeleb2Anime/testA"
      root_b: "/data/i2i/VoxCeleb2Anime/testB"
      edge_type: "hed_landmark"
      random_pair: False
      pipeline:
        - Load
        - Resize:
            size: [ 256, 256 ]
        - ToTensor
        - Normalize:
            mean: [ 0.5, 0.5, 0.5 ]
            std: [ 0.5, 0.5, 0.5 ]
    video_dataset:
      _type: SingleFolderDataset
      root: "/data/i2i/VoxCeleb2Anime/test_video_frames/"
      with_path: True
      pipeline:
        - Load
        - Resize:
            size: [ 256, 256 ]
        - ToTensor
        - Normalize:
            mean: [ 0.5, 0.5, 0.5 ]
            std: [ 0.5, 0.5, 0.5 ]