费了半天劲儿，终于找到了 training_loop 中调用 augment_pipe 的地方。
```python
loss = dnnlib.util.construct_class_by_name(device=device, **ddp_modules, **loss_kwargs)
....
```
```python
class StyleGAN2Loss(Loss):
    ......
    def run_D(self, img, c, sync):
        if self.augment_pipe is not None:
            img = self.augment_pipe(img)
        with misc.ddp_sync(self.D, sync):
            logits = self.D(img, c)
        return logits
    ......
    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain):
        ......
```
```python
loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, sync=sync, gain=gain)

if (ada_stats is not None) and (batch_idx % ada_interval == 0):
    ada_stats.update()
    adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000)
    augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device)))
```