diff --git a/enhance.py b/enhance.py
index f7ebb75..bd1e587 100755
--- a/enhance.py
+++ b/enhance.py
@@ -48,11 +48,11 @@ add_arg('--epochs',             default=10, type=int,           help='Total
 add_arg('--epoch-size',         default=72, type=int,           help='Number of batches trained in an epoch.')
 add_arg('--save-every',         default=10, type=int,           help='Save generator after every training epoch.')
 add_arg('--batch-shape',        default=192, type=int,          help='Resolution of images in training batch.')
-add_arg('--batch-size',         default=10, type=int,           help='Number of images per training batch.')
+add_arg('--batch-size',         default=15, type=int,           help='Number of images per training batch.')
 add_arg('--buffer-size',        default=1500, type=int,         help='Total image fragments kept in cache.')
 add_arg('--buffer-similar',     default=5, type=int,            help='Fragments cached for each image loaded.')
-add_arg('--learning-rate',      default=5E-4, type=float,       help='Parameter for the ADAM optimizer.')
-add_arg('--learning-period',    default=100, type=int,          help='How often to decay the learning rate.')
+add_arg('--learning-rate',      default=1E-4, type=float,       help='Parameter for the ADAM optimizer.')
+add_arg('--learning-period',    default=50, type=int,           help='How often to decay the learning rate.')
 add_arg('--learning-decay',     default=0.5, type=float,        help='How much to decay the learning rate.')
 add_arg('--generator-upscale',  default=2, type=int,            help='Steps of 2x up-sampling as post-process.')
 add_arg('--generator-downscale',default=0, type=int,            help='Steps of 2x down-sampling as preprocess.')
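
For context: the new defaults lower the ADAM base rate from 5E-4 to 1E-4 and halve the decay period from 100 to 50, so the schedule steps down twice as often from a lower starting point. Below is a minimal sketch of the step decay these three flags imply, assuming the rate is recomputed once per epoch; the loop and function are hypothetical, and only the parameter values come from the diff.

    def decayed_learning_rate(epoch, rate=1E-4, period=50, decay=0.5):
        # Step decay: multiply the base rate by `decay` every `period` epochs.
        return rate * decay ** (epoch // period)

    # With the new defaults: 1E-4 for epochs 0-49, 5E-5 for 50-99, 2.5E-5 after.
    for epoch in (0, 50, 100):
        print(epoch, decayed_learning_rate(epoch))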