From a7829a9cd6e8c288c66878f8bbd318c9a4d3e3cc Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Sat, 4 Mar 2023 00:15:52 +0530 Subject: [PATCH 01/27] Add the example for Super-Resolution --- examples/super_resolution/README.md | 37 +++++++ examples/super_resolution/data.py | 78 ++++++++++++++ examples/super_resolution/dataset.py | 37 +++++++ examples/super_resolution/main.py | 115 +++++++++++++++++++++ examples/super_resolution/model.py | 30 ++++++ examples/super_resolution/super_resolve.py | 42 ++++++++ 6 files changed, 339 insertions(+) create mode 100644 examples/super_resolution/README.md create mode 100644 examples/super_resolution/data.py create mode 100644 examples/super_resolution/dataset.py create mode 100644 examples/super_resolution/main.py create mode 100644 examples/super_resolution/model.py create mode 100644 examples/super_resolution/super_resolve.py diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md new file mode 100644 index 000000000000..d86594ab0e0c --- /dev/null +++ b/examples/super_resolution/README.md @@ -0,0 +1,37 @@ +# Superresolution using an efficient sub-pixel convolutional neural network + +ported from [pytorch-examples](https://github.com/pytorch/examples/tree/main/super_resolution) + +This example illustrates how to use the efficient sub-pixel convolution layer described in ["Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network" - Shi et al.](https://arxiv.org/abs/1609.05158) for increasing spatial resolution within your network for tasks such as superresolution. + +``` +usage: main.py [-h] --upscale_factor UPSCALE_FACTOR [--batchSize BATCHSIZE] + [--testBatchSize TESTBATCHSIZE] [--nEpochs NEPOCHS] [--lr LR] + [--cuda] [--threads THREADS] [--seed SEED] + +PyTorch Super Res Example + +optional arguments: + -h, --help show this help message and exit + --upscale_factor super resolution upscale factor + --batchSize training batch size + --testBatchSize testing batch size + --nEpochs number of epochs to train for + --lr Learning Rate. Default=0.01 + --cuda use cuda + --mps enable GPU on macOS + --threads number of threads for data loader to use Default=4 + --seed random seed to use. Default=123 +``` + +This example trains a super-resolution network on the [BSD300 dataset](https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/), using crops from the 200 training images, and evaluating on crops of the 100 test images. 
A snapshot of the model after every epoch with filename model*epoch*.pth + +## Example Usage: + +### Train + +`python main.py --upscale_factor 3 --batchSize 4 --testBatchSize 100 --nEpochs 30 --lr 0.001` + +### Super Resolve + +`python super_resolve.py --input_image dataset/BSDS300/images/test/16077.jpg --model model_epoch_500.pth --output_filename out.png` diff --git a/examples/super_resolution/data.py b/examples/super_resolution/data.py new file mode 100644 index 000000000000..e199a86c87cb --- /dev/null +++ b/examples/super_resolution/data.py @@ -0,0 +1,78 @@ +import tarfile +from os import makedirs, remove +from os.path import basename, exists, join + +from dataset import DatasetFromFolder +from six.moves import urllib +from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor + + +def download_bsd300(dest="dataset"): + output_image_dir = join(dest, "BSDS300/images") + + if not exists(output_image_dir): + makedirs(dest) + url = "http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz" + print("downloading url ", url) + + data = urllib.request.urlopen(url) + + file_path = join(dest, basename(url)) + with open(file_path, "wb") as f: + f.write(data.read()) + + print("Extracting data") + with tarfile.open(file_path) as tar: + for item in tar: + tar.extract(item, dest) + + remove(file_path) + + return output_image_dir + + +def calculate_valid_crop_size(crop_size, upscale_factor): + return crop_size - (crop_size % upscale_factor) + + +def input_transform(crop_size, upscale_factor): + return Compose( + [ + CenterCrop(crop_size), + Resize(crop_size // upscale_factor), + ToTensor(), + ] + ) + + +def target_transform(crop_size): + return Compose( + [ + CenterCrop(crop_size), + ToTensor(), + ] + ) + + +def get_training_set(upscale_factor): + root_dir = download_bsd300() + train_dir = join(root_dir, "train") + crop_size = calculate_valid_crop_size(256, upscale_factor) + + return DatasetFromFolder( + train_dir, + input_transform=input_transform(crop_size, upscale_factor), + target_transform=target_transform(crop_size), + ) + + +def get_test_set(upscale_factor): + root_dir = download_bsd300() + test_dir = join(root_dir, "test") + crop_size = calculate_valid_crop_size(256, upscale_factor) + + return DatasetFromFolder( + test_dir, + input_transform=input_transform(crop_size, upscale_factor), + target_transform=target_transform(crop_size), + ) diff --git a/examples/super_resolution/dataset.py b/examples/super_resolution/dataset.py new file mode 100644 index 000000000000..a02ce2172df7 --- /dev/null +++ b/examples/super_resolution/dataset.py @@ -0,0 +1,37 @@ +from os import listdir +from os.path import join + +import torch.utils.data as data +from PIL import Image + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in [".png", ".jpg", ".jpeg"]) + + +def load_img(filepath): + img = Image.open(filepath).convert("YCbCr") + y, _, _ = img.split() + return y + + +class DatasetFromFolder(data.Dataset): + def __init__(self, image_dir, input_transform=None, target_transform=None): + super(DatasetFromFolder, self).__init__() + self.image_filenames = [join(image_dir, x) for x in listdir(image_dir) if is_image_file(x)] + + self.input_transform = input_transform + self.target_transform = target_transform + + def __getitem__(self, index): + input = load_img(self.image_filenames[index]) + target = input.copy() + if self.input_transform: + input = self.input_transform(input) + if self.target_transform: + target = self.target_transform(target) 
+ + return input, target + + def __len__(self): + return len(self.image_filenames) diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py new file mode 100644 index 000000000000..28115e2da74f --- /dev/null +++ b/examples/super_resolution/main.py @@ -0,0 +1,115 @@ +from __future__ import print_function + +import argparse +from math import log10 + +import torch +import torch.nn as nn +import torch.optim as optim +from data import get_test_set, get_training_set +from model import Net +from torch.utils.data import DataLoader + +from ignite.engine import Engine, Events +from ignite.handlers import Checkpoint +from ignite.metrics import PSNR + +# Training settings +parser = argparse.ArgumentParser(description="PyTorch Super Res Example") +parser.add_argument("--upscale_factor", type=int, required=True, help="super resolution upscale factor") +parser.add_argument("--batchSize", type=int, default=64, help="training batch size") +parser.add_argument("--testBatchSize", type=int, default=10, help="testing batch size") +parser.add_argument("--nEpochs", type=int, default=2, help="number of epochs to train for") +parser.add_argument("--lr", type=float, default=0.01, help="Learning Rate. Default=0.01") +parser.add_argument("--cuda", action="store_true", help="use cuda?") +parser.add_argument("--mps", action="store_true", default=False, help="enables macOS GPU training") +parser.add_argument("--threads", type=int, default=4, help="number of threads for data loader to use") +parser.add_argument("--seed", type=int, default=123, help="random seed to use. Default=123") +opt = parser.parse_args() + +print(opt) + +if opt.cuda and not torch.cuda.is_available(): + raise Exception("No GPU found, please run without --cuda") +if not opt.mps and torch.backends.mps.is_available(): + raise Exception("Found mps device, please run with --mps to enable macOS GPU") + +torch.manual_seed(opt.seed) +use_mps = opt.mps and torch.backends.mps.is_available() + +if opt.cuda: + device = torch.device("cuda") +elif use_mps: + device = torch.device("mps") +else: + device = torch.device("cpu") + +print("===> Loading datasets") +train_set = get_training_set(opt.upscale_factor) +test_set = get_test_set(opt.upscale_factor) +training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True) +testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False) + +print("===> Building model") +model = Net(upscale_factor=opt.upscale_factor).to(device) +criterion = nn.MSELoss() + +optimizer = optim.Adam(model.parameters(), lr=opt.lr) + + +def train_step(engine, batch): + input, target = batch[0].to(device), batch[1].to(device) + + optimizer.zero_grad() + loss = criterion(model(input), target) + loss.backward() + optimizer.step() + + return loss.item() + + +def validation_step(engine, batch): + model.eval() + with torch.no_grad(): + x, y = batch[0].to(device), batch[1].to(device) + y_pred = model(x) + + return y_pred, y + + +trainer = Engine(train_step) +evaluator = Engine(validation_step) +psnr = PSNR(data_range=1) +psnr.attach(evaluator, "psnr") +validate_every = 1 +log_interval = 10 + + +@trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) +def log_training_loss(engine): + print( + "===> Epoch[{}]({}/{}): Loss: {:.4f}".format( + engine.state.epoch, engine.state.iteration, len(training_data_loader), engine.state.output + ) + ) + + +@trainer.on(Events.EPOCH_COMPLETED(every=validate_every)) +def 
run_validation(): + evaluator.run(testing_data_loader) + + +@trainer.on(Events.EPOCH_COMPLETED(every=validate_every)) +def log_validation(): + metrics = evaluator.state.metrics + print(f"Epoch: {trainer.state.epoch}, Avg. PSNR: {metrics['psnr']} dB") + + +@trainer.on(Events.EPOCH_COMPLETED) +def checkpoint(): + model_out_path = "model_epoch_{}.pth".format(trainer.state.epoch) + torch.save(model, model_out_path) + print("Checkpoint saved to {}".format(model_out_path)) + + +trainer.run(training_data_loader, opt.nEpochs) diff --git a/examples/super_resolution/model.py b/examples/super_resolution/model.py new file mode 100644 index 000000000000..5ad5418c16f6 --- /dev/null +++ b/examples/super_resolution/model.py @@ -0,0 +1,30 @@ +import torch +import torch.nn as nn +import torch.nn.init as init + + +class Net(nn.Module): + def __init__(self, upscale_factor): + super(Net, self).__init__() + + self.relu = nn.ReLU() + self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2)) + self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)) + self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1)) + self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1)) + self.pixel_shuffle = nn.PixelShuffle(upscale_factor) + + self._initialize_weights() + + def forward(self, x): + x = self.relu(self.conv1(x)) + x = self.relu(self.conv2(x)) + x = self.relu(self.conv3(x)) + x = self.pixel_shuffle(self.conv4(x)) + return x + + def _initialize_weights(self): + init.orthogonal_(self.conv1.weight, init.calculate_gain("relu")) + init.orthogonal_(self.conv2.weight, init.calculate_gain("relu")) + init.orthogonal_(self.conv3.weight, init.calculate_gain("relu")) + init.orthogonal_(self.conv4.weight) diff --git a/examples/super_resolution/super_resolve.py b/examples/super_resolution/super_resolve.py new file mode 100644 index 000000000000..8b9e5ea1a1dc --- /dev/null +++ b/examples/super_resolution/super_resolve.py @@ -0,0 +1,42 @@ +from __future__ import print_function + +import argparse + +import numpy as np +import torch +from PIL import Image +from torchvision.transforms import ToTensor + +# Training settings +parser = argparse.ArgumentParser(description="PyTorch Super Res Example") +parser.add_argument("--input_image", type=str, required=True, help="input image to use") +parser.add_argument("--model", type=str, required=True, help="model file to use") +parser.add_argument("--output_filename", type=str, help="where to save the output image") +parser.add_argument("--cuda", action="store_true", help="use cuda") +opt = parser.parse_args() + +print(opt) +img = Image.open(opt.input_image).convert("YCbCr") +y, cb, cr = img.split() + +model = torch.load(opt.model) +img_to_tensor = ToTensor() +input = img_to_tensor(y).view(1, -1, y.size[1], y.size[0]) + +if opt.cuda: + model = model.cuda() + input = input.cuda() + +out = model(input) +out = out.cpu() +out_img_y = out[0].detach().numpy() +out_img_y *= 255.0 +out_img_y = out_img_y.clip(0, 255) +out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode="L") + +out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC) +out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC) +out_img = Image.merge("YCbCr", [out_img_y, out_img_cb, out_img_cr]).convert("RGB") + +out_img.save(opt.output_filename) +print("output image saved to ", opt.output_filename) From 1b0baf3522a2a7b43ea1d662d8f1d5a7bf08662e Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Sat, 4 Mar 2023 00:29:31 +0530 Subject: [PATCH 02/27] Made some changes --- examples/super_resolution/main.py | 2 -- examples/super_resolution/model.py 
| 1 - 2 files changed, 3 deletions(-) diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index 28115e2da74f..0e12a5bbad97 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -1,7 +1,6 @@ from __future__ import print_function import argparse -from math import log10 import torch import torch.nn as nn @@ -11,7 +10,6 @@ from torch.utils.data import DataLoader from ignite.engine import Engine, Events -from ignite.handlers import Checkpoint from ignite.metrics import PSNR # Training settings diff --git a/examples/super_resolution/model.py b/examples/super_resolution/model.py index 5ad5418c16f6..1f80c95d0643 100644 --- a/examples/super_resolution/model.py +++ b/examples/super_resolution/model.py @@ -1,4 +1,3 @@ -import torch import torch.nn as nn import torch.nn.init as init From 7ebee4908c42db43c374e890cf4922e47452cd29 Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Tue, 7 Mar 2023 01:34:50 +0530 Subject: [PATCH 03/27] Made some changes --- examples/super_resolution/README.md | 4 ++-- examples/super_resolution/data.py | 2 +- examples/super_resolution/main.py | 2 -- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md index d86594ab0e0c..39d462f993f4 100644 --- a/examples/super_resolution/README.md +++ b/examples/super_resolution/README.md @@ -1,4 +1,4 @@ -# Superresolution using an efficient sub-pixel convolutional neural network +# Super-Resolution using an efficient sub-pixel convolutional neural network ported from [pytorch-examples](https://github.com/pytorch/examples/tree/main/super_resolution) @@ -24,7 +24,7 @@ optional arguments: --seed random seed to use. Default=123 ``` -This example trains a super-resolution network on the [BSD300 dataset](https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/), using crops from the 200 training images, and evaluating on crops of the 100 test images. A snapshot of the model after every epoch with filename model*epoch*.pth +This example trains a super-resolution network on the [BSD300 dataset](https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/), using crops from the 200 training images, and evaluating on crops of the 100 test images. 
A snapshot of the model after every epoch with filename `model_epoch_.pth` ## Example Usage: diff --git a/examples/super_resolution/data.py b/examples/super_resolution/data.py index e199a86c87cb..383729736800 100644 --- a/examples/super_resolution/data.py +++ b/examples/super_resolution/data.py @@ -1,9 +1,9 @@ import tarfile +import urllib from os import makedirs, remove from os.path import basename, exists, join from dataset import DatasetFromFolder -from six.moves import urllib from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index 0e12a5bbad97..211eaf336b97 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import argparse import torch From 3982d7b7cdd083dbfd270f1821e9bd82ed463d11 Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Thu, 16 Mar 2023 00:18:14 +0530 Subject: [PATCH 04/27] Add the time profiling features --- examples/super_resolution/main.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index 211eaf336b97..b9794334ef44 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -101,6 +101,16 @@ def log_validation(): print(f"Epoch: {trainer.state.epoch}, Avg. PSNR: {metrics['psnr']} dB") +@trainer.on(Events.EPOCH_COMPLETED) +def log_epoch_time(): + print(f"Epoch {trainer.state.epoch}, Time Taken : {trainer.state.times['EPOCH_COMPLETED']}") + + +@trainer.on(Events.COMPLETED) +def log_total_time(): + print(f"Total Time: {trainer.state.times['COMPLETED']}") + + @trainer.on(Events.EPOCH_COMPLETED) def checkpoint(): model_out_path = "model_epoch_{}.pth".format(trainer.state.epoch) From 982a0ebd4b8772efa002b5dc30efc86b4b706483 Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Sat, 18 Mar 2023 00:53:24 +0530 Subject: [PATCH 05/27] Added torchvision dataset --- examples/super_resolution/data.py | 78 ---------------------------- examples/super_resolution/dataset.py | 37 ------------- examples/super_resolution/main.py | 37 +++++++++++-- 3 files changed, 32 insertions(+), 120 deletions(-) delete mode 100644 examples/super_resolution/data.py delete mode 100644 examples/super_resolution/dataset.py diff --git a/examples/super_resolution/data.py b/examples/super_resolution/data.py deleted file mode 100644 index 383729736800..000000000000 --- a/examples/super_resolution/data.py +++ /dev/null @@ -1,78 +0,0 @@ -import tarfile -import urllib -from os import makedirs, remove -from os.path import basename, exists, join - -from dataset import DatasetFromFolder -from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor - - -def download_bsd300(dest="dataset"): - output_image_dir = join(dest, "BSDS300/images") - - if not exists(output_image_dir): - makedirs(dest) - url = "http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz" - print("downloading url ", url) - - data = urllib.request.urlopen(url) - - file_path = join(dest, basename(url)) - with open(file_path, "wb") as f: - f.write(data.read()) - - print("Extracting data") - with tarfile.open(file_path) as tar: - for item in tar: - tar.extract(item, dest) - - remove(file_path) - - return output_image_dir - - -def calculate_valid_crop_size(crop_size, upscale_factor): - return crop_size - (crop_size % upscale_factor) - - -def input_transform(crop_size, upscale_factor): - return Compose( - [ - 
CenterCrop(crop_size), - Resize(crop_size // upscale_factor), - ToTensor(), - ] - ) - - -def target_transform(crop_size): - return Compose( - [ - CenterCrop(crop_size), - ToTensor(), - ] - ) - - -def get_training_set(upscale_factor): - root_dir = download_bsd300() - train_dir = join(root_dir, "train") - crop_size = calculate_valid_crop_size(256, upscale_factor) - - return DatasetFromFolder( - train_dir, - input_transform=input_transform(crop_size, upscale_factor), - target_transform=target_transform(crop_size), - ) - - -def get_test_set(upscale_factor): - root_dir = download_bsd300() - test_dir = join(root_dir, "test") - crop_size = calculate_valid_crop_size(256, upscale_factor) - - return DatasetFromFolder( - test_dir, - input_transform=input_transform(crop_size, upscale_factor), - target_transform=target_transform(crop_size), - ) diff --git a/examples/super_resolution/dataset.py b/examples/super_resolution/dataset.py deleted file mode 100644 index a02ce2172df7..000000000000 --- a/examples/super_resolution/dataset.py +++ /dev/null @@ -1,37 +0,0 @@ -from os import listdir -from os.path import join - -import torch.utils.data as data -from PIL import Image - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in [".png", ".jpg", ".jpeg"]) - - -def load_img(filepath): - img = Image.open(filepath).convert("YCbCr") - y, _, _ = img.split() - return y - - -class DatasetFromFolder(data.Dataset): - def __init__(self, image_dir, input_transform=None, target_transform=None): - super(DatasetFromFolder, self).__init__() - self.image_filenames = [join(image_dir, x) for x in listdir(image_dir) if is_image_file(x)] - - self.input_transform = input_transform - self.target_transform = target_transform - - def __getitem__(self, index): - input = load_img(self.image_filenames[index]) - target = input.copy() - if self.input_transform: - input = self.input_transform(input) - if self.target_transform: - target = self.target_transform(target) - - return input, target - - def __len__(self): - return len(self.image_filenames) diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index b9794334ef44..8839006a7b7d 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -3,7 +3,8 @@ import torch import torch.nn as nn import torch.optim as optim -from data import get_test_set, get_training_set +import torchvision +import torchvision.transforms as transforms from model import Net from torch.utils.data import DataLoader @@ -41,10 +42,36 @@ device = torch.device("cpu") print("===> Loading datasets") -train_set = get_training_set(opt.upscale_factor) -test_set = get_test_set(opt.upscale_factor) -training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True) -testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False) + + +class SRDataset(torch.utils.data.Dataset): + def __init__(self, dataset, scale_factor): + self.dataset = dataset + self.transform = transforms.Resize( + (len(dataset[0][0][0]) * scale_factor, len(dataset[0][0][0][0]) * scale_factor) + ) + + def __getitem__(self, index): + lr_image, _ = self.dataset[index] + hr_image = self.transform(lr_image) + return lr_image, hr_image + + def __len__(self): + return len(self.dataset) + + +transform = transforms.Compose([transforms.ToTensor()]) + +trainset = torchvision.datasets.CIFAR10(root="./data", train=True, download=True, transform=transform) +testset = 
torchvision.datasets.CIFAR10(root="./data", train=False, download=True, transform=transform) + +trainset_sr = SRDataset(trainset, scale_factor=opt.upscale_factor) +testset_sr = SRDataset(testset, scale_factor=opt.upscale_factor) + +training_data_loader = DataLoader(dataset=trainset_sr, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True) +testing_data_loader = DataLoader( + dataset=testset_sr, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False +) print("===> Building model") model = Net(upscale_factor=opt.upscale_factor).to(device) From 0cd5c59d973739940ac77bd906e39b2dabcb0621 Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Sat, 18 Mar 2023 01:31:25 +0530 Subject: [PATCH 06/27] Changed the dataset used in README to cifar10 --- examples/super_resolution/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md index 39d462f993f4..9674940aaf9d 100644 --- a/examples/super_resolution/README.md +++ b/examples/super_resolution/README.md @@ -24,7 +24,7 @@ optional arguments: --seed random seed to use. Default=123 ``` -This example trains a super-resolution network on the [BSD300 dataset](https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/), using crops from the 200 training images, and evaluating on crops of the 100 test images. A snapshot of the model after every epoch with filename `model_epoch_.pth` +This example trains a super-resolution network on the [Cifar10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html). A snapshot of the model after every epoch with filename `model_epoch_.pth` ## Example Usage: From 7bcea2f5c2dbc705864a6bbc8e39df625a77b032 Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Tue, 21 Mar 2023 00:11:22 +0530 Subject: [PATCH 07/27] Used snake case in arguments --- examples/super_resolution/README.md | 2 +- examples/super_resolution/main.py | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md index 9674940aaf9d..c80ab42d6530 100644 --- a/examples/super_resolution/README.md +++ b/examples/super_resolution/README.md @@ -30,7 +30,7 @@ This example trains a super-resolution network on the [Cifar10 dataset](https:// ### Train -`python main.py --upscale_factor 3 --batchSize 4 --testBatchSize 100 --nEpochs 30 --lr 0.001` +`python main.py --upscale_factor 3 --batch_size 4 --test_batch_size 100 --n_epochs 30 --lr 0.001` ### Super Resolve diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index 8839006a7b7d..dcba9334345c 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -14,9 +14,9 @@ # Training settings parser = argparse.ArgumentParser(description="PyTorch Super Res Example") parser.add_argument("--upscale_factor", type=int, required=True, help="super resolution upscale factor") -parser.add_argument("--batchSize", type=int, default=64, help="training batch size") -parser.add_argument("--testBatchSize", type=int, default=10, help="testing batch size") -parser.add_argument("--nEpochs", type=int, default=2, help="number of epochs to train for") +parser.add_argument("--batch_size", type=int, default=64, help="training batch size") +parser.add_argument("--test_batch_size", type=int, default=10, help="testing batch size") +parser.add_argument("--n_epochs", type=int, default=2, help="number of epochs to train for") parser.add_argument("--lr", type=float, default=0.01, help="Learning 
Rate. Default=0.01") parser.add_argument("--cuda", action="store_true", help="use cuda?") parser.add_argument("--mps", action="store_true", default=False, help="enables macOS GPU training") @@ -68,9 +68,9 @@ def __len__(self): trainset_sr = SRDataset(trainset, scale_factor=opt.upscale_factor) testset_sr = SRDataset(testset, scale_factor=opt.upscale_factor) -training_data_loader = DataLoader(dataset=trainset_sr, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True) +training_data_loader = DataLoader(dataset=trainset_sr, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True) testing_data_loader = DataLoader( - dataset=testset_sr, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False + dataset=testset_sr, num_workers=opt.threads, batch_size=opt.test_batch_size, shuffle=False ) print("===> Building model") @@ -105,7 +105,7 @@ def validation_step(engine, batch): psnr = PSNR(data_range=1) psnr.attach(evaluator, "psnr") validate_every = 1 -log_interval = 10 +log_interval = 100 @trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) @@ -145,4 +145,4 @@ def checkpoint(): print("Checkpoint saved to {}".format(model_out_path)) -trainer.run(training_data_loader, opt.nEpochs) +trainer.run(training_data_loader, opt.n_epochs) From 698d76f43a0c0cea94e6869e1291a0df53446f5e Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Tue, 21 Mar 2023 01:58:37 +0530 Subject: [PATCH 08/27] Made some changes --- examples/super_resolution/main.py | 69 +++++++++++++++++++++++-------- 1 file changed, 52 insertions(+), 17 deletions(-) diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index dcba9334345c..22addc08fbfd 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -4,9 +4,9 @@ import torch.nn as nn import torch.optim as optim import torchvision -import torchvision.transforms as transforms from model import Net from torch.utils.data import DataLoader +from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor from ignite.engine import Engine, Events from ignite.metrics import PSNR @@ -45,28 +45,67 @@ class SRDataset(torch.utils.data.Dataset): - def __init__(self, dataset, scale_factor): + def __init__(self, dataset, scale_factor, input_transform=None, target_transform=None): self.dataset = dataset - self.transform = transforms.Resize( - (len(dataset[0][0][0]) * scale_factor, len(dataset[0][0][0][0]) * scale_factor) - ) + self.input_transform = input_transform + self.target_transform = target_transform def __getitem__(self, index): - lr_image, _ = self.dataset[index] - hr_image = self.transform(lr_image) + image, _ = self.dataset[index] + img = image.convert("YCbCr") + lr_image, _, _ = img.split() + + hr_image = lr_image.copy() + if self.input_transform: + lr_image = self.input_transform(lr_image) + if self.target_transform: + hr_image = self.target_transform(hr_image) return lr_image, hr_image def __len__(self): return len(self.dataset) -transform = transforms.Compose([transforms.ToTensor()]) +def calculate_valid_crop_size(crop_size, upscale_factor): + return crop_size - (crop_size % upscale_factor) + + +def input_transform(crop_size, upscale_factor): + return Compose( + [ + CenterCrop(crop_size), + Resize(crop_size // upscale_factor), + ToTensor(), + ] + ) + + +def target_transform(crop_size): + return Compose( + [ + CenterCrop(crop_size), + ToTensor(), + ] + ) + + +crop_size = calculate_valid_crop_size(256, opt.upscale_factor) -trainset = torchvision.datasets.CIFAR10(root="./data", train=True, 
download=True, transform=transform) -testset = torchvision.datasets.CIFAR10(root="./data", train=False, download=True, transform=transform) +trainset = torchvision.datasets.Caltech101(root="./data", download=True) +testset = torchvision.datasets.Caltech101(root="./data", download=False) -trainset_sr = SRDataset(trainset, scale_factor=opt.upscale_factor) -testset_sr = SRDataset(testset, scale_factor=opt.upscale_factor) +trainset_sr = SRDataset( + trainset, + scale_factor=opt.upscale_factor, + input_transform=input_transform(crop_size, opt.upscale_factor), + target_transform=target_transform(crop_size), +) +testset_sr = SRDataset( + testset, + scale_factor=opt.upscale_factor, + input_transform=input_transform(crop_size, opt.upscale_factor), + target_transform=target_transform(crop_size), +) training_data_loader = DataLoader(dataset=trainset_sr, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True) testing_data_loader = DataLoader( @@ -117,13 +156,9 @@ def log_training_loss(engine): ) -@trainer.on(Events.EPOCH_COMPLETED(every=validate_every)) -def run_validation(): - evaluator.run(testing_data_loader) - - @trainer.on(Events.EPOCH_COMPLETED(every=validate_every)) def log_validation(): + evaluator.run(testing_data_loader) metrics = evaluator.state.metrics print(f"Epoch: {trainer.state.epoch}, Avg. PSNR: {metrics['psnr']} dB") From 51f47b4c6d6f13f05ffbf7937292680b109b0a6e Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Tue, 21 Mar 2023 02:34:43 +0530 Subject: [PATCH 09/27] Make some formatting changes --- examples/super_resolution/README.md | 6 +++--- examples/super_resolution/super_resolve.py | 2 -- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md index c80ab42d6530..1d4003b82600 100644 --- a/examples/super_resolution/README.md +++ b/examples/super_resolution/README.md @@ -14,9 +14,9 @@ PyTorch Super Res Example optional arguments: -h, --help show this help message and exit --upscale_factor super resolution upscale factor - --batchSize training batch size - --testBatchSize testing batch size - --nEpochs number of epochs to train for + --batch_size training batch size + --test_batch_size testing batch size + --n_epochs number of epochs to train for --lr Learning Rate. 
Default=0.01 --cuda use cuda --mps enable GPU on macOS diff --git a/examples/super_resolution/super_resolve.py b/examples/super_resolution/super_resolve.py index 8b9e5ea1a1dc..964d7a1344d2 100644 --- a/examples/super_resolution/super_resolve.py +++ b/examples/super_resolution/super_resolve.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import argparse import numpy as np From 235c908a2056c1a1840c3cd73588ab6b3c0ad5b0 Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Tue, 21 Mar 2023 02:43:21 +0530 Subject: [PATCH 10/27] Make the formatting changes --- examples/super_resolution/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md index 1d4003b82600..8292e5b94500 100644 --- a/examples/super_resolution/README.md +++ b/examples/super_resolution/README.md @@ -5,8 +5,8 @@ ported from [pytorch-examples](https://github.com/pytorch/examples/tree/main/sup This example illustrates how to use the efficient sub-pixel convolution layer described in ["Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network" - Shi et al.](https://arxiv.org/abs/1609.05158) for increasing spatial resolution within your network for tasks such as superresolution. ``` -usage: main.py [-h] --upscale_factor UPSCALE_FACTOR [--batchSize BATCHSIZE] - [--testBatchSize TESTBATCHSIZE] [--nEpochs NEPOCHS] [--lr LR] +usage: main.py [-h] --upscale_factor UPSCALE_FACTOR [--batch_size BATCHSIZE] + [--test_batch_size TESTBATCHSIZE] [--n_epochs NEPOCHS] [--lr LR] [--cuda] [--threads THREADS] [--seed SEED] PyTorch Super Res Example From 3b2fde9af74c8e21b4bc58340ab2aa9a0c5c0589 Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Tue, 21 Mar 2023 03:31:06 +0530 Subject: [PATCH 11/27] some changes --- examples/super_resolution/README.md | 4 +- examples/super_resolution/main.py | 56 ++++++---------------- examples/super_resolution/super_resolve.py | 5 +- 3 files changed, 18 insertions(+), 47 deletions(-) diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md index 8292e5b94500..f9be6c92f563 100644 --- a/examples/super_resolution/README.md +++ b/examples/super_resolution/README.md @@ -24,7 +24,7 @@ optional arguments: --seed random seed to use. Default=123 ``` -This example trains a super-resolution network on the [Cifar10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html). A snapshot of the model after every epoch with filename `model_epoch_.pth` +This example trains a super-resolution network on the [Caltech101 dataset](https://pytorch.org/vision/main/generated/torchvision.datasets.Caltech101.html). 
A snapshot of the model after every epoch with filename `model_epoch_.pth` ## Example Usage: @@ -34,4 +34,4 @@ This example trains a super-resolution network on the [Cifar10 dataset](https:// ### Super Resolve -`python super_resolve.py --input_image dataset/BSDS300/images/test/16077.jpg --model model_epoch_500.pth --output_filename out.png` +`python super_resolve.py --input_image .jpg --model model_epoch_500.pth --output_filename out.png` diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index 22addc08fbfd..f39b4629b22b 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -6,7 +6,7 @@ import torchvision from model import Net from torch.utils.data import DataLoader -from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor +from torchvision.transforms.functional import center_crop, resize, to_tensor from ignite.engine import Engine, Events from ignite.metrics import PSNR @@ -45,21 +45,22 @@ class SRDataset(torch.utils.data.Dataset): - def __init__(self, dataset, scale_factor, input_transform=None, target_transform=None): + def __init__(self, dataset, scale_factor, crop_size=256): self.dataset = dataset - self.input_transform = input_transform - self.target_transform = target_transform + self.scale_factor = scale_factor + self.crop_size = crop_size def __getitem__(self, index): image, _ = self.dataset[index] img = image.convert("YCbCr") - lr_image, _, _ = img.split() - - hr_image = lr_image.copy() - if self.input_transform: - lr_image = self.input_transform(lr_image) - if self.target_transform: - hr_image = self.target_transform(hr_image) + hr_image, _, _ = img.split() + hr_image = center_crop(hr_image, self.crop_size) + lr_image = hr_image.copy() + if self.scale_factor != 1: + dim = self.crop_size // self.scale_factor + lr_image = resize(lr_image, [dim, dim]) + hr_image = to_tensor(hr_image) + lr_image = to_tensor(lr_image) return lr_image, hr_image def __len__(self): @@ -70,42 +71,13 @@ def calculate_valid_crop_size(crop_size, upscale_factor): return crop_size - (crop_size % upscale_factor) -def input_transform(crop_size, upscale_factor): - return Compose( - [ - CenterCrop(crop_size), - Resize(crop_size // upscale_factor), - ToTensor(), - ] - ) - - -def target_transform(crop_size): - return Compose( - [ - CenterCrop(crop_size), - ToTensor(), - ] - ) - - crop_size = calculate_valid_crop_size(256, opt.upscale_factor) trainset = torchvision.datasets.Caltech101(root="./data", download=True) testset = torchvision.datasets.Caltech101(root="./data", download=False) -trainset_sr = SRDataset( - trainset, - scale_factor=opt.upscale_factor, - input_transform=input_transform(crop_size, opt.upscale_factor), - target_transform=target_transform(crop_size), -) -testset_sr = SRDataset( - testset, - scale_factor=opt.upscale_factor, - input_transform=input_transform(crop_size, opt.upscale_factor), - target_transform=target_transform(crop_size), -) +trainset_sr = SRDataset(trainset, scale_factor=opt.upscale_factor, crop_size=crop_size) +testset_sr = SRDataset(testset, scale_factor=opt.upscale_factor, crop_size=crop_size) training_data_loader = DataLoader(dataset=trainset_sr, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True) testing_data_loader = DataLoader( diff --git a/examples/super_resolution/super_resolve.py b/examples/super_resolution/super_resolve.py index 964d7a1344d2..5c5f3c87accd 100644 --- a/examples/super_resolution/super_resolve.py +++ b/examples/super_resolution/super_resolve.py @@ -3,7 +3,7 @@ 
import numpy as np import torch from PIL import Image -from torchvision.transforms import ToTensor +from torchvision.transforms.functional import to_tensor # Training settings parser = argparse.ArgumentParser(description="PyTorch Super Res Example") @@ -18,8 +18,7 @@ y, cb, cr = img.split() model = torch.load(opt.model) -img_to_tensor = ToTensor() -input = img_to_tensor(y).view(1, -1, y.size[1], y.size[0]) +input = to_tensor(y).view(1, -1, y.size[1], y.size[0]) if opt.cuda: model = model.cuda() From 0e2f9a3a979edacb24ab4c55ac86b469d59c176c Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Tue, 21 Mar 2023 18:16:04 +0530 Subject: [PATCH 12/27] update the crop method --- examples/super_resolution/main.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index f39b4629b22b..db9c1d941482 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -67,17 +67,11 @@ def __len__(self): return len(self.dataset) -def calculate_valid_crop_size(crop_size, upscale_factor): - return crop_size - (crop_size % upscale_factor) - - -crop_size = calculate_valid_crop_size(256, opt.upscale_factor) - trainset = torchvision.datasets.Caltech101(root="./data", download=True) testset = torchvision.datasets.Caltech101(root="./data", download=False) -trainset_sr = SRDataset(trainset, scale_factor=opt.upscale_factor, crop_size=crop_size) -testset_sr = SRDataset(testset, scale_factor=opt.upscale_factor, crop_size=crop_size) +trainset_sr = SRDataset(trainset, scale_factor=opt.upscale_factor) +testset_sr = SRDataset(testset, scale_factor=opt.upscale_factor) training_data_loader = DataLoader(dataset=trainset_sr, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True) testing_data_loader = DataLoader( From 3d9dda7ebe7ce0c93fa656b1a193200666d81d6a Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Tue, 21 Mar 2023 21:39:37 +0530 Subject: [PATCH 13/27] Made the suggested changes --- examples/super_resolution/main.py | 9 ++++----- examples/super_resolution/super_resolve.py | 4 +++- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index db9c1d941482..d46deec1701c 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -57,8 +57,8 @@ def __getitem__(self, index): hr_image = center_crop(hr_image, self.crop_size) lr_image = hr_image.copy() if self.scale_factor != 1: - dim = self.crop_size // self.scale_factor - lr_image = resize(lr_image, [dim, dim]) + size = self.crop_size // self.scale_factor + lr_image = resize(lr_image, [size, size]) hr_image = to_tensor(hr_image) lr_image = to_tensor(lr_image) return lr_image, hr_image @@ -74,9 +74,7 @@ def __len__(self): testset_sr = SRDataset(testset, scale_factor=opt.upscale_factor) training_data_loader = DataLoader(dataset=trainset_sr, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True) -testing_data_loader = DataLoader( - dataset=testset_sr, num_workers=opt.threads, batch_size=opt.test_batch_size, shuffle=False -) +testing_data_loader = DataLoader(dataset=testset_sr, num_workers=opt.threads, batch_size=opt.test_batch_size) print("===> Building model") model = Net(upscale_factor=opt.upscale_factor).to(device) @@ -86,6 +84,7 @@ def __len__(self): def train_step(engine, batch): + model.train() input, target = batch[0].to(device), batch[1].to(device) optimizer.zero_grad() diff --git a/examples/super_resolution/super_resolve.py 
b/examples/super_resolution/super_resolve.py index 5c5f3c87accd..05c841037692 100644 --- a/examples/super_resolution/super_resolve.py +++ b/examples/super_resolution/super_resolve.py @@ -24,7 +24,9 @@ model = model.cuda() input = input.cuda() -out = model(input) +model.eval() +with torch.no_grad(): + out = model(input) out = out.cpu() out_img_y = out[0].detach().numpy() out_img_y *= 255.0 From 689b7e47e98978d8c9b4a4397f83b199f1383bd5 Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Wed, 22 Mar 2023 15:52:38 +0530 Subject: [PATCH 14/27] Add SR example to unit tests --- .github/workflows/unit-tests.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 699f233ee2cc..78ce59c29da6 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -184,3 +184,8 @@ jobs: #train mkdir -p ~/.cache/torch/checkpoints/ && wget "https://download.pytorch.org/models/vgg16-397923af.pth" -O ~/.cache/torch/checkpoints/vgg16-397923af.pth python examples/fast_neural_style/neural_style.py train --epochs 1 --cuda 0 --dataset test --dataroot . --image_size 32 --style_image examples/fast_neural_style/images/style_images/mosaic.jpg --style_size 32 + - name: Run SR Example + if: ${{ matrix.os == 'ubuntu-latest' }} + run: | + # Super-Resolution + python /content/main.py --upscale_factor 3 --batch_size 4 --cuda --test_batch_size 100 --n_epochs 4 --lr 0.001 --threads 2 From fb3f64ab3918d1e3c7e0f3222e86811ba632bd98 Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Wed, 22 Mar 2023 16:25:12 +0530 Subject: [PATCH 15/27] Add tqdm to the SR example and some CI changes --- .github/workflows/unit-tests.yml | 2 +- examples/super_resolution/main.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 78ce59c29da6..90fdbc4a8a4f 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -188,4 +188,4 @@ jobs: if: ${{ matrix.os == 'ubuntu-latest' }} run: | # Super-Resolution - python /content/main.py --upscale_factor 3 --batch_size 4 --cuda --test_batch_size 100 --n_epochs 4 --lr 0.001 --threads 2 + python /examples/super_resolution/main.py --upscale_factor 3 --batch_size 4 --cuda --test_batch_size 100 --n_epochs 4 --lr 0.001 --threads 2 diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index d46deec1701c..0efec2b65314 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -8,6 +8,8 @@ from torch.utils.data import DataLoader from torchvision.transforms.functional import center_crop, resize, to_tensor +from ignite.contrib.handlers import ProgressBar + from ignite.engine import Engine, Events from ignite.metrics import PSNR @@ -145,4 +147,6 @@ def checkpoint(): print("Checkpoint saved to {}".format(model_out_path)) +ProgressBar().attach(trainer) + trainer.run(training_data_loader, opt.n_epochs) From 051999e485a23dd4efc8d7deb387a8b915b61668 Mon Sep 17 00:00:00 2001 From: Aryan Gupta <97878444+guptaaryan16@users.noreply.github.com> Date: Wed, 22 Mar 2023 16:51:18 +0530 Subject: [PATCH 16/27] Update unit-tests.yml --- .github/workflows/unit-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 90fdbc4a8a4f..ce56da813b79 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -188,4 +188,4 @@ jobs: if: ${{ matrix.os == 
'ubuntu-latest' }} run: | # Super-Resolution - python /examples/super_resolution/main.py --upscale_factor 3 --batch_size 4 --cuda --test_batch_size 100 --n_epochs 4 --lr 0.001 --threads 2 + python examples/super_resolution/main.py --upscale_factor 3 --batch_size 4 --cuda --test_batch_size 100 --n_epochs 4 --lr 0.001 --threads 2 From e36beffe35097043cbe4be58786896a5022f991d Mon Sep 17 00:00:00 2001 From: Aryan Gupta <97878444+guptaaryan16@users.noreply.github.com> Date: Wed, 22 Mar 2023 17:13:00 +0530 Subject: [PATCH 17/27] Update unit-tests.yml --- .github/workflows/unit-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index ce56da813b79..291fec23fe0f 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -188,4 +188,4 @@ jobs: if: ${{ matrix.os == 'ubuntu-latest' }} run: | # Super-Resolution - python examples/super_resolution/main.py --upscale_factor 3 --batch_size 4 --cuda --test_batch_size 100 --n_epochs 4 --lr 0.001 --threads 2 + python examples/super_resolution/main.py --upscale_factor 3 --batch_size 4 --test_batch_size 100 --n_epochs 1 --lr 0.001 --threads 2 From 87456cd755fed10f3b4417a4a4a691bcf1d6cf9d Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Wed, 22 Mar 2023 17:35:45 +0530 Subject: [PATCH 18/27] changed crop_size in SR example --- examples/super_resolution/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index 0efec2b65314..19555fc6bf9a 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -47,7 +47,7 @@ class SRDataset(torch.utils.data.Dataset): - def __init__(self, dataset, scale_factor, crop_size=256): + def __init__(self, dataset, scale_factor, crop_size=180): self.dataset = dataset self.scale_factor = scale_factor self.crop_size = crop_size From 780dbdbece7f2247cc36d9e32fe33150c26bdb35 Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Wed, 22 Mar 2023 19:55:59 +0530 Subject: [PATCH 19/27] Made crop_size a parameter in SR example --- .github/workflows/unit-tests.yml | 2 +- examples/super_resolution/README.md | 5 +++-- examples/super_resolution/main.py | 8 +++++--- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 291fec23fe0f..a1f5ca16006a 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -188,4 +188,4 @@ jobs: if: ${{ matrix.os == 'ubuntu-latest' }} run: | # Super-Resolution - python examples/super_resolution/main.py --upscale_factor 3 --batch_size 4 --test_batch_size 100 --n_epochs 1 --lr 0.001 --threads 2 + python examples/super_resolution/main.py --upscale_factor 3 --crop_size 180 --batch_size 4 --test_batch_size 100 --n_epochs 1 --lr 0.001 --threads 2 diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md index f9be6c92f563..7b907d994415 100644 --- a/examples/super_resolution/README.md +++ b/examples/super_resolution/README.md @@ -5,7 +5,7 @@ ported from [pytorch-examples](https://github.com/pytorch/examples/tree/main/sup This example illustrates how to use the efficient sub-pixel convolution layer described in ["Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network" - Shi et al.](https://arxiv.org/abs/1609.05158) for increasing spatial resolution within your network for tasks such as superresolution. 
``` -usage: main.py [-h] --upscale_factor UPSCALE_FACTOR [--batch_size BATCHSIZE] +usage: main.py [-h] --upscale_factor UPSCALE_FACTOR [--crop_size CROPSIZE] [--batch_size BATCHSIZE] [--test_batch_size TESTBATCHSIZE] [--n_epochs NEPOCHS] [--lr LR] [--cuda] [--threads THREADS] [--seed SEED] @@ -14,6 +14,7 @@ PyTorch Super Res Example optional arguments: -h, --help show this help message and exit --upscale_factor super resolution upscale factor + --crop_size cropped size of the images for training --batch_size training batch size --test_batch_size testing batch size --n_epochs number of epochs to train for @@ -30,7 +31,7 @@ This example trains a super-resolution network on the [Caltech101 dataset](https ### Train -`python main.py --upscale_factor 3 --batch_size 4 --test_batch_size 100 --n_epochs 30 --lr 0.001` +`python main.py --upscale_factor 3 --crop_size 180 --batch_size 4 --test_batch_size 100 --n_epochs 30 --lr 0.001` ### Super Resolve diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index 19555fc6bf9a..69b190d613c3 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -15,6 +15,7 @@ # Training settings parser = argparse.ArgumentParser(description="PyTorch Super Res Example") +parser.add_argument("--crop_size", type=int, default=256, help="cropped size of the images for training") parser.add_argument("--upscale_factor", type=int, required=True, help="super resolution upscale factor") parser.add_argument("--batch_size", type=int, default=64, help="training batch size") parser.add_argument("--test_batch_size", type=int, default=10, help="testing batch size") @@ -24,6 +25,7 @@ parser.add_argument("--mps", action="store_true", default=False, help="enables macOS GPU training") parser.add_argument("--threads", type=int, default=4, help="number of threads for data loader to use") parser.add_argument("--seed", type=int, default=123, help="random seed to use. 
Default=123") + opt = parser.parse_args() print(opt) @@ -47,7 +49,7 @@ class SRDataset(torch.utils.data.Dataset): - def __init__(self, dataset, scale_factor, crop_size=180): + def __init__(self, dataset, scale_factor, crop_size=256): self.dataset = dataset self.scale_factor = scale_factor self.crop_size = crop_size @@ -72,8 +74,8 @@ def __len__(self): trainset = torchvision.datasets.Caltech101(root="./data", download=True) testset = torchvision.datasets.Caltech101(root="./data", download=False) -trainset_sr = SRDataset(trainset, scale_factor=opt.upscale_factor) -testset_sr = SRDataset(testset, scale_factor=opt.upscale_factor) +trainset_sr = SRDataset(trainset, scale_factor=opt.upscale_factor, crop_size=opt.crop_size) +testset_sr = SRDataset(testset, scale_factor=opt.upscale_factor, crop_size=opt.crop_size) training_data_loader = DataLoader(dataset=trainset_sr, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True) testing_data_loader = DataLoader(dataset=testset_sr, num_workers=opt.threads, batch_size=opt.test_batch_size) From b69c914d009768115bf7f7b9d7399d0688129bf6 Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Fri, 24 Mar 2023 17:07:09 +0530 Subject: [PATCH 20/27] Add debug mode in SR example --- .github/workflows/unit-tests.yml | 2 +- examples/super_resolution/README.md | 3 ++- examples/super_resolution/main.py | 22 +++++++++++----------- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index a1f5ca16006a..1873560eed99 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -188,4 +188,4 @@ jobs: if: ${{ matrix.os == 'ubuntu-latest' }} run: | # Super-Resolution - python examples/super_resolution/main.py --upscale_factor 3 --crop_size 180 --batch_size 4 --test_batch_size 100 --n_epochs 1 --lr 0.001 --threads 2 + python examples/super_resolution/main.py --upscale_factor 3 --crop_size 180 --batch_size 4 --test_batch_size 100 --n_epochs 1 --lr 0.001 --threads 2 --debug diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md index 7b907d994415..16231a592f6b 100644 --- a/examples/super_resolution/README.md +++ b/examples/super_resolution/README.md @@ -7,7 +7,7 @@ This example illustrates how to use the efficient sub-pixel convolution layer de ``` usage: main.py [-h] --upscale_factor UPSCALE_FACTOR [--crop_size CROPSIZE] [--batch_size BATCHSIZE] [--test_batch_size TESTBATCHSIZE] [--n_epochs NEPOCHS] [--lr LR] - [--cuda] [--threads THREADS] [--seed SEED] + [--cuda] [--threads THREADS] [--seed SEED] [--debug] PyTorch Super Res Example @@ -23,6 +23,7 @@ optional arguments: --mps enable GPU on macOS --threads number of threads for data loader to use Default=4 --seed random seed to use. Default=123 + --debug debug mode for testing ``` This example trains a super-resolution network on the [Caltech101 dataset](https://pytorch.org/vision/main/generated/torchvision.datasets.Caltech101.html). 
A snapshot of the model after every epoch with filename `model_epoch_.pth` diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py index 69b190d613c3..2bb916734aa9 100644 --- a/examples/super_resolution/main.py +++ b/examples/super_resolution/main.py @@ -25,6 +25,7 @@ parser.add_argument("--mps", action="store_true", default=False, help="enables macOS GPU training") parser.add_argument("--threads", type=int, default=4, help="number of threads for data loader to use") parser.add_argument("--seed", type=int, default=123, help="random seed to use. Default=123") +parser.add_argument("--debug", action="store_true", help="use debug") opt = parser.parse_args() @@ -115,19 +116,17 @@ def validation_step(engine, batch): validate_every = 1 log_interval = 100 - -@trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) -def log_training_loss(engine): - print( - "===> Epoch[{}]({}/{}): Loss: {:.4f}".format( - engine.state.epoch, engine.state.iteration, len(training_data_loader), engine.state.output - ) - ) +if opt.debug: + epoch_length = 100 + validate_epoch_length=7 +else: + epoch_length = len(training_data_loader) + validate_epoch_length= len(testing_data_loader) @trainer.on(Events.EPOCH_COMPLETED(every=validate_every)) def log_validation(): - evaluator.run(testing_data_loader) + evaluator.run(testing_data_loader, epoch_length=validate_epoch_length) metrics = evaluator.state.metrics print(f"Epoch: {trainer.state.epoch}, Avg. PSNR: {metrics['psnr']} dB") @@ -149,6 +148,7 @@ def checkpoint(): print("Checkpoint saved to {}".format(model_out_path)) -ProgressBar().attach(trainer) +ProgressBar().attach(trainer, output_transform=lambda x: {'loss': x}) +ProgressBar().attach(evaluator) -trainer.run(training_data_loader, opt.n_epochs) +trainer.run(training_data_loader, opt.n_epochs, epoch_length=epoch_length) From 4b1d337b2d1f230b90bdb2f2bf565b3575add7dd Mon Sep 17 00:00:00 2001 From: Aryan Gupta Date: Fri, 24 Mar 2023 17:35:55 +0530 Subject: [PATCH 21/27] Added Cifar image example --- examples/super_resolution/README.md | 12 ++++++++++++ .../images/bicubic_image_cifar.png | Bin 0 -> 10386 bytes .../super_resolution/images/input_cifar.png | Bin 0 -> 2078 bytes examples/super_resolution/images/out_cifar.png | Bin 0 -> 13906 bytes 4 files changed, 12 insertions(+) create mode 100644 examples/super_resolution/images/bicubic_image_cifar.png create mode 100644 examples/super_resolution/images/input_cifar.png create mode 100644 examples/super_resolution/images/out_cifar.png diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md index 16231a592f6b..d4a90ce92dc0 100644 --- a/examples/super_resolution/README.md +++ b/examples/super_resolution/README.md @@ -37,3 +37,15 @@ This example trains a super-resolution network on the [Caltech101 dataset](https ### Super Resolve `python super_resolve.py --input_image .jpg --model model_epoch_500.pth --output_filename out.png` + +## Example Usage on an Image of Cifar-10 + +### Input Image +![Cifar input image](./images/input_cifar.png) + +## Output Images +| Output image from Model | Output from bicubic sampling | +|-------------------------------|------------------------------------| +| ![Cifar output image](./images/out_cifar.png) | ![Cifar output from bicubic sampling](./images/bicubic_image_cifar.png)| + + diff --git a/examples/super_resolution/images/bicubic_image_cifar.png b/examples/super_resolution/images/bicubic_image_cifar.png new file mode 100644 index 
From 4b1d337b2d1f230b90bdb2f2bf565b3575add7dd Mon Sep 17 00:00:00 2001
From: Aryan Gupta
Date: Fri, 24 Mar 2023 17:35:55 +0530
Subject: [PATCH 21/27] Added Cifar image example

---
 examples/super_resolution/README.md            |  12 ++++++++++++
 .../images/bicubic_image_cifar.png             | Bin 0 -> 10386 bytes
 .../super_resolution/images/input_cifar.png    | Bin 0 -> 2078 bytes
 examples/super_resolution/images/out_cifar.png | Bin 0 -> 13906 bytes
 4 files changed, 12 insertions(+)
 create mode 100644 examples/super_resolution/images/bicubic_image_cifar.png
 create mode 100644 examples/super_resolution/images/input_cifar.png
 create mode 100644 examples/super_resolution/images/out_cifar.png

diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md
index 16231a592f6b..d4a90ce92dc0 100644
--- a/examples/super_resolution/README.md
+++ b/examples/super_resolution/README.md
@@ -37,3 +37,15 @@ This example trains a super-resolution network on the [Caltech101 dataset](https
 ### Super Resolve
 
 `python super_resolve.py --input_image .jpg --model model_epoch_500.pth --output_filename out.png`
+
+## Example Usage on an Image of Cifar-10
+
+### Input Image
+![Cifar input image](./images/input_cifar.png)
+
+## Output Images
+| Output image from Model | Output from bicubic sampling |
+|-------------------------------|------------------------------------|
+| ![Cifar output image](./images/out_cifar.png) | ![Cifar output from bicubic sampling](./images/bicubic_image_cifar.png)|
diff --git a/examples/super_resolution/images/bicubic_image_cifar.png b/examples/super_resolution/images/bicubic_image_cifar.png
new file mode 100644
index 0000000000000000000000000000000000000000..b5bd4d9cf1b4675d6f2ae5a535c428a14f053342
[GIT binary patch, literal 10386 bytes: binary PNG data omitted]
diff --git a/examples/super_resolution/images/input_cifar.png b/examples/super_resolution/images/input_cifar.png
new file mode 100644
index 0000000000000000000000000000000000000000..217b7e67d38573617b046c20a9d2271dd6b4e446
[GIT binary patch, literal 2078 bytes: binary PNG data omitted]
diff --git a/examples/super_resolution/images/out_cifar.png b/examples/super_resolution/images/out_cifar.png
new file mode 100644
[GIT binary patch, literal 13906 bytes: binary PNG data omitted]
From 93766f7deb0761e13dc7d101c66743b5b9c73f27 Mon Sep 17 00:00:00 2001
From: guptaaryan16
Date: Fri, 24 Mar 2023 12:07:13 +0000
Subject: [PATCH 22/27] autopep8 fix

---
 examples/super_resolution/main.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py
index 2bb916734aa9..9adfcc102af2 100644
--- a/examples/super_resolution/main.py
+++ b/examples/super_resolution/main.py
@@ -118,10 +118,10 @@ def validation_step(engine, batch):
 
 if opt.debug:
     epoch_length = 100
-    validate_epoch_length=7
+    validate_epoch_length = 7
 else:
     epoch_length = len(training_data_loader)
-    validate_epoch_length= len(testing_data_loader)
+    validate_epoch_length = len(testing_data_loader)
@@ -148,7 +148,7 @@ def checkpoint():
     print("Checkpoint saved to {}".format(model_out_path))
 
 
-ProgressBar().attach(trainer, output_transform=lambda x: {'loss': x})
+ProgressBar().attach(trainer, output_transform=lambda x: {"loss": x})
 ProgressBar().attach(evaluator)
 
 trainer.run(training_data_loader, opt.n_epochs, epoch_length=epoch_length)
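Beyond the whitespace cleanup, the attach call this patch touches is worth a note: `ProgressBar.attach` accepts an `output_transform` that maps each iteration's raw output to a dict of named values to display, which is how the bar shows a running loss. A small sketch on a dummy engine:

```python
# Sketch: showing an engine's raw output as a named value on the progress bar.
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine


def update(engine, batch):
    return 1.0 / engine.state.iteration  # dummy scalar standing in for a loss


trainer = Engine(update)

# Without output_transform the bar only shows progress; with it, each
# iteration's return value is rendered as "loss=...".
ProgressBar().attach(trainer, output_transform=lambda out: {"loss": out})

trainer.run(range(100), max_epochs=1)
```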
From 655e5696689ac6fa955067f1472ab852c7097c89 Mon Sep 17 00:00:00 2001
From: Aryan Gupta
Date: Mon, 27 Mar 2023 16:04:22 +0530
Subject: [PATCH 24/27] Added Basic Profile Handler in SR example

---
 examples/super_resolution/main.py | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py
index 9adfcc102af2..85a9eb88c82c 100644
--- a/examples/super_resolution/main.py
+++ b/examples/super_resolution/main.py
@@ -11,6 +11,7 @@
 
 from ignite.contrib.handlers import ProgressBar
 from ignite.engine import Engine, Events
+from ignite.handlers import BasicTimeProfiler
 from ignite.metrics import PSNR
 
 # Training settings
@@ -131,16 +132,6 @@ def log_validation():
     print(f"Epoch: {trainer.state.epoch}, Avg. PSNR: {metrics['psnr']} dB")
 
 
-@trainer.on(Events.EPOCH_COMPLETED)
-def log_epoch_time():
-    print(f"Epoch {trainer.state.epoch}, Time Taken : {trainer.state.times['EPOCH_COMPLETED']}")
-
-
-@trainer.on(Events.COMPLETED)
-def log_total_time():
-    print(f"Total Time: {trainer.state.times['COMPLETED']}")
-
-
 @trainer.on(Events.EPOCH_COMPLETED)
 def checkpoint():
     model_out_path = "model_epoch_{}.pth".format(trainer.state.epoch)
@@ -148,7 +139,13 @@ def checkpoint():
     print("Checkpoint saved to {}".format(model_out_path))
 
 
+# Attach basic profiler
+basic_profiler = BasicTimeProfiler()
+basic_profiler.attach(trainer)
+
 ProgressBar().attach(trainer, output_transform=lambda x: {"loss": x})
-ProgressBar().attach(evaluator)
 
 trainer.run(training_data_loader, opt.n_epochs, epoch_length=epoch_length)
+
+results = basic_profiler.get_results()
+basic_profiler.print_results(results)
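`BasicTimeProfiler` hooks the engine's built-in events when attached, and after the run `get_results()` / `print_results()` report where the time went (processing, dataflow, event handlers). A minimal sketch of the same attach-and-report cycle on a dummy engine:

```python
# Sketch: the BasicTimeProfiler attach/report cycle on a dummy engine.
import time

from ignite.engine import Engine
from ignite.handlers import BasicTimeProfiler


def update(engine, batch):
    time.sleep(0.01)  # stand-in for a real training step
    return batch


trainer = Engine(update)

profiler = BasicTimeProfiler()
profiler.attach(trainer)  # must be attached before the run starts

trainer.run(range(50), max_epochs=2)

results = profiler.get_results()
profiler.print_results(results)  # prints a per-event timing breakdown
```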
From 2ce87499aeb855a4873f3967d4961590018184e1 Mon Sep 17 00:00:00 2001
From: Aryan Gupta
Date: Mon, 27 Mar 2023 16:45:20 +0530
Subject: [PATCH 25/27] Shorten the debug-mode epoch lengths

---
 examples/super_resolution/main.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/examples/super_resolution/main.py b/examples/super_resolution/main.py
index 85a9eb88c82c..816d1caea7f2 100644
--- a/examples/super_resolution/main.py
+++ b/examples/super_resolution/main.py
@@ -115,11 +115,10 @@ def validation_step(engine, batch):
 psnr = PSNR(data_range=1)
 psnr.attach(evaluator, "psnr")
 validate_every = 1
-log_interval = 100
 
 if opt.debug:
-    epoch_length = 100
-    validate_epoch_length = 7
+    epoch_length = 10
+    validate_epoch_length = 1
 else:
     epoch_length = len(training_data_loader)
     validate_epoch_length = len(testing_data_loader)

From 9f81e33899e58aed7a82201341f7dbd4f7a26089 Mon Sep 17 00:00:00 2001
From: Aryan Gupta
Date: Wed, 29 Mar 2023 03:26:04 +0530
Subject: [PATCH 26/27] Update README

---
 examples/super_resolution/README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md
index d4a90ce92dc0..00f6d127f9d2 100644
--- a/examples/super_resolution/README.md
+++ b/examples/super_resolution/README.md
@@ -34,16 +34,16 @@ This example trains a super-resolution network on the [Caltech101 dataset](https
 
 `python main.py --upscale_factor 3 --crop_size 180 --batch_size 4 --test_batch_size 100 --n_epochs 30 --lr 0.001`
 
-### Super Resolve
+### Super-Resolve
 
 `python super_resolve.py --input_image .jpg --model model_epoch_500.pth --output_filename out.png`
 
-## Example Usage on an Image of Cifar-10
+### Super-resolve example on a Cifar-10 image
 
-### Input Image
+##### Input Image
 ![Cifar input image](./images/input_cifar.png)
 
-## Output Images
+#### Output Images

From 5ccd25d4c59c25bbf83a15d2ca2fe65e45f4c400 Mon Sep 17 00:00:00 2001
From: Aryan Gupta <97878444+guptaaryan16@users.noreply.github.com>
Date: Wed, 29 Mar 2023 17:34:22 +0530
Subject: [PATCH 27/27] Update README.md

---
 examples/super_resolution/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/super_resolution/README.md b/examples/super_resolution/README.md
index 00f6d127f9d2..d874747dc1cd 100644
--- a/examples/super_resolution/README.md
+++ b/examples/super_resolution/README.md
@@ -40,7 +40,7 @@
 ### Super-resolve example on a Cifar-10 image
 
-##### Input Image
+#### Input Image
 ![Cifar input image](./images/input_cifar.png)
 
 #### Output Images
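For reproducing the bicubic column of the README's comparison table: it is just the input upscaled with a conventional interpolation filter, with no network involved. The script that generated `images/bicubic_image_cifar.png` is not part of this series, so the following is only a plausible sketch using PIL:

```python
# Plausible sketch of producing the bicubic baseline image; the actual script
# used for the README figure is not included in this patch series.
from PIL import Image

upscale_factor = 3  # assumed to match the trained model's --upscale_factor
img = Image.open("images/input_cifar.png")
baseline = img.resize(
    (img.width * upscale_factor, img.height * upscale_factor),
    Image.BICUBIC,  # conventional interpolation, no learned weights
)
baseline.save("images/bicubic_image_cifar.png")
```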