The following 7 code examples, extracted from open-source Python projects, illustrate how to use torchvision.transforms.ToPILImage().
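Before the extracted examples, a quick reminder of what the transform does: ToPILImage() converts a C x H x W tensor (float values in [0, 1], or a uint8 tensor) into a PIL image. A minimal sketch of the round trip, assuming only torch and torchvision are installed:

import torch
from torchvision import transforms

# ToPILImage expects a C x H x W tensor; float values should lie in [0, 1].
tensor = torch.rand(3, 224, 224)               # fake RGB image, values in [0, 1)
pil_image = transforms.ToPILImage()(tensor)    # -> PIL.Image.Image

# ToTensor reverses the conversion, giving back a C x H x W float tensor in [0, 1].
tensor_again = transforms.ToTensor()(pil_image)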
def imshow(tensor, imsize=512, title=None):
    image = tensor.clone().cpu()
    image = image.view(*tensor.size())
    image = transforms.ToPILImage()(image)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(5)
def test(argv=sys.argv[1:]):
    input = "../dataset/BSDS300/images/val/54082.jpg"
    #input = "../dataset/BSDS300/images/val/159008.jpg"
    output = "sr_{}".format(basename(input))  # save in cwd
    output2 = "sr__{}".format(basename(input))
    model = "snapshot/gnet-epoch-1-pretrain.pth"
    #model = "snapshot/gnet-epoch-200.pth"
    cuda = True

    img = Image.open(input)
    width, height = img.size
    gennet = torch.load(model)

    img = ToTensor()(img)  # [c, h, w] -> [1, c, h, w]
    input = Variable(img).view(1, 3, height, width)

    if cuda:
        gennet = gennet.cuda()
        input = input.cuda()

    pred = gennet(input).cpu()
    save_image(pred.data, output)
    #ToPILImage()(pred.data).save(output)
    toImage(pred).save(output2)
def __init__(self, env):
    super(CartPoleWrapper, self).__init__()
    self.env = env.unwrapped
    self.resize = T.Compose([T.ToPILImage(),
                             T.Scale(40, interpolation=Image.CUBIC),
                             T.ToTensor()])
    self.screen_width = 600
    self.action_space = self.env.action_space
def tensor2img(self, tensor):
    decode = transforms.Compose([
        transforms.Lambda(lambda x: x.mul_(1. / 255)),
        transforms.Normalize(mean=[-0.40760392, -0.45795686, -0.48501961],
                             std=[1, 1, 1]),
        transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),
    ])
    tensor = decode(tensor)

    loader = transforms.Compose([transforms.ToPILImage()])
    img = loader(tensor.clamp_(0, 1))
    img.save(self.img_path + "/result.jpg")
def postprocess_torch(output):
    # Should we?
    def denormalize(image):
        for t in range(3):
            image[t, :, :] = (image[t, :, :] * STD[t]) + MEAN[t]
        return image

    transformer = transforms.Compose([transforms.ToPILImage()])
    image = output.cpu().data[0]
    image = torch.clamp(denormalize(image), min=0, max=1)
    return transformer(image)
def __init__(self):
    self.loader = transforms.Compose([
        transforms.Scale(image_size),
        transforms.ToTensor()
    ])
    self.un_loader = transforms.ToPILImage()
def test(self, loader, e):
    self.dis.eval()
    self.gen.eval()

    topilimg = transforms.ToPILImage()

    if not os.path.exists('visualize/'):
        os.makedirs('visualize/')

    idx = random.randint(0, len(loader) - 1)
    _features = loader.dataset[idx]

    orig_x = Variable(self.cudafy(_features[0]))
    orig_y = Variable(self.cudafy(_features[1]))
    orig_x = orig_x.view(1, orig_x.size(0), orig_x.size(1), orig_x.size(2))
    orig_y = orig_y.view(1, orig_y.size(0), orig_y.size(1), orig_y.size(2))

    gen_y = self.gen(orig_x)

    if self.cuda:
        orig_x_np = normalize(orig_x.squeeze().cpu().data, 0, 1)
        orig_y_np = normalize(orig_y.squeeze().cpu().data, 0, 1)
        gen_y_np = normalize(gen_y.squeeze().cpu().data, 0, 1)
    else:
        orig_x_np = normalize(orig_x.squeeze().data, 0, 1)
        orig_y_np = normalize(orig_y.squeeze().data, 0, 1)
        gen_y_np = normalize(gen_y.squeeze().data, 0, 1)

    orig_x_np = topilimg(orig_x_np)
    orig_y_np = topilimg(orig_y_np)
    gen_y_np = topilimg(gen_y_np)

    f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharey='row')
    ax1.imshow(orig_x_np)
    ax1.set_title('x')
    ax2.imshow(orig_y_np)
    ax2.set_title('target y')
    ax3.imshow(gen_y_np)
    ax3.set_title('generated y')
    f.savefig('visualize/{}.png'.format(e))
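A pattern shared by several of the examples above: un-normalize the network output and clamp it into [0, 1] before calling ToPILImage(), since float values outside that range can wrap around when scaled to 8-bit. A minimal sketch of that pipeline, assuming ImageNet-style mean/std (substitute whatever statistics were used at training time):

import torch
from torchvision import transforms

# Hypothetical normalization statistics; replace with the values used during training.
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]

def to_pil(output):
    # Undo channel-wise normalization: x = x * std + mean.
    image = output.clone()
    for c in range(3):
        image[c] = image[c] * STD[c] + MEAN[c]
    # Clamp into [0, 1] so ToPILImage maps values cleanly onto 0-255.
    return transforms.ToPILImage()(image.clamp(0, 1))

pil = to_pil(torch.randn(3, 64, 64))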