The following 12 code examples, extracted from open-source Python projects, illustrate how to use chainer.training.extensions.snapshot().
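Before the extracted examples, here is a minimal sketch of the common pattern. It is not taken from any of the projects below: `MyNet` and `train_dataset` are placeholders, and the snapshot file name in the final comment is hypothetical.

# Minimal sketch (assumed setup, not from the examples below).
import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions

model = L.Classifier(MyNet())  # MyNet: hypothetical chainer.Chain
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)

train_iter = chainer.iterators.SerialIterator(train_dataset, 128)  # placeholder dataset
updater = training.StandardUpdater(train_iter, optimizer)
trainer = training.Trainer(updater, (20, 'epoch'), out='result')

# snapshot() serializes the whole trainer (updater, optimizer, model) to
# result/snapshot_iter_<iteration> each time the trigger fires.
trainer.extend(extensions.snapshot(), trigger=(1, 'epoch'))
trainer.run()

# To resume, rebuild the same trainer and load a snapshot back into it,
# e.g. (hypothetical file name):
# chainer.serializers.load_npz('result/snapshot_iter_600', trainer)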
def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):
    if opt_name == "Adam":
        opt = getattr(optimizers, opt_name)()
    else:
        opt = getattr(optimizers, opt_name)(lr)
    opt.setup(self.model)
    opt.add_hook(optimizer.GradientClipping(g_clip))

    updater = training.StandardUpdater(self.train_iter, opt, device=gpu)
    self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=out_dir)
    self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=gpu))
    self.trainer.extend(extensions.dump_graph('main/loss'))
    self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))
    self.trainer.extend(extensions.LogReport())
    self.trainer.extend(extensions.PlotReport(
        ['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png'))
    self.trainer.extend(extensions.PlotReport(
        ['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png'))
    self.trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    self.trainer.extend(extensions.ProgressBar())
def __init__(self, **kwargs):
    required_keys = []
    optional_keys = [
        'dump_graph',
        'Evaluator',
        'ExponentialShift',
        'LinearShift',
        'LogReport',
        'observe_lr',
        'observe_value',
        'snapshot',
        'PlotReport',
        'PrintReport',
    ]
    super().__init__(
        required_keys, optional_keys, kwargs, self.__class__.__name__)
def train(args):
    model = EmbeddingTagger(args.model, 50, 20, 30)
    model.setup_training(args.embed)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    train = CCGBankDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = CCGBankDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.SGD(lr=0.01)
    optimizer.setup(model)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 5000, 'iteration'
    log_interval = 200, 'iteration'
    val_model = model.copy()

    trainer.extend(extensions.Evaluator(val_iter, val_model), trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
def main():
    unit = 1000
    batchsize = 100
    epoch = 20

    model = L.Classifier(MLP(unit, 10))
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_mnist()
    train_iter = chainer.iterators.SerialIterator(train, batchsize)
    test_iter = chainer.iterators.SerialIterator(test, batchsize,
                                                 repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out='result')

    trainer.extend(extensions.Evaluator(test_iter, model))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=(epoch, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar())

    trainer.run()
def fit(model, train, valid, device=-1, batchsize=4096, n_epoch=500,
        resume=None, alpha=1e-3):
    if device >= 0:
        chainer.cuda.get_device(device).use()
        model.to_gpu(device)
    optimizer = chainer.optimizers.Adam(alpha)
    optimizer.setup(model)

    # Setup iterators
    train_iter = chainer.iterators.SerialIterator(train, batchsize)
    valid_iter = chainer.iterators.SerialIterator(valid, batchsize,
                                                  repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'),
                               out='out_' + str(device))

    # Setup logging, printing & saving
    keys = ['loss', 'rmse', 'bias', 'kld0', 'kld1']
    keys += ['kldg', 'kldi', 'hypg', 'hypi']
    keys += ['hypglv', 'hypilv']
    reports = ['epoch']
    reports += ['main/' + key for key in keys]
    reports += ['validation/main/rmse']
    trainer.extend(TestModeEvaluator(valid_iter, model, device=device))
    trainer.extend(extensions.Evaluator(valid_iter, model, device=device))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
    trainer.extend(extensions.PrintReport(reports))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    # If previous model detected, resume
    if resume:
        print("Loading from {}".format(resume))
        chainer.serializers.load_npz(resume, trainer)

    # Run the model
    trainer.run()
def pretrain_source_cnn(data, args, epochs=1000):
    print(":: pretraining source encoder")
    source_cnn = Loss(num_classes=10)
    if args.device >= 0:
        source_cnn.to_gpu()

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(source_cnn)

    train_iterator, test_iterator = data2iterator(data, args.batchsize,
                                                  multiprocess=False)
    # train_iterator = chainer.iterators.MultiprocessIterator(data, args.batchsize, n_processes=4)

    updater = chainer.training.StandardUpdater(iterator=train_iterator,
                                               optimizer=optimizer,
                                               device=args.device)
    trainer = chainer.training.Trainer(updater, (epochs, 'epoch'), out=args.output)

    # learning rate decay
    # trainer.extend(extensions.ExponentialShift("alpha", rate=0.9,
    #     init=args.learning_rate, target=args.learning_rate*10E-5))

    trainer.extend(extensions.Evaluator(test_iterator, source_cnn,
                                        device=args.device))
    # trainer.extend(extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'),
    #                trigger=(10, "epoch"))
    trainer.extend(extensions.snapshot_object(
        optimizer.target, "source_model_epoch_{.updater.epoch}"),
        trigger=(epochs, "epoch"))

    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(extensions.LogReport(trigger=(1, "epoch")))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

    trainer.run()

    return source_cnn
def train_target_cnn(source, target, source_cnn, target_cnn, args, epochs=10000):
    print(":: training encoder with target domain")

    discriminator = Discriminator()

    if args.device >= 0:
        source_cnn.to_gpu()
        target_cnn.to_gpu()
        discriminator.to_gpu()

    # target_optimizer = chainer.optimizers.Adam(alpha=1.0E-5, beta1=0.5)
    target_optimizer = chainer.optimizers.RMSprop(lr=args.lr)
    # target_optimizer = chainer.optimizers.MomentumSGD(lr=1.0E-4, momentum=0.99)
    target_optimizer.setup(target_cnn.encoder)
    target_optimizer.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))

    # discriminator_optimizer = chainer.optimizers.Adam(alpha=1.0E-5, beta1=0.5)
    discriminator_optimizer = chainer.optimizers.RMSprop(lr=args.lr)
    # discriminator_optimizer = chainer.optimizers.MomentumSGD(lr=1.0E-4, momentum=0.99)
    discriminator_optimizer.setup(discriminator)
    discriminator_optimizer.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))

    source_train_iterator, source_test_iterator = data2iterator(
        source, args.batchsize, multiprocess=False)
    target_train_iterator, target_test_iterator = data2iterator(
        target, args.batchsize, multiprocess=False)

    updater = ADDAUpdater(source_train_iterator, target_train_iterator,
                          source_cnn, target_optimizer,
                          discriminator_optimizer, args)
    trainer = chainer.training.Trainer(updater, (epochs, 'epoch'), out=args.output)

    trainer.extend(extensions.Evaluator(target_test_iterator, target_cnn,
                                        device=args.device))
    # trainer.extend(extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'),
    #                trigger=(10, "epoch"))
    trainer.extend(extensions.snapshot_object(
        target_cnn, "target_model_epoch_{.updater.epoch}"),
        trigger=(epochs, "epoch"))

    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(extensions.LogReport(trigger=(1, "epoch")))
    trainer.extend(extensions.PrintReport(
        ["epoch", "loss/discrim", "loss/encoder",
         "validation/main/loss", "validation/main/accuracy", "elapsed_time"]))

    trainer.run()
def parse_args(generators, discriminators, updaters):
    parser = argparse.ArgumentParser(
        description='Semantic Segmentation using Adversarial Networks')
    parser.add_argument('--generator', choices=generators.keys(), default='fcn32s',
                        help='Generator (segmentor) architecture')
    parser.add_argument('--discriminator', choices=discriminators.keys(),
                        default='largefov', help='Discriminator architecture')
    parser.add_argument('--updater', choices=updaters.keys(), default='gan',
                        help='Updater')
    parser.add_argument('--initgen_path', default='pretrained_model/vgg16.npz',
                        help='Pretrained model of generator')
    parser.add_argument('--initdis_path', default=None,
                        help='Pretrained model of discriminator')
    parser.add_argument('--batchsize', '-b', type=int, default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--iteration', '-i', type=int, default=100000,
                        help='Number of iterations to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='snapshot',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--evaluate_interval', type=int, default=1000,
                        help='Interval of evaluation')
    parser.add_argument('--snapshot_interval', type=int, default=10000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval', type=int, default=10,
                        help='Interval of displaying log to console')
    return parser.parse_args()
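The function above only defines the CLI; the extracted snippet does not show how the intervals are consumed. A rough sketch of the typical wiring follows. This is an assumption, not part of the extracted project, and `updater` is presumed to be built elsewhere from the chosen generator/discriminator pair.

# Assumed wiring (not from the extracted project): attach snapshot and log
# extensions using the parsed intervals.
args = parse_args(generators, discriminators, updaters)
trainer = chainer.training.Trainer(updater, (args.iteration, 'iteration'),
                                   out=args.out)
trainer.extend(extensions.snapshot(),
               trigger=(args.snapshot_interval, 'iteration'))
trainer.extend(extensions.LogReport(trigger=(args.display_interval, 'iteration')))
if args.resume:
    # Reload a full trainer snapshot to continue training.
    chainer.serializers.load_npz(args.resume, trainer)
trainer.run()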
def fit(self, X, y=None, **kwargs):
    """If a hyperparameter is set to None, the instance's attribute is used
    instead; this behavior supports grid search via the `set_params` method.
    If the instance's attribute is not set either, _default_hyperparam is used.

    Usage: model.fit(train_dataset) or model.fit(X, y)

    Args:
        train: training dataset, assumes chainer's dataset class
        test: test dataset for evaluation, assumes chainer's dataset class
        batchsize: batch size for both training and evaluation
        iterator_class: iterator class used for this training, currently
            assumes SerialIterator or MultiprocessIterator
        optimizer: optimizer instance to update parameters
        epoch: training epoch
        out: directory path to save the result
        snapshot_frequency (int): snapshot frequency in epochs. A negative
            value indicates not to take snapshots.
        dump_graph: whether to save computational graph info, default False
        log_report: enable LogReport or not
        plot_report: enable PlotReport or not
        print_report: enable PrintReport or not
        progress_report: enable ProgressReport or not
        resume: path of a saved trainer to resume training from
    """
    kwargs = self.filter_sk_params(self.fit_core, kwargs)
    return self.fit_core(X, y, **kwargs)
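Given that docstring, a hedged usage sketch of this scikit-learn-style wrapper (the argument values are illustrative, and `train`/`test` are assumed to be chainer datasets):

# Illustrative call (values assumed): snapshot the trainer every 5 epochs
# and write all results under 'result/'.
model.fit(train, test=test, batchsize=128, epoch=20,
          snapshot_frequency=5, out='result')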
def train(args):
    model = JaCCGEmbeddingTagger(args.model,
                                 args.word_emb_size, args.char_emb_size)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    train = JaCCGTaggerDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = JaCCGTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
        val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.AdaGrad()
    optimizer.setup(model)
    # optimizer.add_hook(WeightDecay(1e-8))
    my_converter = lambda x, dev: convert.concat_examples(x, dev, (None, -1, None, None))
    updater = training.StandardUpdater(train_iter, optimizer,
                                       converter=my_converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, my_converter), trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
def start(self):
    """Train pose net."""
    # set random seed.
    if self.seed is not None:
        random.seed(self.seed)
        np.random.seed(self.seed)
        if self.gpu >= 0:
            chainer.cuda.cupy.random.seed(self.seed)
    # initialize model to train.
    model = AlexNet(self.Nj, self.use_visibility)
    if self.resume_model:
        serializers.load_npz(self.resume_model, model)
    # prepare gpu.
    if self.gpu >= 0:
        chainer.cuda.get_device(self.gpu).use()
        model.to_gpu()
    # load the datasets.
    train = PoseDataset(self.train, data_augmentation=self.data_augmentation)
    val = PoseDataset(self.val, data_augmentation=False)
    # training/validation iterators.
    train_iter = chainer.iterators.MultiprocessIterator(
        train, self.batchsize)
    val_iter = chainer.iterators.MultiprocessIterator(
        val, self.batchsize, repeat=False, shuffle=False)
    # set up an optimizer.
    optimizer = self._get_optimizer()
    optimizer.setup(model)
    if self.resume_opt:
        chainer.serializers.load_npz(self.resume_opt, optimizer)
    # set up a trainer.
    updater = training.StandardUpdater(train_iter, optimizer, device=self.gpu)
    trainer = training.Trainer(
        updater, (self.epoch, 'epoch'), os.path.join(self.out, 'chainer'))
    # standard trainer settings
    trainer.extend(extensions.dump_graph('main/loss'))
    val_interval = (10, 'epoch')
    trainer.extend(TestModeEvaluator(val_iter, model, device=self.gpu),
                   trigger=val_interval)
    # save parameters and optimization state per validation step
    resume_interval = (self.epoch / 10, 'epoch')
    trainer.extend(extensions.snapshot_object(
        model, "epoch-{.updater.epoch}.model"), trigger=resume_interval)
    trainer.extend(extensions.snapshot_object(
        optimizer, "epoch-{.updater.epoch}.state"), trigger=resume_interval)
    trainer.extend(extensions.snapshot(
        filename="epoch-{.updater.epoch}.iter"), trigger=resume_interval)
    # show log
    log_interval = (10, "iteration")
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss', 'lr']),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    # start training
    if self.resume:
        chainer.serializers.load_npz(self.resume, trainer)
    trainer.run()
def __init__(self, folder, chain, train, test, batchsize=500, resume=True,
             gpu=0, nepoch=1, reports=[]):
    self.reports = reports
    self.nepoch = nepoch
    self.folder = folder
    self.chain = chain
    self.gpu = gpu
    if self.gpu >= 0:
        chainer.cuda.get_device(gpu).use()
        chain.to_gpu(gpu)
    self.eval_chain = eval_chain = chain.copy()
    self.chain.test = False
    self.eval_chain.test = True
    self.testset = test

    if not os.path.exists(folder):
        os.makedirs(folder)

    train_iter = chainer.iterators.SerialIterator(train, batchsize, shuffle=True)
    test_iter = chainer.iterators.SerialIterator(test, batchsize,
                                                 repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, chain.optimizer, device=gpu)
    trainer = training.Trainer(updater, (nepoch, 'epoch'), out=folder)
    # trainer.extend(TrainingModeSwitch(chain))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.Evaluator(test_iter, eval_chain, device=gpu),
                   trigger=(1, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        chain, 'chain_snapshot_epoch_{.updater.epoch:06}'), trigger=(1, 'epoch'))
    trainer.extend(extensions.snapshot(
        filename='snapshot_epoch_{.updater.epoch:06}'), trigger=(1, 'epoch'))
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch')),
                   trigger=(1, 'iteration'))
    trainer.extend(extensions.PrintReport(
        ['epoch'] + reports), trigger=IntervalTrigger(1, 'epoch'))
    self.trainer = trainer

    if resume:
        # if resumeFrom is not None:
        #     trainerFile = os.path.join(resumeFrom[0],
        #                                'snapshot_epoch_{:06}'.format(resumeFrom[1]))
        #     S.load_npz(trainerFile, trainer)
        # scan the output folder for the latest existing snapshot and reload it.
        i = 1
        trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
        while i <= nepoch and os.path.isfile(trainerFile):
            i = i + 1
            trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
        i = i - 1
        trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
        if i >= 0 and os.path.isfile(trainerFile):
            S.load_npz(trainerFile, trainer)