The following 50 code examples, extracted from open source Python projects, illustrate how torch.Tensor() is used in practice.
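Before the extracted snippets, here is a minimal sketch of the two calling conventions of the legacy torch.Tensor() constructor that the examples below rely on; the variable names are illustrative only and not taken from any of the projects. Called with integer sizes it allocates an uninitialized tensor, and called with a (possibly nested) Python sequence it copies that data.

import torch

# torch.Tensor(*sizes): allocate an uninitialized 3x5 tensor (contents are
# arbitrary until filled, e.g. by an init function or an in-place method).
w = torch.Tensor(3, 5)
print(w.size())      # torch.Size([3, 5])

# torch.Tensor(sequence): copy data from a nested Python list.
x = torch.Tensor([[1, 2], [3, 4]])
print(x.tolist())    # [[1.0, 2.0], [3.0, 4.0]]
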
def xavier_uniform(tensor, gain=1):
    """Fills the input Tensor or Variable with values according to the method
    described in "Understanding the difficulty of training deep feedforward
    neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform
    distribution. The resulting tensor will have values sampled from
    :math:`U(-a, a)` where
    :math:`a = gain \\times \sqrt{2 / (fan\_in + fan\_out)} \\times \sqrt{3}`.
    Also known as Glorot initialisation.

    Args:
        tensor: an n-dimensional torch.Tensor or autograd.Variable
        gain: an optional scaling factor

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.xavier_uniform(w, gain=nn.init.calculate_gain('relu'))
    """
    if isinstance(tensor, Variable):
        xavier_uniform(tensor.data, gain=gain)
        return tensor

    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / (fan_in + fan_out))
    a = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
    return tensor.uniform_(-a, a)

def xavier_normal(tensor, gain=1):
    """Fills the input Tensor or Variable with values according to the method
    described in "Understanding the difficulty of training deep feedforward
    neural networks" - Glorot, X. & Bengio, Y. (2010), using a normal
    distribution. The resulting tensor will have values sampled from
    :math:`N(0, std)` where
    :math:`std = gain \\times \sqrt{2 / (fan\_in + fan\_out)}`.
    Also known as Glorot initialisation.

    Args:
        tensor: an n-dimensional torch.Tensor or autograd.Variable
        gain: an optional scaling factor

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.xavier_normal(w)
    """
    if isinstance(tensor, Variable):
        xavier_normal(tensor.data, gain=gain)
        return tensor

    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / (fan_in + fan_out))
    return tensor.normal_(0, std)

def add_summary(self, summary, global_step=None):
    """Adds a `Summary` protocol buffer to the event file.

    This method wraps the provided summary in an `Event` protocol buffer
    and adds it to the event file.

    You can pass the result of evaluating any summary op, using
    [`Session.run()`](client.md#Session.run) or
    [`Tensor.eval()`](framework.md#Tensor.eval), to this function.
    Alternatively, you can pass a `tf.Summary` protocol buffer that you
    populate with your own data. The latter is commonly done to report
    evaluation results in event files.

    Args:
      summary: A `Summary` protocol buffer, optionally serialized as a string.
      global_step: Number. Optional global step value to record with the
        summary.
    """
    if isinstance(summary, bytes):
        summ = summary_pb2.Summary()
        summ.ParseFromString(summary)
        summary = summ
    event = event_pb2.Event(summary=summary)
    self._add_event(event, global_step)

def make_sprite(label_img, save_path):
    import math
    import os
    import torch
    import torchvision
    # this ensures the sprite image has correct dimension as described in
    # https://www.tensorflow.org/get_started/embedding_viz
    nrow = int(math.ceil((label_img.size(0)) ** 0.5))

    # augment images so that #images equals nrow*nrow
    label_img = torch.cat((label_img, torch.randn(nrow ** 2 - label_img.size(0), *label_img.size()[1:]) * 255), 0)

    # Dirty fix: no pixels are appended by make_grid call in save_image (https://github.com/pytorch/vision/issues/206)
    xx = torchvision.utils.make_grid(torch.Tensor(1, 3, 32, 32), padding=0)
    if xx.size(2) == 33:
        sprite = torchvision.utils.make_grid(label_img, nrow=nrow, padding=0)
        sprite = sprite[:, 1:, 1:]
        torchvision.utils.save_image(sprite, os.path.join(save_path, 'sprite.png'))
    else:
        torchvision.utils.save_image(label_img, os.path.join(save_path, 'sprite.png'), nrow=nrow, padding=0)

def children(self):
    """
    Returns an iterator for the non-empty children of the Node

    The children are returned as (Node, pos) tuples where pos is 0 for the
    left subnode and 1 for the right.

    >>> len(list(create(dimensions=2).children))
    0

    >>> len(list(create([ Variable(torch.Tensor([[1, 2]])) ]).children))
    0

    >>> len(list(create([ Variable(torch.Tensor([[2, 2]])), Variable(torch.Tensor([[2, 1]])), Variable(torch.Tensor([[2, 3]])) ]).children))
    2
    """
    if self.left and self.left.data is not None:
        yield self.left, 0
    if self.right and self.right.data is not None:
        yield self.right, 1

def test(self, nb_episodes=1, maximum_episode_length=5000000):
    def evaluate_episode():
        reward = 0
        observation = self.env.reset()
        for _ in range(maximum_episode_length):
            action = self.choose_action(self.embedding_network(Variable(Tensor(observation)).unsqueeze(0)), 0)
            observation, immediate_reward, finished, info = self.env.step(action)
            reward += immediate_reward
            if finished:
                break
        return reward

    r = 0
    for _ in range(nb_episodes):
        r += evaluate_episode()
    return r / nb_episodes

def __init__(self, env_name, num_episodes, alpha, gamma, epsilon, policy, **kwargs):
    """
    base class for RL using lookup table
    :param env_name: name of environment, currently environments whose observation space is Box and
        action space is Discrete are supported.
        see https://github.com/openai/gym/wiki/Table-of-environments
    :param num_episodes: number of episodes for training
    :param alpha:
    :param gamma:
    :param epsilon:
    :param kwargs: other arguments.
    """
    super(FABase, self).__init__(env_name, num_episodes, alpha, gamma, policy,
                                 epsilon=epsilon, **kwargs)

    if not isinstance(self.env.action_space, gym.spaces.Discrete) or \
            not isinstance(self.env.observation_space, gym.spaces.Box):
        raise NotImplementedError("action_space should be discrete and "
                                  "observation_space should be box")

    self.obs_shape = self.env.observation_space.shape
    self.obs_size = reduce(lambda x, y: x * y, self.obs_shape)
    self.action_size = self.env.action_space.n
    self._feature = torch.Tensor(self.action_size, self.obs_size)
    self._weight = None

def __init__(self, in_channels, out_channels, kernel_size, stride=1,
             padding=0, dilation=None, groups=1, bias=True):
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.kh, self.kw = _pair(kernel_size)
    self.dh, self.dw = _pair(stride)
    self.padh, self.padw = _pair(padding)
    self.is_dilated = dilation is not None
    if self.is_dilated:
        self.dilh, self.dilw = _pair(dilation)
    self.groups = groups

    weight = torch.Tensor(self.out_channels, self.in_channels, self.kh, self.kw)
    bias = torch.Tensor(self.out_channels) if bias else None

    super(Conv2d, self).__init__(
        weight=weight,
        bias=bias,
    )
    self.reset_parameters()

def __init__(self, kT, kW, kH, dT=None, dW=None, dH=None, padT=0, padW=0, padH=0):
    super(VolumetricMaxPooling, self).__init__()

    self.kT = kT
    self.kH = kH
    self.kW = kW
    self.dT = dT or kT
    self.dW = dW or kW
    self.dH = dH or kH

    self.padT = padT
    self.padW = padW
    self.padH = padH

    self.ceil_mode = False
    self.indices = torch.Tensor()

def __init__(self, *args):
    super(CMul, self).__init__()

    if len(args) == 1 and isinstance(args[0], torch.Size):
        self.size = args[0]
    else:
        self.size = torch.Size(args)

    self.weight = torch.Tensor(self.size)
    self.gradWeight = torch.Tensor(self.size)
    self.output.resize_(self.size)
    self.reset()

    self._output = None
    self._weight = None
    self._expand = None
    self._repeat = None
    self._gradOutput = None
    self._gradInput = None
    self._input = None
    self._gradWeight = None
    self._sum = None

def __init__(self, nInputPlane, nOutputPlane, kW, kH, dW=1, dH=1, padW=0, padH=None):
    super(SpatialConvolution, self).__init__()

    self.nInputPlane = nInputPlane
    self.nOutputPlane = nOutputPlane
    self.kW = kW
    self.kH = kH
    self.dW = dW
    self.dH = dH
    self.padW = padW
    self.padH = padH or self.padW

    self.weight = torch.Tensor(nOutputPlane, nInputPlane, kH, kW)
    self.bias = torch.Tensor(nOutputPlane)
    self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane, kH, kW)
    self.gradBias = torch.Tensor(nOutputPlane)

    self.reset()

    self._input = None
    self._gradOutput = None
    self.finput = None
    self.fgradInput = None

def __init__(self, nIndex, nOutput, paddingValue=-1, maxNorm=None, normType=None):
    super(LookupTable, self).__init__()
    self.weight = torch.Tensor(nIndex, nOutput)
    self.gradWeight = torch.Tensor(nIndex, nOutput).zero_()
    self.paddingValue = paddingValue
    self.maxNorm = maxNorm
    self.normType = normType
    self.shouldScaleGradByFreq = False

    self._gradOutput = None
    self._sorted = None
    self._indices = None

    self._count = torch.IntTensor()
    self._input = torch.LongTensor()

    self.reset()

def __init__(self, kW, kH, dW=None, dH=None, padW=0, padH=0):
    super(SpatialMaxPooling, self).__init__()

    dW = dW or kW
    dH = dH or kH

    self.kW = kW
    self.kH = kH
    self.dW = dW
    self.dH = dH

    self.padW = padW
    self.padH = padH

    self.ceil_mode = False
    self.indices = torch.Tensor()

def __init__(self, nInputPlane=1, kernel=None, threshold=1e-4, thresval=1e-4):
    super(SpatialContrastiveNormalization, self).__init__()

    # get args
    self.nInputPlane = nInputPlane
    self.kernel = kernel or torch.Tensor(9, 9).fill_(1)
    self.threshold = threshold
    self.thresval = thresval or threshold
    kdim = self.kernel.ndimension()

    # check args
    if kdim != 2 and kdim != 1:
        raise ValueError('SpatialContrastiveNormalization averaging kernel must be 2D or 1D')

    if self.kernel.size(0) % 2 == 0 or (kdim == 2 and (self.kernel.size(1) % 2) == 0):
        raise ValueError('SpatialContrastiveNormalization averaging kernel must have ODD dimensions')

    # instantiate sub+div normalization
    self.normalizer = Sequential()
    self.normalizer.add(SpatialSubtractiveNormalization(self.nInputPlane, self.kernel))
    self.normalizer.add(SpatialDivisiveNormalization(self.nInputPlane, self.kernel,
                                                     self.threshold, self.thresval))

def __init__(self, conMatrix, kW, kH, dW=1, dH=1):
    super(SpatialFullConvolutionMap, self).__init__()

    self.kW = kW
    self.kH = kH
    self.dW = dW
    self.dH = dH
    self.connTable = conMatrix
    self.nInputPlane = int(self.connTable.select(1, 0).max()) + 1
    self.nOutputPlane = int(self.connTable.select(1, 1).max()) + 1

    self.weight = torch.Tensor(self.connTable.size(0), kH, kW)
    self.gradWeight = torch.Tensor(self.connTable.size(0), kH, kW)

    self.bias = torch.Tensor(self.nOutputPlane)
    self.gradBias = torch.Tensor(self.nOutputPlane)

    self.reset()

def reset(self, stdv=None):
    if stdv is not None:
        stdv = stdv * math.sqrt(3)
        self.weight.uniform_(-stdv, stdv)
        self.bias.uniform_(-stdv, stdv)
    else:
        ninp = torch.Tensor(self.nOutputPlane).zero_()
        for i in range(self.connTable.size(0)):
            idx = int(self.connTable[i][1])
            ninp[idx] += 1
        for k in range(self.connTable.size(0)):
            idx = int(self.connTable[k][1])
            stdv = 1. / math.sqrt(self.kW * self.kH * ninp[idx])
            self.weight[k].uniform_(-stdv, stdv)
        for k in range(self.bias.size(0)):
            stdv = 1. / math.sqrt(self.kW * self.kH * ninp[k])
            # TODO: torch.uniform
            self.bias[k] = random.uniform(-stdv, stdv)

def __init__(self, inputSize, outputSize):
    super(Euclidean, self).__init__()

    self.weight = torch.Tensor(inputSize, outputSize)
    self.gradWeight = torch.Tensor(inputSize, outputSize)

    # state
    self.gradInput.resize_(inputSize)
    self.output.resize_(outputSize)

    self.fastBackward = True
    self.reset()

    self._input = None
    self._weight = None
    self._expand = None
    self._expand2 = None
    self._repeat = None
    self._repeat2 = None
    self._div = None
    self._output = None
    self._gradOutput = None
    self._expand3 = None
    self._sum = None

def __init__(self, inputSize1, inputSize2, outputSize, bias=True):
    # set up model:
    super(Bilinear, self).__init__()
    self.weight = torch.Tensor(outputSize, inputSize1, inputSize2)
    self.gradWeight = torch.Tensor(outputSize, inputSize1, inputSize2)
    if bias:
        self.bias = torch.Tensor(outputSize)
        self.gradBias = torch.Tensor(outputSize)
    else:
        self.bias = None
        self.gradBias = None

    self.buff1 = None
    self.buff2 = None

    self.gradInput = [torch.Tensor(), torch.Tensor()]
    self.reset()

def __init__(self, nInputPlane, nOutputPlane, kT, kW, kH, dT=1, dW=1, dH=1,
             padT=0, padW=None, padH=None):
    super(VolumetricConvolution, self).__init__()

    self.nInputPlane = nInputPlane
    self.nOutputPlane = nOutputPlane
    self.kT = kT
    self.kW = kW
    self.kH = kH
    self.dT = dT
    self.dW = dW
    self.dH = dH
    self.padT = padT
    self.padW = padW or self.padT
    self.padH = padH or self.padW

    self.weight = torch.Tensor(nOutputPlane, nInputPlane, kT, kH, kW)
    self.bias = torch.Tensor(nOutputPlane)
    self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane, kT, kH, kW)
    self.gradBias = torch.Tensor(nOutputPlane)
    self.reset()

    self.finput = None
    self.fgradInput = None

def random(nin, nout, nto):
    nker = nto * nout
    tbl = torch.Tensor(nker, 2)
    fi = torch.randperm(nin)
    frcntr = 0
    nfi = math.floor(nin / nto)  # number of distinct nto chunks
    totbl = tbl.select(1, 1)
    frtbl = tbl.select(1, 0)
    fitbl = fi.narrow(0, 0, (nfi * nto))  # part of fi that covers distinct chunks
    ufrtbl = frtbl.unfold(0, nto, nto)
    utotbl = totbl.unfold(0, nto, nto)
    ufitbl = fitbl.unfold(0, nto, nto)

    # start fill_ing frtbl
    for i in range(nout):  # for each unit in target map
        ufrtbl.select(0, i).copy_(ufitbl.select(0, frcntr))
        frcntr += 1
        if frcntr - 1 == nfi:  # reset fi
            fi.copy_(torch.randperm(nin))
            frcntr = 1

    for tocntr in range(utotbl.size(0)):
        utotbl.select(0, tocntr).fill_(tocntr)

    return tbl

def __init__(self, conMatrix, kW, kH, dW=1, dH=1):
    super(SpatialConvolutionMap, self).__init__()

    self.kW = kW
    self.kH = kH
    self.dW = dW
    self.dH = dH
    self.connTable = conMatrix
    self.nInputPlane = int(self.connTable.select(1, 0).max()) + 1
    self.nOutputPlane = int(self.connTable.select(1, 1).max()) + 1

    self.weight = torch.Tensor(self.connTable.size(0), kH, kW)
    self.bias = torch.Tensor(self.nOutputPlane)
    self.gradWeight = torch.Tensor(self.connTable.size(0), kH, kW)
    self.gradBias = torch.Tensor(self.nOutputPlane)

    self.reset()

def test_parameters(self):
    def num_params(module):
        return len(list(module.parameters()))

    class Net(nn.Container):
        def __init__(self):
            super(Net, self).__init__(
                l1=l,
                l2=l
            )
            self.param = Parameter(torch.Tensor(3, 5))

    l = nn.Linear(10, 20)
    n = Net()
    s = nn.Sequential(n, n, n, n)
    self.assertEqual(num_params(l), 2)
    self.assertEqual(num_params(n), 3)
    self.assertEqual(num_params(s), 3)

def _test_rosenbrock(self, constructor, old_fn):
    params_t = torch.Tensor([1.5, 1.5])
    state = {}

    params = Variable(torch.Tensor([1.5, 1.5]), requires_grad=True)
    optimizer = constructor([params])

    solution = torch.Tensor([1, 1])
    initial_dist = params.data.dist(solution)

    def eval():
        loss = rosenbrock(params)
        loss.backward()
        return loss

    for i in range(2000):
        optimizer.zero_grad()
        optimizer.step(eval)
        old_fn(lambda _: (rosenbrock(params_t), drosenbrock(params_t)),
               params_t, state)
        self.assertEqual(params.data, params_t)

    self.assertLessEqual(params.data.dist(solution), initial_dist)

def test_Dropout(self):
    p = 0.2
    input = torch.Tensor(1000).fill_(1 - p)
    module = nn.Dropout(p)

    output = module.forward(input)
    self.assertLess(abs(output.mean() - (1 - p)), 0.05)
    gradInput = module.backward(input, input)
    self.assertLess(abs(gradInput.mean() - (1 - p)), 0.05)

    module = nn.Dropout(p, True)
    output = module.forward(input.clone())
    self.assertLess(abs(output.mean() - (1 - p)), 0.05)
    gradInput = module.backward(input.clone(), input.clone())
    self.assertLess(abs(gradInput.mean() - (1 - p)), 0.05)

    # Check that these don't raise errors
    module.__repr__()
    str(module)

def test_SpatialDropout(self):
    p = 0.2
    b = random.randint(1, 5)
    w = random.randint(1, 5)
    h = random.randint(1, 5)
    nfeats = 1000
    input = torch.Tensor(b, nfeats, w, h).fill_(1)
    module = nn.SpatialDropout(p)
    module.training()
    output = module.forward(input)
    self.assertLess(abs(output.mean() - (1 - p)), 0.05)
    gradInput = module.backward(input, input)
    self.assertLess(abs(gradInput.mean() - (1 - p)), 0.05)

    # Check that these don't raise errors
    module.__repr__()
    str(module)

def test_VolumetricDropout(self):
    p = 0.2
    bsz = random.randint(1, 5)
    t = random.randint(1, 5)
    w = random.randint(1, 5)
    h = random.randint(1, 5)
    nfeats = 1000
    input = torch.Tensor(bsz, nfeats, t, w, h).fill_(1)
    module = nn.VolumetricDropout(p)
    module.training()
    output = module.forward(input)
    self.assertLess(abs(output.mean() - (1 - p)), 0.05)
    gradInput = module.backward(input, input)
    self.assertLess(abs(gradInput.mean() - (1 - p)), 0.05)

    # Check that these don't raise errors
    module.__repr__()
    str(module)

def test_MaskedSelect(self):
    input = torch.randn(4, 5)
    mask = torch.ByteTensor(4, 5).bernoulli_()
    module = nn.MaskedSelect()
    out = module.forward([input, mask])
    self.assertEqual(input.masked_select(mask), out)

    gradOut = torch.Tensor((20, 80))
    input = torch.Tensor(((10, 20), (30, 40)))
    inTarget = torch.Tensor(((20, 0), (0, 80)))
    mask = torch.ByteTensor(((1, 0), (0, 1)))
    module = nn.MaskedSelect()
    module.forward([input, mask])
    gradIn = module.backward([input, mask], gradOut)
    self.assertEqual(inTarget, gradIn[0])

    # Check that these don't raise errors
    module.__repr__()
    str(module)

def test_linspace(self):
    _from = random.random()
    to = _from + random.random()
    res1 = torch.linspace(_from, to, 137)
    res2 = torch.Tensor()
    torch.linspace(res2, _from, to, 137)
    self.assertEqual(res1, res2, 0)
    self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, 1))
    self.assertEqual(torch.linspace(0, 0, 1), torch.zeros(1), 0)

    # Check linspace for generating with start > end.
    self.assertEqual(torch.linspace(2, 0, 3), torch.Tensor((2, 1, 0)), 0)

    # Check linspace for non-contiguous tensors.
    x = torch.zeros(2, 3)
    y = torch.linspace(x.narrow(1, 1, 2), 0, 3, 4)
    self.assertEqual(x, torch.Tensor(((0, 0, 1), (0, 2, 3))), 0)

def test_logspace(self):
    _from = random.random()
    to = _from + random.random()
    res1 = torch.logspace(_from, to, 137)
    res2 = torch.Tensor()
    torch.logspace(res2, _from, to, 137)
    self.assertEqual(res1, res2, 0)
    self.assertRaises(RuntimeError, lambda: torch.logspace(0, 1, 1))
    self.assertEqual(torch.logspace(0, 0, 1), torch.ones(1), 0)

    # Check logspace_ for generating with start > end.
    self.assertEqual(torch.logspace(1, 0, 2), torch.Tensor((10, 1)), 0)

    # Check logspace_ for non-contiguous tensors.
    x = torch.zeros(2, 3)
    y = torch.logspace(x.narrow(1, 1, 2), 0, 3, 4)
    self.assertEqual(x, torch.Tensor(((0, 1, 10), (0, 100, 1000))), 0)

def test_inverse(self):
    M = torch.randn(5, 5)
    MI = torch.inverse(M)
    E = torch.eye(5)
    self.assertFalse(MI.is_contiguous(), 'MI is contiguous')
    self.assertEqual(E, torch.mm(M, MI), 1e-8, 'inverse value')
    self.assertEqual(E, torch.mm(MI, M), 1e-8, 'inverse value')

    MII = torch.Tensor(5, 5)
    torch.inverse(MII, M)
    self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
    self.assertEqual(MII, MI, 0, 'inverse value in-place')
    # second call, now that MII is transposed
    torch.inverse(MII, M)
    self.assertFalse(MII.is_contiguous(), 'MII is contiguous')
    self.assertEqual(MII, MI, 0, 'inverse value in-place')

def test_abs(self):
    size = 1000
    max_val = 1000
    original = torch.rand(size).mul(max_val)
    # Tensor filled with values from {-1, 1}
    switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

    types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
    for t in types:
        data = original.type(t)
        switch = switch.type(t)
        res = torch.mul(data, switch)
        self.assertEqual(res.abs(), data, 1e-16)

    # Checking that the right abs function is called for LongTensor
    bignumber = 2 ** 31 + 1  # note: ``2^31`` is XOR in Python, not exponentiation
    res = torch.LongTensor((-bignumber,))
    self.assertGreater(res.abs()[0], 0)

def test_tolist(self):
    list0D = []
    tensor0D = torch.Tensor(list0D)
    self.assertEqual(tensor0D.tolist(), list0D)

    table1D = [1, 2, 3]
    tensor1D = torch.Tensor(table1D)
    storage = torch.Storage(table1D)
    self.assertEqual(tensor1D.tolist(), table1D)
    self.assertEqual(storage.tolist(), table1D)
    self.assertEqual(tensor1D.tolist(), table1D)
    self.assertEqual(storage.tolist(), table1D)

    table2D = [[1, 2], [3, 4]]
    tensor2D = torch.Tensor(table2D)
    self.assertEqual(tensor2D.tolist(), table2D)

    tensor3D = torch.Tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
    tensorNonContig = tensor3D.select(1, 1)
    self.assertFalse(tensorNonContig.is_contiguous())
    self.assertEqual(tensorNonContig.tolist(), [[3, 4], [7, 8]])

def set_vectors(self, stoi, vectors, dim, unk_init=torch.Tensor.zero_):
    """
    Set the vectors for the Vocab instance from a collection of Tensors.

    Arguments:
        stoi: A dictionary of string to the index of the associated vector
            in the `vectors` input argument.
        vectors: An indexed iterable (or other structure supporting __getitem__) that
            given an input index, returns a FloatTensor representing the vector
            for the token associated with the index. For example,
            vector[stoi["string"]] should return the vector for "string".
        dim: The dimensionality of the vectors.
        unk_init (callback): by default, initialize out-of-vocabulary word vectors
            to zero vectors; can be any function that takes in a Tensor and
            returns a Tensor of the same size. Default: torch.Tensor.zero_
    """
    self.vectors = torch.Tensor(len(self), dim)
    for i, token in enumerate(self.itos):
        wv_index = stoi.get(token, None)
        if wv_index is not None:
            self.vectors[i] = vectors[wv_index]
        else:
            self.vectors[i] = unk_init(self.vectors[i])

def __getitem__(self, token):
    vector = torch.Tensor(1, self.dim).zero_()
    if token == "<unk>":
        return self.unk_init(vector)
    # These literals need to be coerced to unicode for Python 2 compatibility
    # when we try to join them with read ngrams from the files.
    chars = ['#BEGIN#'] + list(token) + ['#END#']
    num_vectors = 0
    for n in [2, 3, 4]:
        end = len(chars) - n + 1
        grams = [chars[i:(i + n)] for i in range(end)]
        for gram in grams:
            gram_key = '{}gram-{}'.format(n, ''.join(gram))
            if gram_key in self.stoi:
                vector += self.vectors[self.stoi[gram_key]]
                num_vectors += 1
    if num_vectors > 0:
        vector /= num_vectors
    else:
        vector = self.unk_init(vector)
    return vector

def testAUCMeter(self):
    mtr = meter.AUCMeter()

    test_size = 1000
    mtr.add(torch.rand(test_size), torch.zeros(test_size))
    mtr.add(torch.rand(test_size), torch.Tensor(test_size).fill_(1))

    val, tpr, fpr = mtr.value()
    self.assertTrue(math.fabs(val - 0.5) < 0.1, msg="AUC Meter fails")

    mtr.reset()
    mtr.add(torch.Tensor(test_size).fill_(0), torch.zeros(test_size))
    mtr.add(torch.Tensor(test_size).fill_(0.1), torch.zeros(test_size))
    mtr.add(torch.Tensor(test_size).fill_(0.2), torch.zeros(test_size))
    mtr.add(torch.Tensor(test_size).fill_(0.3), torch.zeros(test_size))
    mtr.add(torch.Tensor(test_size).fill_(0.4), torch.zeros(test_size))
    mtr.add(torch.Tensor(test_size).fill_(1), torch.Tensor(test_size).fill_(1))

    val, tpr, fpr = mtr.value()
    self.assertEqual(val, 1.0, msg="AUC Meter fails")

def __init__(self, model, action_size=1, init_value=0.0, *args, **kwargs):
    super(DiagonalGaussianPolicy, self).__init__(model, *args, **kwargs)
    self.init_value = init_value
    self.logstd = th.zeros((1, action_size)) + self.init_value
    self.logstd = P(self.logstd)
    self.halflog2pie = V(T([2 * pi * exp(1)])) * 0.5
    self.halflog2pi = V(T([2.0 * pi])) * 0.5
    self.pi = V(T([pi]))

def discount(rewards, gamma):
    tensor = False
    if not isinstance(rewards, list):
        tensor = True
        rewards = rewards.split(1)
    R = 0.0
    discounted = []
    for r in rewards[::-1]:
        R = r + gamma * R
        discounted.insert(0, R)
    if tensor:
        return th.cat(discounted).view(-1)
    return T(discounted)

def generalized_advantage_estimations(rewards, values, terminal=None, gamma=0.99, tau=0.95):
    gae = 0.0
    advantages = []
    values = th.cat([values, V(T([0.0077]))])
    for i in reversed(range(len(rewards))):
        nonterminal = 1.0 - terminal[i]
        delta = rewards[i] + gamma * values[i + 1] * nonterminal - values[i]
        gae = delta + gamma * tau * gae * nonterminal
        advantages.insert(0, gae + values[i])
    return th.cat(advantages)

def get_update(self):
    num_traj = loss_stats = critics_stats = entropy_stats = policy_stats = 0.0
    all_rewards, all_advantages = self.advantage(self.rewards, self.critics, self.terminals)
    # for actions_ep, rewards_ep, critics_ep, entropy_ep, terminals_ep in zip(self.actions, self.rewards, self.critics, self.entropies, self.terminals):
    for actions_ep, rewards_ep, advantage_ep, critics_ep, entropy_ep, terminals_ep in zip(
            self.actions, all_rewards, all_advantages, self.critics, self.entropies, self.terminals):
        if len(actions_ep) > 0:
            # Compute advantages
            # rewards_ep = V(T(rewards_ep))
            critics_ep = th.cat(critics_ep, 0).view(-1)
            # rewards_ep, advantage_ep = self.advantage(rewards_ep, critics_ep, terminals_ep)

            # Compute losses
            critic_loss = (rewards_ep - critics_ep).pow(2).mean()
            entropy_loss = th.cat(entropy_ep).mean()
            critic_loss = self.critic_weight * critic_loss
            entropy_loss = - self.entropy_weight * entropy_loss

            # Compute policy gradients
            policy_loss = 0.0
            for action, advantage in zip(actions_ep, advantage_ep):
                policy_loss = policy_loss - action.log_prob.mean() * advantage.data[0]

            loss = policy_loss + critic_loss + entropy_loss
            loss.backward(retain_graph=True)
            if self.grad_clip > 0.0:
                th.nn.utils.clip_grad_norm(self.parameters(), self.grad_clip)

            # Update running statistics
            loss_stats += loss.data[0]
            critics_stats += critic_loss.data[0]
            entropy_stats += entropy_loss.data[0]
            policy_stats += policy_loss.data[0]
            num_traj += 1.0

    # Store statistics
    self.stats['Num. Updates'] += 1.0
    self.stats['Num. Trajectories'] += num_traj
    self.stats['Critic Loss'] += critics_stats / num_traj
    self.stats['Entropy Loss'] += entropy_stats / num_traj
    self.stats['Policy Loss'] += policy_stats / num_traj
    self.stats['Total Loss'] += loss_stats / num_traj
    self.stats['Num. Steps'] += self.steps
    self._reset()
    return [p.grad.clone() for p in self.parameters()]

def equal(a, b):
    if isinstance(a, torch.Tensor):
        return a.equal(b)
    elif isinstance(a, str):
        return a == b
    elif isinstance(a, collections.Iterable):
        res = True
        for (x, y) in zip(a, b):
            res = res & equal(x, y)
        return res
    else:
        return a == b

def checkAverager(self):
    acc = utils.averager()
    acc.add(Variable(torch.Tensor([1, 2])))
    acc.add(Variable(torch.Tensor([[5, 6]])))
    assert acc.val() == 3.5

    acc = utils.averager()
    acc.add(torch.Tensor([1, 2]))
    acc.add(torch.Tensor([[5, 6]]))
    assert acc.val() == 3.5

def checkAssureRatio(self):
    img = torch.Tensor([[1], [3]]).view(1, 1, 2, 1)
    img = Variable(img)
    img = utils.assureRatio(img)
    assert torch.Size([1, 1, 2, 2]) == img.size()

def add(self, v):
    if isinstance(v, Variable):
        count = v.data.numel()
        v = v.data.sum()
    elif isinstance(v, torch.Tensor):
        count = v.numel()
        v = v.sum()

    self.n_count += count
    self.sum += v

def __init__(self, batch_size, num_tokens, embed_size, word_gru_hidden,
             bidirectional=True, init_range=0.1, use_lstm=False):
    super(AttentionWordRNN, self).__init__()

    self.batch_size = batch_size
    self.num_tokens = num_tokens
    self.embed_size = embed_size
    self.word_gru_hidden = word_gru_hidden
    self.bidirectional = bidirectional
    self.use_lstm = use_lstm

    self.lookup = nn.Embedding(num_tokens, embed_size)
    if bidirectional == True:
        if use_lstm:
            print("inside using LSTM")
            self.word_gru = nn.LSTM(embed_size, word_gru_hidden, bidirectional=True)
        else:
            self.word_gru = nn.GRU(embed_size, word_gru_hidden, bidirectional=True)
        self.weight_W_word = nn.Parameter(torch.Tensor(2 * word_gru_hidden, 2 * word_gru_hidden))
        self.bias_word = nn.Parameter(torch.Tensor(2 * word_gru_hidden, 1))
        self.weight_proj_word = nn.Parameter(torch.Tensor(2 * word_gru_hidden, 1))
    else:
        if use_lstm:
            self.word_gru = nn.LSTM(embed_size, word_gru_hidden, bidirectional=False)
        else:
            self.word_gru = nn.GRU(embed_size, word_gru_hidden, bidirectional=False)
        self.weight_W_word = nn.Parameter(torch.Tensor(word_gru_hidden, word_gru_hidden))
        self.bias_word = nn.Parameter(torch.Tensor(word_gru_hidden, 1))
        self.weight_proj_word = nn.Parameter(torch.Tensor(word_gru_hidden, 1))

    self.softmax_word = nn.Softmax()
    self.weight_W_word.data.uniform_(-init_range, init_range)
    self.weight_proj_word.data.uniform_(-init_range, init_range)

def train(args):
    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    loader = data_loader(data_path, is_transform=True, img_size=(args.img_rows, args.img_cols))
    n_classes = loader.n_classes
    trainloader = data.DataLoader(loader, batch_size=args.batch_size, num_workers=4, shuffle=True)

    # Setup visdom for visualization
    if args.visdom:
        vis = visdom.Visdom()

        loss_window = vis.line(X=torch.zeros((1,)).cpu(),
                               Y=torch.zeros((1)).cpu(),
                               opts=dict(xlabel='minibatches',
                                         ylabel='Loss',
                                         title='Training Loss',
                                         legend=['Loss']))

    # Setup Model
    model = get_model(args.arch, n_classes)
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    model.cuda()

    optimizer = torch.optim.SGD(model.parameters(), lr=args.l_rate, momentum=0.99, weight_decay=5e-4)

    for epoch in range(args.n_epoch):
        for i, (images, labels) in enumerate(trainloader):
            images = Variable(images.cuda())
            labels = Variable(labels.cuda())

            optimizer.zero_grad()
            outputs = model(images)

            loss = cross_entropy2d(outputs, labels)

            loss.backward()
            optimizer.step()

            if args.visdom:
                vis.line(
                    X=torch.ones((1, 1)).cpu() * i,
                    Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(),
                    win=loss_window,
                    update='append')

            if (i + 1) % 20 == 0:
                print("Epoch [%d/%d] Loss: %.4f" % (epoch + 1, args.n_epoch, loss.data[0]))

        torch.save(model, "{}_{}_{}_{}.pkl".format(args.arch, args.dataset, args.feature_scale, epoch))

def dirac_delta(ni, no, k):
    n = min(ni, no)
    size = (n, n) + k
    repeats = (max(no // ni, 1), max(ni // no, 1)) + (1,) * len(k)
    return dirac(torch.Tensor(*size)).repeat(*repeats)

def __init__(self, in_channels, out_channels, kernel_size, padding=0, dilation=1, bias=True):
    super(DiracConv1d, self).__init__(in_channels, out_channels, kernel_size, stride=1,
                                      padding=padding, dilation=dilation, bias=bias)
    self.alpha = nn.Parameter(torch.Tensor([5]))
    self.beta = nn.Parameter(torch.Tensor([1e-5]))
    self.register_buffer('delta', dirac_delta(in_channels, out_channels, k=self.weight.size()[2:]))
    assert self.delta.size() == self.weight.size()

def __init__(self, in_channels, out_channels, kernel_size, padding=0, dilation=1, bias=True):
    super(DiracConv2d, self).__init__(in_channels, out_channels, kernel_size, stride=1,
                                      padding=padding, dilation=dilation, bias=bias)
    self.alpha = nn.Parameter(torch.Tensor([5]))
    self.beta = nn.Parameter(torch.Tensor([1e-5]))
    self.register_buffer('delta', dirac_delta(in_channels, out_channels, self.weight.size()[2:]))
    assert self.delta.size() == self.weight.size()

def __init__(self, in_channels, out_channels, kernel_size, padding=0, dilation=1, bias=True):
    super(DiracConv3d, self).__init__(in_channels, out_channels, kernel_size, stride=1,
                                      padding=padding, dilation=dilation, bias=bias)
    self.alpha = nn.Parameter(torch.Tensor([5]))
    self.beta = nn.Parameter(torch.Tensor([1e-5]))
    self.register_buffer('delta', dirac_delta(in_channels, out_channels, self.weight.size()[2:]))
    assert self.delta.size() == self.weight.size()