The following 19 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.ParameterList().
def __set_update(self, update_def, args):
    """Resolve the update function and its learnable parameters from a name.

    :param update_def: name of the update scheme ('duvenaud', 'ggnn', 'intnet', 'mpnn')
    :param args: configuration dict forwarded to the chosen init_* routine
    """
    self.u_definition = update_def.lower()

    # Dispatch table: update-scheme name -> bound update method.
    u_table = {
        'duvenaud': self.u_duvenaud,
        'ggnn': self.u_ggnn,
        'intnet': self.u_intnet,
        'mpnn': self.u_mpnn,
    }
    self.u_function = u_table.get(self.u_definition, None)
    if self.u_function is None:
        print('WARNING!: Update Function has not been set correctly\n\tIncorrect definition ' + update_def)

    # Matching parameter initializers; unknown names get empty containers.
    init_table = {
        'duvenaud': self.init_duvenaud,
        'ggnn': self.init_ggnn,
        'intnet': self.init_intnet,
        'mpnn': self.init_mpnn,
    }
    init_parameters = init_table.get(
        self.u_definition,
        lambda x: (nn.ParameterList([]), nn.ModuleList([]), {}))
    self.learn_args, self.learn_modules, self.args = init_parameters(args)
    # Get the name of the used update function
def init_duvenaud(self, params):
    """Duvenaud update: one weight matrix H per (non-zero) node degree.

    :param params: dict with keys 'deg', 'in', 'out'
    :return: (ParameterList, ModuleList, args dict)
    """
    args = {}
    # Filter degree 0 (the message will be 0 and therefore there is no update)
    args['deg'] = [d for d in params['deg'] if d != 0]
    args['in'] = params['in']
    args['out'] = params['out']

    # Define a parameter matrix H for each degree.
    weight = torch.nn.Parameter(torch.randn(len(args['deg']), args['in'], args['out']))

    return nn.ParameterList([weight]), nn.ModuleList([]), args
# GG-NN, Li et al.
def init_duvenaud(self, params):
    """Duvenaud readout: one weight matrix W per layer plus an output NNet.

    :param params: dict with keys 'layers', 'in' (per-layer sizes), 'out', 'target'
    :return: (ParameterList, ModuleList, args dict)
    """
    args = {'out': params['out']}

    # Define a parameter matrix W for each layer.
    weights = [nn.Parameter(torch.randn(params['in'][layer], params['out']))
               for layer in range(params['layers'])]

    modules = [NNet(n_in=params['out'], n_out=params['target'])]

    return nn.ParameterList(weights), nn.ModuleList(modules), args
# GG-NN, Li et al.
def init_ggnn(self, params):
    """GG-NN readout: two NNets ('i' on concatenated states, 'j' on raw states).

    :param params: dict with keys 'in', 'target'
    :return: (ParameterList, ModuleList, args dict)
    """
    modules = [
        NNet(n_in=2 * params['in'], n_out=params['target']),  # i
        NNet(n_in=params['in'], n_out=params['target']),      # j
    ]
    args = {'out': params['target']}
    return nn.ParameterList([]), nn.ModuleList(modules), args
# Battaglia et al. (2016), Interaction Networks
def __init__(self, kernels, input_embed_size, bias=False):
    """
    :param kernels: array of pairs (width, out_dim)
    :param input_embed_size: size of input; conv kernel i has shape [out_dim_i, input_embed_size, width_i]
    :param bias: whether to use a per-kernel bias when the convolution is performed
    """
    super(TDNN, self).__init__()
    self.input_embed_size = input_embed_size

    # One learnable conv kernel per (width, out_dim) pair.
    weights = []
    for width, out_dim in kernels:
        tensor = t.Tensor(out_dim, input_embed_size, width).normal_(0, 0.05)
        weights.append(Parameter(tensor))
    self.kernels = nn.ParameterList(weights)

    self.use_bias = bias
    if self.use_bias:
        self.biases = nn.ParameterList(
            [Parameter(t.Tensor(out_dim).normal_(0, 0.05)) for _, out_dim in kernels]
        )
def __init__(self, mixture_size: int, do_layer_norm: bool = False) -> None:
    """Create one scalar weight per mixed layer plus a global gamma.

    :param mixture_size: number of tensors to mix
    :param do_layer_norm: whether layer normalization is applied before mixing
    """
    super(ScalarMix, self).__init__()
    self.mixture_size = mixture_size
    self.do_layer_norm = do_layer_norm

    # All mixing weights start at 0.0 (uniform after softmax); gamma at 1.0.
    scalars = [Parameter(torch.FloatTensor([0.0])) for _ in range(mixture_size)]
    self.scalar_parameters = ParameterList(scalars)
    self.gamma = Parameter(torch.FloatTensor([1.0]))
def test_ParameterList(self):
    """Every mutation API of nn.ParameterList keeps it identical to a plain list."""
    def new_param():
        return Parameter(torch.randn(10, 10))

    params = [new_param(), new_param()]
    plist = nn.ParameterList(params)

    def verify():
        # Same length, and the very same Parameter objects (identity),
        # whether accessed by index, iteration, or .parameters().
        self.assertEqual(len(params), len(plist))
        for idx, expected in enumerate(params):
            self.assertIs(expected, plist[idx])
        for expected, actual in zip(params, plist):
            self.assertIs(expected, actual)
        for expected, actual in zip(params, plist.parameters()):
            self.assertIs(expected, actual)

    verify()
    # In-place += with a one-element list.
    extra = new_param()
    params += [extra]
    plist += [extra]
    verify()
    # append()
    appended = new_param()
    params.append(appended)
    plist.append(appended)
    verify()
    # extend()
    more = [new_param(), new_param()]
    params.extend(more)
    plist.extend(more)
    verify()
    # Item assignment.
    replacement = new_param()
    params[2] = replacement
    plist[2] = replacement
    verify()
    # A bare Parameter is not an iterable of parameters: must be rejected.
    with self.assertRaises(TypeError):
        plist += new_param()
    with self.assertRaises(TypeError):
        plist.extend(new_param())
def init_ggnn(self, params):
    """GG-NN update: a single GRU mapping incoming messages to new node states.

    :param params: dict with keys 'in_m' (message size), 'out' (state size)
    :return: (ParameterList, ModuleList, args dict)
    """
    args = {'in_m': params['in_m'], 'out': params['out']}
    # GRU
    gru = nn.GRU(params['in_m'], params['out'])
    return nn.ParameterList([]), nn.ModuleList([gru]), args
# Battaglia et al. (2016), Interaction Networks
def init_intnet(self, params):
    """Interaction Network update: one NNet from aggregated input to state size.

    :param params: dict with keys 'in', 'out'
    :return: (ParameterList, ModuleList, args dict)
    """
    args = {'in': params['in'], 'out': params['out']}
    net = NNet(n_in=params['in'], n_out=params['out'])
    return nn.ParameterList([]), nn.ModuleList([net]), args
def init_mpnn(self, params):
    """MPNN update: a single GRU mapping incoming messages to new node states.

    :param params: dict with keys 'in_m' (message size), 'out' (state size)
    :return: (ParameterList, ModuleList, args dict)
    """
    args = {'in_m': params['in_m'], 'out': params['out']}
    # GRU
    gru = nn.GRU(params['in_m'], params['out'])
    return nn.ParameterList([]), nn.ModuleList([gru]), args
def init_intnet(self, params):
    """Interaction Network readout: a single NNet projecting to the target size.

    :param params: dict with keys 'in', 'target'
    :return: (ParameterList, ModuleList, args dict)
    """
    net = NNet(n_in=params['in'], n_out=params['target'])
    return nn.ParameterList([]), nn.ModuleList([net]), {}
def init_mpnn(self, params):
    """MPNN readout: two NNets ('i' on concatenated states, 'j' on raw states).

    :param params: dict with keys 'in', 'target'
    :return: (ParameterList, ModuleList, args dict)
    """
    modules = [
        NNet(n_in=2 * params['in'], n_out=params['target']),  # i
        NNet(n_in=params['in'], n_out=params['target']),      # j
    ]
    args = {'out': params['target']}
    return nn.ParameterList([]), nn.ModuleList(modules), args
def __set_message(self, message_def, args=None):
    """Resolve the message function, its size helper, and learnable parameters.

    :param message_def: name of the message scheme ('duvenaud', 'ggnn',
        'intnet', 'mpnn', 'mgc', 'bruna', 'defferrard', 'kipf')
    :param args: configuration dict forwarded to the chosen init_* routine.
        Fix: previously this defaulted to a shared mutable ``{}`` (classic
        mutable-default pitfall); a ``None`` sentinel is used instead, which
        is backward-compatible for all callers.
    """
    if args is None:
        args = {}
    self.m_definition = message_def.lower()
    # Dispatch table: message-scheme name -> bound message method.
    self.m_function = {
        'duvenaud': self.m_duvenaud,
        'ggnn': self.m_ggnn,
        'intnet': self.m_intnet,
        'mpnn': self.m_mpnn,
        'mgc': self.m_mgc,
        'bruna': self.m_bruna,
        'defferrard': self.m_deff,
        'kipf': self.m_kipf
    }.get(self.m_definition, None)
    if self.m_function is None:
        # Unlike __set_update, an unknown message scheme is fatal.
        print('WARNING!: Message Function has not been set correctly\n\tIncorrect definition ' + message_def)
        quit()
    # Parameter initializers; schemes without one get empty containers.
    init_parameters = {
        'duvenaud': self.init_duvenaud,
        'ggnn': self.init_ggnn,
        'intnet': self.init_intnet,
        'mpnn': self.init_mpnn
    }.get(self.m_definition, lambda x: (nn.ParameterList([]), nn.ModuleList([]), {}))
    self.learn_args, self.learn_modules, self.args = init_parameters(args)
    # Helper that reports the output size of the chosen message function.
    self.m_size = {
        'duvenaud': self.out_duvenaud,
        'ggnn': self.out_ggnn,
        'intnet': self.out_intnet,
        'mpnn': self.out_mpnn
    }.get(self.m_definition, None)
    # Get the name of the used message function
def init_ggnn(self, params):
    """GG-NN message: one learned matrix A per edge label.

    :param params: dict with keys 'e_label', 'in', 'out'
    :return: (ParameterList, ModuleList, args dict)
    """
    args = {'e_label': params['e_label'], 'in': params['in'], 'out': params['out']}
    # Define a parameter matrix A for each edge label.
    edge_weights = nn.Parameter(torch.randn(len(params['e_label']), params['in'], params['out']))
    return nn.ParameterList([edge_weights]), nn.ModuleList([]), args
# Battaglia et al. (2016), Interaction Networks
def init_intnet(self, params):
    """Interaction Network message: one NNet mapping inputs to message size.

    :param params: dict with keys 'in', 'out'
    :return: (ParameterList, ModuleList, args dict)
    """
    args = {'in': params['in'], 'out': params['out']}
    net = NNet(n_in=params['in'], n_out=params['out'])
    return nn.ParameterList([]), nn.ModuleList([net]), args
# Gilmer et al. (2017), Neural Message Passing for Quantum Chemistry
def init_mpnn(self, params):
    """MPNN message: an edge network producing an (in*out) matrix per edge.

    :param params: dict with keys 'in', 'out', 'edge_feat'
    :return: (ParameterList, ModuleList, args dict)
    """
    args = {'in': params['in'], 'out': params['out']}
    # Define a parameter matrix A for each edge label.
    edge_net = NNet(n_in=params['edge_feat'], n_out=params['in'] * params['out'])
    return nn.ParameterList([]), nn.ModuleList([edge_net]), args
# Kearnes et al. (2016), Molecular Graph Convolutions
def test_ParameterList(self):
    # Helper: a fresh 10x10 learnable parameter.
    def make_param():
        return Parameter(torch.randn(10, 10))
    parameters = [make_param(), make_param()]
    param_list = nn.ParameterList(parameters)
    # Invariant: the plain Python list and the ParameterList hold the very
    # same Parameter objects (identity, not equality), in the same order,
    # whether accessed by index, iteration, or .parameters().
    def check():
        self.assertEqual(len(parameters), len(param_list))
        for p1, p2 in zip(parameters, param_list):
            self.assertIs(p1, p2)
        for p1, p2 in zip(parameters, param_list.parameters()):
            self.assertIs(p1, p2)
        for i in range(len(parameters)):
            self.assertIs(parameters[i], param_list[i])
    check()
    # In-place += with a list of parameters.
    parameters += [make_param()]
    param_list += [parameters[-1]]
    check()
    # append()
    parameters.append(make_param())
    param_list.append(parameters[-1])
    check()
    # extend()
    next_params = [make_param(), make_param()]
    parameters.extend(next_params)
    param_list.extend(next_params)
    check()
    # Item assignment.
    parameters[2] = make_param()
    param_list[2] = parameters[2]
    check()
    # += / extend with a bare Parameter (not an iterable of parameters)
    # must raise TypeError.
    with self.assertRaises(TypeError):
        param_list += make_param()
    with self.assertRaises(TypeError):
        param_list.extend(make_param())
    # A ParameterList can also be filled from another module's
    # parameters() iterator, including nested submodules.
    l1 = nn.Linear(1, 2)
    l2 = nn.Linear(2, 3)
    l3 = nn.Linear(3, 2)
    l4 = nn.Linear(2, 3)
    subnet = nn.Sequential(l3, l4)
    s = nn.Sequential(
        OrderedDict([
            ("layer1", l1),
            ("layer2", l2),
            ("layer3", l3),
            ("layer4", l4),
            ("subnet_layer", subnet)
        ])
    )
    parameters = list(s.parameters())
    param_list = nn.ParameterList()
    param_list.extend(s.parameters())
    check()