The following 21 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.PReLU(). Each snippet is an excerpt from a larger module and assumes that torch.nn has been imported as nn (plus whatever other names its original project defines).
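As a quick reference before the extracted examples: nn.PReLU learns the negative-slope coefficient a in PReLU(x) = max(0, x) + a * min(0, x). With num_parameters=1 (the default) one slope is shared across all channels; passing num_parameters=C learns one slope per channel, and init sets the starting value (0.25 by default). Below is a minimal standalone sketch; the tensor shapes and variable names are illustrative only and do not come from any of the projects quoted afterwards.

import torch
import torch.nn as nn

# one learnable slope shared across all channels (the default)
shared = nn.PReLU()

# one learnable slope per channel, starting at 0.1 instead of the default 0.25
per_channel = nn.PReLU(num_parameters=16, init=0.1)

x = torch.randn(4, 16, 8, 8)               # (batch, channels, height, width)
y = per_channel(x)                          # output has the same shape as x
print(y.shape, per_channel.weight.shape)    # torch.Size([4, 16, 8, 8]) torch.Size([16])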
def __init__(self):
    super(Net, self).__init__()
    # one PReLU module per conv layer; each nn.PReLU() learns a single shared slope
    self.conv1_1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
    self.prelu1_1 = nn.PReLU()
    self.conv1_2 = nn.Conv2d(32, 32, kernel_size=5, padding=2)
    self.prelu1_2 = nn.PReLU()
    self.conv2_1 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
    self.prelu2_1 = nn.PReLU()
    self.conv2_2 = nn.Conv2d(64, 64, kernel_size=5, padding=2)
    self.prelu2_2 = nn.PReLU()
    self.conv3_1 = nn.Conv2d(64, 128, kernel_size=5, padding=2)
    self.prelu3_1 = nn.PReLU()
    self.conv3_2 = nn.Conv2d(128, 128, kernel_size=5, padding=2)
    self.prelu3_2 = nn.PReLU()
    self.preluip1 = nn.PReLU()
    self.ip1 = nn.Linear(128 * 3 * 3, 2)
    self.ip2 = nn.Linear(2, 10)
def test_changing_model_reinitializes_optimizer(self, net, data):
    # The idea is that we change the model using `set_params` to
    # add parameters. Since the optimizer depends on the model
    # parameters it needs to be reinitialized.
    X, y = data

    net.set_params(module__nonlin=F.relu)
    net.fit(X, y)

    net.set_params(module__nonlin=nn.PReLU())
    assert isinstance(net.module_.nonlin, nn.PReLU)
    d1 = net.module_.nonlin.weight.data.clone().cpu().numpy()

    # make sure that we do not initialize again by making sure that
    # the network is initialized and by using partial_fit.
    assert net.initialized_
    net.partial_fit(X, y)
    d2 = net.module_.nonlin.weight.data.clone().cpu().numpy()

    # all newly introduced parameters should have been trained (changed)
    # by the optimizer after 10 epochs.
    assert (abs(d2 - d1) > 1e-05).all()
def test_repr_fitted_works(self, net_cls, module_cls, data):
    X, y = data
    net = net_cls(
        module_cls,
        module__num_units=11,
        module__nonlin=nn.PReLU(),
    )
    net.fit(X[:50], y[:50])
    result = net.__repr__()
    expected = """<class 'skorch.net.NeuralNetClassifier'>[initialized](
  module_=MyClassifier(
    (dense0): Linear(in_features=20, out_features=11)
    (nonlin): PReLU(num_parameters=1)
    (dropout): Dropout(p=0.5)
    (dense1): Linear(in_features=11, out_features=10)
    (output): Linear(in_features=10, out_features=2)
  ),
)"""
    assert result == expected
def ELUCons(elu, nchan):
    if elu:
        return nn.ELU(inplace=True)
    else:
        return nn.PReLU(nchan)

# normalization between sub-volumes is necessary
# for good performance
def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL='relu', same_padding=False, bn=False):
    super(Conv2d, self).__init__()
    padding = int((kernel_size - 1) / 2) if same_padding else 0
    self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding)
    self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None
    if NL == 'relu':
        self.relu = nn.ReLU(inplace=True)
    elif NL == 'prelu':
        self.relu = nn.PReLU()
    else:
        self.relu = None
def __init__(self, in_features, out_features, NL='relu'):
    super(FC, self).__init__()
    self.fc = nn.Linear(in_features, out_features)
    if NL == 'relu':
        self.relu = nn.ReLU(inplace=True)
    elif NL == 'prelu':
        self.relu = nn.PReLU()
    else:
        self.relu = None
def __init__(self, bn=False, num_classes=10):
    super(CMTL, self).__init__()

    self.num_classes = num_classes
    self.base_layer = nn.Sequential(Conv2d(1, 16, 9, same_padding=True, NL='prelu', bn=bn),
                                    Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn))

    self.hl_prior_1 = nn.Sequential(Conv2d(32, 16, 9, same_padding=True, NL='prelu', bn=bn),
                                    nn.MaxPool2d(2),
                                    Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn),
                                    nn.MaxPool2d(2),
                                    Conv2d(32, 16, 7, same_padding=True, NL='prelu', bn=bn),
                                    Conv2d(16, 8, 7, same_padding=True, NL='prelu', bn=bn))

    self.hl_prior_2 = nn.Sequential(nn.AdaptiveMaxPool2d((32, 32)),
                                    Conv2d(8, 4, 1, same_padding=True, NL='prelu', bn=bn))

    self.hl_prior_fc1 = FC(4 * 1024, 512, NL='prelu')
    self.hl_prior_fc2 = FC(512, 256, NL='prelu')
    self.hl_prior_fc3 = FC(256, self.num_classes, NL='prelu')

    self.de_stage_1 = nn.Sequential(Conv2d(32, 20, 7, same_padding=True, NL='prelu', bn=bn),
                                    nn.MaxPool2d(2),
                                    Conv2d(20, 40, 5, same_padding=True, NL='prelu', bn=bn),
                                    nn.MaxPool2d(2),
                                    Conv2d(40, 20, 5, same_padding=True, NL='prelu', bn=bn),
                                    Conv2d(20, 10, 5, same_padding=True, NL='prelu', bn=bn))

    self.de_stage_2 = nn.Sequential(Conv2d(18, 24, 3, same_padding=True, NL='prelu', bn=bn),
                                    Conv2d(24, 32, 3, same_padding=True, NL='prelu', bn=bn),
                                    nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1, output_padding=0, bias=True),
                                    nn.PReLU(),
                                    nn.ConvTranspose2d(16, 8, 4, stride=2, padding=1, output_padding=0, bias=True),
                                    nn.PReLU(),
                                    Conv2d(8, 1, 1, same_padding=True, NL='relu', bn=bn))
def prelu(num_param=1, init=0.25):
    # factory for a PReLU with a configurable number of slopes and initial value
    return nn.PReLU(num_parameters=num_param, init=init)

# soft
def pytorch_activation(name="relu"):
    if name == "tanh":
        return nn.Tanh()
    if name == "prelu":
        return nn.PReLU()
    if name == "sigmoid":
        return nn.Sigmoid()
    if name == "log_sigmoid":
        return nn.LogSigmoid()
    return nn.ReLU()
def __init__(self, inChannel, outChannel, kSize, dropout, prelus, dilated):
    super().__init__()
    pad = int((kSize - 1) / 2)
    # per-channel PReLU when `prelus` is True, otherwise in-place ReLU
    self.conv1a = nn.Conv2d(inChannel, outChannel, (kSize, 1), stride=1, padding=(pad, 0))
    self.nonLinear1a = prelus and nn.PReLU(outChannel) or nn.ReLU(True)
    self.conv1b = nn.Conv2d(outChannel, outChannel, (1, kSize), stride=1, padding=(0, pad))
    self.bn1 = nn.BatchNorm2d(outChannel, eps=1e-3)
    self.nonLinear1b = prelus and nn.PReLU(outChannel) or nn.ReLU(True)
    self.conv2a = nn.Conv2d(inChannel, outChannel, (kSize, 1), stride=1, padding=(pad * dilated, 0), dilation=(dilated, 1))
    self.nonLinear2 = prelus and nn.PReLU(outChannel) or nn.ReLU(True)
    self.conv2b = nn.Conv2d(outChannel, outChannel, (1, kSize), stride=1, padding=(0, pad * dilated), dilation=(1, dilated))
    self.bn2 = nn.BatchNorm2d(outChannel, eps=1e-3)
    self.dropout = nn.Dropout(dropout)
def __init__(self, inChannel, outChannel, kSize, dropout, prelus, dilated):
    super().__init__()
    self.main = nonBt1dMain(inChannel, outChannel, kSize, dropout, prelus, dilated)
    self.nonLinear = prelus and nn.PReLU(outChannel) or nn.ReLU(True)
def __init__(self, inChannel, outChannel, kSize, dropout, prelus):
    super().__init__()
    pad = int((kSize - 1) / 2)
    self.main = nn.Conv2d(inChannel, outChannel - inChannel, kSize, stride=2, padding=pad)
    self.other = nn.MaxPool2d(2, stride=2)
    self.bn = nn.BatchNorm2d(outChannel, eps=1e-3)
    self.dropout = nn.Dropout(dropout)
    self.nonLinear = prelus and nn.PReLU(outChannel) or nn.ReLU(True)
def __init__(self):
    super(PReLUNet, self).__init__()
    # three channel-wise slopes, matching the 3-channel Keras PReLU in the test below
    self.prelu = nn.PReLU(3)
def test_prelu(self):
    keras_model = Sequential()
    keras_model.add(PReLU(input_shape=(3, 32, 32), shared_axes=(2, 3), name='prelu'))
    keras_model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.SGD())

    pytorch_model = PReLUNet()

    self.transfer(keras_model, pytorch_model)
    self.assertEqualPrediction(keras_model, pytorch_model, self.test_data)
def build_discriminator(w_in, h_in, f_first, num_down_layers, norm, p_dropout):
    net = nn.Sequential()

    if (w_in % 2 != 0) or (h_in % 2 != 0):
        raise ValueError('input width and height must be even numbers')
    f_prev = 3
    f = f_first
    w = w_in
    h = h_in
    for i in range(num_down_layers):
        if i == num_down_layers - 1:
            pad_w = 0
            pad_h = 0
        else:
            if (w % 4 == 2):
                pad_w = 1
            else:
                pad_w = 0
            if (h % 4 == 2):
                pad_h = 1
            else:
                pad_h = 0
        if (norm == 'weight') or (norm == 'weight-affine'):
            net.add_module('level.{0}.conv'.format(i),
                WeightNormalizedConv2d(f_prev, f, 4, 2, (1 + pad_h, 1 + pad_w),
                    scale=(norm == 'weight-affine'), bias=(norm == 'weight-affine')))
        else:
            net.add_module('level.{0}.conv'.format(i),
                nn.Conv2d(f_prev, f, 4, 2, (1 + pad_h, 1 + pad_w)))
        if (norm == 'batch') and (i > 0):
            net.add_module('level.{0}.batchnorm'.format(i), nn.BatchNorm2d(f))
        if norm == 'weight':
            net.add_module('level.{0}.tprelu'.format(i), TPReLU(f))
        else:
            net.add_module('level.{0}.prelu'.format(i), nn.PReLU(f))
        f_prev = f
        f = f * 2
        w = (w + pad_w * 2) // 2
        h = (h + pad_h * 2) // 2
    if p_dropout > 0:
        net.add_module('final.dropout', nn.Dropout(p_dropout))
    if (norm == 'weight') or (norm == 'weight-affine'):
        net.add_module('final.conv', WeightNormalizedConv2d(f_prev, 1, (h, w)))
    else:
        net.add_module('final.conv', nn.Conv2d(f_prev, 1, (h, w)))
    net.add_module('final.sigmoid', nn.Sigmoid())
    net.add_module('final.view', View(1))
    return net
def build_reverser(w_in, h_in, f_first, num_down_layers, code_size, norm, spatial_dropout_r):
    net = nn.Sequential()

    if (w_in % 2 != 0) or (h_in % 2 != 0):
        raise ValueError('input width and height must be even numbers')
    f_prev = 3
    f = f_first
    w = w_in
    h = h_in
    for i in range(num_down_layers):
        if i == num_down_layers - 1:
            pad_w = 0
            pad_h = 0
        else:
            if (w % 4 == 2):
                pad_w = 1
            else:
                pad_w = 0
            if (h % 4 == 2):
                pad_h = 1
            else:
                pad_h = 0
        if (norm == 'weight') or (norm == 'weight-affine'):
            net.add_module('level.{0}.conv'.format(i),
                WeightNormalizedConv2d(f_prev, f, 4, 2, (1 + pad_h, 1 + pad_w),
                    scale=(norm == 'weight-affine'), bias=(norm == 'weight-affine')))
        else:
            net.add_module('level.{0}.conv'.format(i),
                nn.Conv2d(f_prev, f, 4, 2, (1 + pad_h, 1 + pad_w)))
        if i >= 1 and spatial_dropout_r > 0:
            net.add_module('level.{0}.sd'.format(i), nn.Dropout2d(spatial_dropout_r))
        if (norm == 'batch') and (i > 0):
            net.add_module('level.{0}.batchnorm'.format(i), nn.BatchNorm2d(f))
        if norm == 'weight':
            net.add_module('level.{0}.tprelu'.format(i), TPReLU(f))
        else:
            net.add_module('level.{0}.prelu'.format(i), nn.PReLU(f))
        f_prev = f
        f = f * 2
        w = (w + pad_w * 2) // 2
        h = (h + pad_h * 2) // 2
    if (norm == 'weight') or (norm == 'weight-affine'):
        net.add_module('final.conv', WeightNormalizedConv2d(f_prev, code_size, (h, w)))
    else:
        net.add_module('final.conv', nn.Conv2d(f_prev, code_size, (h, w)))
    # net.add_module('final.tanh', nn.Tanh())
    net.add_module('final.view', View(code_size))
    return net