The following code examples, collected from open-source Python projects, illustrate how to use torch.nn.Conv3d(). Unless a snippet performs its own imports, assume import torch, import torch.nn as nn, and (where F appears) import torch.nn.functional as F; names such as ELUCons, ContBatchNorm3d, passthrough, _make_nConv, and weights_init are helpers defined elsewhere in their respective projects.
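As a quick orientation before the excerpts (this sketch is not taken from any of the projects below): nn.Conv3d operates on 5-D tensors laid out as (batch, channels, depth, height, width), where the depth axis usually holds video frames or volume slices.

import torch
import torch.nn as nn

# A 3x3x3 kernel with padding=1 preserves depth, height and width.
conv = nn.Conv3d(3, 8, kernel_size=3, padding=1)
x = torch.randn(2, 3, 16, 112, 112)  # (N, C, D, H, W)
y = conv(x)
print(y.shape)  # torch.Size([2, 8, 16, 112, 112])
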
Example 1:

def __init__(self):
    super(C3D_net, self).__init__()
    self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1))
    self.relu = nn.ReLU()
    self.maxpool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
    self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1))
    self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
    self.conv3 = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1))
    self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
    self.conv4 = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1))
    self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
    self.conv5 = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1))
    self.maxpool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
    self.num_out_maxpool5 = 2304
    self.fc6 = nn.Linear(self.num_out_maxpool5, 2048)  # TBA
    self.fc7 = nn.Linear(2048, 2048)
    # self.dropout = nn.Dropout(p=0.5)
    self.fc8 = nn.Linear(2048, 101)
    self._initialize_weights()

Example 2:

def test_multi_gpu(self):
    import torch
    from torch.autograd import Variable
    import torch.nn as nn
    from torch.nn.parallel.data_parallel import data_parallel
    from inferno.extensions.containers.graph import Graph

    input_shape = [8, 1, 3, 128, 128]
    model = Graph() \
        .add_input_node('input') \
        .add_node('conv0', nn.Conv3d(1, 10, 3, padding=1), previous='input') \
        .add_node('conv1', nn.Conv3d(10, 1, 3, padding=1), previous='conv0') \
        .add_output_node('output', previous='conv1')
    model.cuda()
    input = Variable(torch.rand(*input_shape).cuda())
    output = data_parallel(model, input, device_ids=[0, 1, 2, 3])

Example 3:

def __init__(self, input_example_non_batch, output_dim, reshape=None, dropout=0):
    super(ObserveEmbeddingCNN3D4C, self).__init__()
    self.reshape = reshape
    if self.reshape is not None:
        input_example_non_batch = input_example_non_batch.view(self.reshape)
        self.reshape.insert(0, -1)  # For correct handling of the batch dimension in self.forward
    if input_example_non_batch.dim() == 3:
        self.input_sample = input_example_non_batch.unsqueeze(0).cpu()
    elif input_example_non_batch.dim() == 4:
        self.input_sample = input_example_non_batch.cpu()
    else:
        util.logger.log('ObserveEmbeddingCNN3D4C: Expecting a 4d input_example_non_batch (num_channels x depth x height x width) or a 3d input_example_non_batch (depth x height x width). Received: {0}'.format(input_example_non_batch.size()))
    self.input_channels = self.input_sample.size(0)
    self.output_dim = output_dim
    self.conv1 = nn.Conv3d(self.input_channels, 64, 3)
    self.conv2 = nn.Conv3d(64, 64, 3)
    self.conv3 = nn.Conv3d(64, 128, 3)
    self.conv4 = nn.Conv3d(128, 128, 3)
    self.drop = nn.Dropout(dropout)

Example 4:

def __init__(self, input_size, hidden_size, num_classes, dim, num_kernels, max_document_length):
    super(CNN, self).__init__()
    self.max_document_length = max_document_length
    self.conv = nn.Conv3d(1, input_size, (1, 1, dim), padding=0)
    self.fc1 = nn.Linear(input_size * num_kernels, hidden_size)
    self.fc2 = nn.Linear(hidden_size, num_classes)
    self.init_weights()

Example 5:

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        torch.nn.init.xavier_uniform(m.weight.data, gain=1)
        m.bias.data.fill_(0.1)
    # elif classname.find('Conv3d') != -1:
    #     torch.nn.init.xavier_uniform(m.weight.data, gain=1)
    # elif classname.find('ConvTranspose3d') != -1:
    #     torch.nn.init.xavier_uniform(m.weight.data, gain=1)

Example 6:

def weights_init(m):
    if isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
        m.weight.data.normal_(0, 0.02)
        m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d):
        m.weight.data.normal_(1, 0.02)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        m.bias.data.zero_()

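Initializer callbacks like the one above are usually registered with nn.Module.apply, which calls the function on every submodule recursively. A minimal usage sketch (the two-layer model is illustrative):

import torch.nn as nn

model = nn.Sequential(
    nn.Conv3d(3, 8, kernel_size=3, padding=1),
    nn.BatchNorm3d(8),
)
model.apply(weights_init)  # visits Conv3d and BatchNorm3d; Sequential itself matches no branch
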
Example 7:

def __init__(self, num_layers, in_channels=3, out_channels=8, batch_norm=True):
    super(ConvEncoder3D, self).__init__()
    # set up number of layers
    if isinstance(num_layers, int):
        num_layers = [num_layers, 0]
    network = []
    # several 3x3 convolutional layers and max-pooling layers
    for k in range(num_layers[0]):
        # 3d convolution
        network.append(nn.Conv3d(in_channels, out_channels, 3, padding=1))
        # batch normalization
        if batch_norm:
            network.append(nn.BatchNorm3d(out_channels))
        # non-linearity and max-pooling
        network.append(nn.LeakyReLU(0.2, True))
        network.append(nn.MaxPool3d(2))
        # double channel size
        in_channels = out_channels
        out_channels *= 2
    # several 1x1 convolutional layers
    for k in range(num_layers[1]):
        # 3d convolution
        network.append(nn.Conv3d(in_channels, in_channels, 1))
        # batch normalization
        if batch_norm:
            network.append(nn.BatchNorm3d(in_channels))
        # non-linearity
        network.append(nn.LeakyReLU(0.2, True))
    # set up modules for network
    self.network = nn.Sequential(*network)
    self.network.apply(weights_init)

Example 8:

def __init__(self):
    super(MiniC3D, self).__init__()
    self.conv = nn.Sequential(
        nn.Conv3d(3, 64, 7, padding=1),
        nn.ReLU(),
        nn.MaxPool3d((1, 2, 2), (1, 2, 2)),
        nn.Conv3d(64, 128, 5, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(2, 2),
        nn.Conv3d(128, 256, (3, 3, 3), padding=1),
        nn.ReLU(),
        nn.Conv3d(256, 256, (3, 3, 3), padding=1),
        nn.ReLU(),
        nn.MaxPool3d(2, 2),
        nn.Conv3d(256, 512, (3, 3, 3), padding=1),
        nn.ReLU(),
        nn.Conv3d(512, 512, (3, 3, 3), padding=2),
        nn.ReLU(),
        nn.MaxPool3d(2, 2),
        nn.Conv3d(512, 512, (3, 3, 3), padding=1),
        nn.ReLU(),
        nn.Conv3d(512, 512, (3, 3, 3), padding=1),
        nn.ReLU(),
        nn.MaxPool3d(2, 2),
    )
    self.fc = nn.Sequential(
        nn.Linear(6144, 4096),
        nn.ReLU(),
        nn.Dropout(p=0.5),
        nn.Linear(4096, 2048),
        nn.ReLU(),
        nn.Dropout(p=0.5),
        nn.Linear(2048, 1),
    )

Example 9:

def __init__(self):
    super(C3D, self).__init__()
    self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
    self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
    self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
    self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
    self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
    self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1))
    self.fc6 = nn.Linear(8192, 4096)
    self.fc7 = nn.Linear(4096, 4096)
    self.fc8 = nn.Linear(4096, 487)
    self.dropout = nn.Dropout(p=0.5)
    self.relu = nn.ReLU()
    self.softmax = nn.Softmax()

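The 8192 input size of fc6 above matches the standard C3D setting of 3-channel, 16-frame, 112x112 clips. A sketch that rebuilds just the conv/pool stack to verify the flattened feature size (ReLUs are omitted because they do not change shapes):

import torch
import torch.nn as nn

features = nn.Sequential(
    nn.Conv3d(3, 64, 3, padding=1), nn.MaxPool3d((1, 2, 2), (1, 2, 2)),
    nn.Conv3d(64, 128, 3, padding=1), nn.MaxPool3d(2, 2),
    nn.Conv3d(128, 256, 3, padding=1), nn.Conv3d(256, 256, 3, padding=1), nn.MaxPool3d(2, 2),
    nn.Conv3d(256, 512, 3, padding=1), nn.Conv3d(512, 512, 3, padding=1), nn.MaxPool3d(2, 2),
    nn.Conv3d(512, 512, 3, padding=1), nn.Conv3d(512, 512, 3, padding=1),
    nn.MaxPool3d(2, 2, padding=(0, 1, 1)),
)
x = torch.randn(1, 3, 16, 112, 112)  # (N, C, frames, H, W)
print(features(x).shape)             # torch.Size([1, 512, 1, 4, 4])
print(features(x).flatten(1).shape)  # torch.Size([1, 8192]) -> fc6 input
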
Example 10:

def _layer_Conv(self):
    self.add_body(0, """
    @staticmethod
    def __conv(dim, name, **kwargs):
        if dim == 1:
            layer = nn.Conv1d(**kwargs)
        elif dim == 2:
            layer = nn.Conv2d(**kwargs)
        elif dim == 3:
            layer = nn.Conv3d(**kwargs)
        else:
            raise NotImplementedError()

        layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        return layer""")

Example 11:

def __init__(self, nchan, elu):
    super(LUConv, self).__init__()
    self.relu1 = ELUCons(elu, nchan)
    self.conv1 = nn.Conv3d(nchan, nchan, kernel_size=5, padding=2)
    self.bn1 = ContBatchNorm3d(nchan)

Example 12:

def __init__(self, outChans, elu):
    super(InputTransition, self).__init__()
    self.conv1 = nn.Conv3d(1, 16, kernel_size=5, padding=2)
    self.bn1 = ContBatchNorm3d(16)
    self.relu1 = ELUCons(elu, 16)

Example 13:

def __init__(self, inChans, nConvs, elu, dropout=False):
    super(DownTransition, self).__init__()
    outChans = 2 * inChans
    self.down_conv = nn.Conv3d(inChans, outChans, kernel_size=2, stride=2)
    self.bn1 = ContBatchNorm3d(outChans)
    self.do1 = passthrough
    self.relu1 = ELUCons(elu, outChans)
    self.relu2 = ELUCons(elu, outChans)
    if dropout:
        self.do1 = nn.Dropout3d()
    self.ops = _make_nConv(outChans, nConvs, elu)

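The kernel_size=2, stride=2 convolution above acts as learned downsampling: it halves depth, height and width, while the surrounding code doubles the channel count. A standalone illustration of that effect:

import torch
import torch.nn as nn

down_conv = nn.Conv3d(16, 32, kernel_size=2, stride=2)  # 16 -> 32 channels
x = torch.randn(1, 16, 32, 32, 32)
print(down_conv(x).shape)  # torch.Size([1, 32, 16, 16, 16]) -- D, H, W halved
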
Example 14:

def __init__(self, inChans, elu, nll):
    super(OutputTransition, self).__init__()
    self.conv1 = nn.Conv3d(inChans, 2, kernel_size=5, padding=2)
    self.bn1 = ContBatchNorm3d(2)
    self.conv2 = nn.Conv3d(2, 2, kernel_size=1)
    self.relu1 = ELUCons(elu, 2)
    if nll:
        self.softmax = F.log_softmax
    else:
        self.softmax = F.softmax

Example 15:

def conv(in_ch, out_ch, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, dim=2):
    # TODO: in the future some preprocessing goes here
    in_dim = dim
    if in_dim == 1:
        return nn.Conv1d(in_ch, out_ch, kernel_size, stride=stride, padding=padding,
                         dilation=dilation, groups=groups, bias=bias)
    elif in_dim == 2:
        return nn.Conv2d(in_ch, out_ch, kernel_size, stride=stride, padding=padding,
                         dilation=dilation, groups=groups, bias=bias)
    elif in_dim == 3:
        return nn.Conv3d(in_ch, out_ch, kernel_size, stride=stride, padding=padding,
                         dilation=dilation, groups=groups, bias=bias)

# Transposed Convolution

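For reference, a call such as the following (illustrative values) dispatches to nn.Conv3d:

layer = conv(3, 8, kernel_size=3, padding=1, dim=3)
print(type(layer).__name__)  # Conv3d
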
Example 16:

def is_conv_layer(layer, dim=None):
    # _ConvNd (from torch.nn.modules.conv) is the shared base class of Conv1d/2d/3d
    if dim is None:
        cls = _ConvNd
    elif dim == 1:
        cls = nn.Conv1d
    elif dim == 2:
        cls = nn.Conv2d
    elif dim == 3:
        cls = nn.Conv3d
    return isinstance(layer, cls)

Example 17:

def test_conv_modules_raise_error_on_incorrect_input_size(self):
    modules = [nn.Conv1d(3, 8, 3), nn.ConvTranspose1d(3, 8, 3),
               nn.Conv2d(3, 8, 3), nn.ConvTranspose2d(3, 8, 3),
               nn.Conv3d(3, 8, 3), nn.ConvTranspose3d(3, 8, 3)]
    invalid_input_dims = [(2, 4), (2, 4), (3, 5), (3, 5), (4, 6), (4, 6)]

    for invalid_dims, module in zip(invalid_input_dims, modules):
        for dims in invalid_dims:
            input = Variable(torch.Tensor(torch.Size((3,) * dims)))
            self.assertRaises(ValueError, lambda: module(input))

Example 18:

def is_sparseable(m):
    return True if hasattr(m, 'weight') and isinstance(m, (
        nn.Conv1d, nn.Conv2d, nn.Conv3d,
        nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d,
        nn.Linear)) else False

Example 19:

def __init__(self):
    super(Conv3DNet, self).__init__()
    self.conv = nn.Conv3d(3, 8, 5)

Example 20:

def __init__(self):
    super(C3D, self).__init__()
    self.group1 = nn.Sequential(
        nn.Conv3d(3, 64, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)))
    # init.xavier_normal(self.group1.state_dict()['weight'])
    self.group2 = nn.Sequential(
        nn.Conv3d(64, 128, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))
    # init.xavier_normal(self.group2.state_dict()['weight'])
    self.group3 = nn.Sequential(
        nn.Conv3d(128, 256, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(256, 256, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))
    # init.xavier_normal(self.group3.state_dict()['weight'])
    self.group4 = nn.Sequential(
        nn.Conv3d(256, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))
    # init.xavier_normal(self.group4.state_dict()['weight'])
    self.group5 = nn.Sequential(
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))
    # init.xavier_normal(self.group5.state_dict()['weight'])

    self.fc1 = nn.Sequential(
        nn.Linear(512 * 3 * 3, 2048),
        # nn.ReLU(),
        nn.Dropout(0.5))
    # init.xavier_normal(self.fc1.state_dict()['weight'])
    self.fc2 = nn.Sequential(
        nn.Linear(2048, 2048),
        nn.ReLU(),
        nn.Dropout(0.5))
    # init.xavier_normal(self.fc2.state_dict()['weight'])
    self.fc3 = nn.Sequential(
        nn.Linear(2048, 32))  # 101

    self._features = nn.Sequential(
        self.group1,
        self.group2,
        self.group3,
        self.group4,
        self.group5
    )
    self._classifier = nn.Sequential(
        self.fc1,
        self.fc2
    )

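Here fc1 expects 512 * 3 * 3 features, which again corresponds to (3, 16, 112, 112) clips: without padding in the last pooling stage, the five groups reduce such an input to (512, 1, 3, 3). A quick check, assuming the __init__ above lives in a C3D(nn.Module) subclass:

import torch

model = C3D()
x = torch.randn(1, 3, 16, 112, 112)
print(model._features(x).shape)  # expected: torch.Size([1, 512, 1, 3, 3])
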
Example 21:

def __init__(self):
    super(C3D, self).__init__()
    self.layers = nn.Sequential(
        nn.Conv3d(3, 64, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)),
        nn.Conv3d(64, 128, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
        nn.Conv3d(128, 256, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(256, 256, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
        nn.Conv3d(256, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))

    self.fc_s = nn.Sequential(
        nn.Linear(512 * 3 * 3, 2048),
        # nn.ReLU(),
        nn.Dropout(0.5),
        nn.Linear(2048, 2048),
        nn.ReLU(),
        nn.Dropout(0.5))
    self.fc3 = nn.Sequential(
        nn.Linear(2048, 10))  # 101
    # self._features = nn.Sequential(
    #     self.group1,
    #     self.group2,
    #     self.group3,
    #     self.group4,
    #     self.group5
    # )
    #
    # self._classifier = nn.Sequential(
    #     self.fc1,
    #     self.fc2
    # )

Example 22:

def __init__(self):
    super(C3D_cls20, self).__init__()
    self.group1 = nn.Sequential(
        nn.Conv3d(3, 64, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)))
    self.group2 = nn.Sequential(
        nn.Conv3d(64, 128, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))
    self.group3 = nn.Sequential(
        nn.Conv3d(128, 256, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(256, 256, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))
    self.group4 = nn.Sequential(
        nn.Conv3d(256, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))
    self.group5 = nn.Sequential(
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))

    self.fc1 = nn.Sequential(
        nn.Linear(512 * 3 * 3, 2048),
        nn.ReLU(),
        nn.Dropout(0.5))
    self.fc2 = nn.Sequential(
        nn.Linear(2048, 2048),
        nn.ReLU(),
        nn.Dropout(0.5))
    self.fc3 = nn.Sequential(
        nn.Linear(2048, 20))

    self._features = nn.Sequential(
        self.group1,
        self.group2,
        self.group3,
        self.group4,
        self.group5
    )
    self._classifier = nn.Sequential(
        self.fc1,
        self.fc2
    )

Example 23:

def __init__(self):
    super(C3D_cls46, self).__init__()
    self.group1 = nn.Sequential(
        nn.Conv3d(3, 64, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)))
    self.group2 = nn.Sequential(
        nn.Conv3d(64, 128, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))
    self.group3 = nn.Sequential(
        nn.Conv3d(128, 256, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(256, 256, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))
    self.group4 = nn.Sequential(
        nn.Conv3d(256, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))
    self.group5 = nn.Sequential(
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv3d(512, 512, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)))

    self.fc1 = nn.Sequential(
        nn.Linear(512 * 3 * 3, 2048),
        nn.ReLU(),
        nn.Dropout(0.5))
    self.fc2 = nn.Sequential(
        nn.Linear(2048, 2048),
        nn.ReLU(),
        nn.Dropout(0.5))
    self.fc3 = nn.Sequential(
        nn.Linear(2048, 46))

    self._features = nn.Sequential(
        self.group1,
        self.group2,
        self.group3,
        self.group4,
        self.group5
    )
    self._classifier = nn.Sequential(
        self.fc1,
        self.fc2
    )