I have implemented a simple feed-forward neural network in PyTorch. But I am wondering whether there is a better way to add a flexible number of layers to the network? Maybe by naming them inside a loop, but I heard that's not possible?

Currently I am doing it like this:
import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):

    def __init__(self, input_dim, output_dim, hidden_dim):
        super(Net, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim
        self.layer_dim = len(hidden_dim)
        self.fc1 = nn.Linear(self.input_dim, self.hidden_dim[0])
        i = 1
        if self.layer_dim > i:
            self.fc2 = nn.Linear(self.hidden_dim[i-1], self.hidden_dim[i])
            i += 1
        if self.layer_dim > i:
            self.fc3 = nn.Linear(self.hidden_dim[i-1], self.hidden_dim[i])
            i += 1
        if self.layer_dim > i:
            self.fc4 = nn.Linear(self.hidden_dim[i-1], self.hidden_dim[i])
            i += 1
        if self.layer_dim > i:
            self.fc5 = nn.Linear(self.hidden_dim[i-1], self.hidden_dim[i])
            i += 1
        if self.layer_dim > i:
            self.fc6 = nn.Linear(self.hidden_dim[i-1], self.hidden_dim[i])
            i += 1
        if self.layer_dim > i:
            self.fc7 = nn.Linear(self.hidden_dim[i-1], self.hidden_dim[i])
            i += 1
        if self.layer_dim > i:
            self.fc8 = nn.Linear(self.hidden_dim[i-1], self.hidden_dim[i])
            i += 1
        self.fcn = nn.Linear(self.hidden_dim[-1], self.output_dim)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        i = 1
        if self.layer_dim > i:
            x = F.relu(self.fc2(x))
            i += 1
        if self.layer_dim > i:
            x = F.relu(self.fc3(x))
            i += 1
        if self.layer_dim > i:
            x = F.relu(self.fc4(x))
            i += 1
        if self.layer_dim > i:
            x = F.relu(self.fc5(x))
            i += 1
        if self.layer_dim > i:
            x = F.relu(self.fc6(x))
            i += 1
        if self.layer_dim > i:
            x = F.relu(self.fc7(x))
            i += 1
        if self.layer_dim > i:
            x = F.relu(self.fc8(x))
            i += 1
        x = F.softmax(self.fcn(x))
        return x
You can put your layers in a ModuleList container:
import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):

    def __init__(self, input_dim, output_dim, hidden_dim):
        super(Net, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim
        current_dim = input_dim
        self.layers = nn.ModuleList()
        # Build one Linear layer per entry in hidden_dim, chaining the dimensions
        for hdim in hidden_dim:
            self.layers.append(nn.Linear(current_dim, hdim))
            current_dim = hdim
        # Final layer maps the last hidden size to the output size
        self.layers.append(nn.Linear(current_dim, output_dim))

    def forward(self, x):
        for layer in self.layers[:-1]:
            x = F.relu(layer(x))
        # Pass dim explicitly; calling softmax without it is deprecated and warns
        out = F.softmax(self.layers[-1](x), dim=-1)
        return out
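As a quick usage sketch (the dimensions and batch size below are just illustrative values, not something fixed by the class):

# Illustrative dimensions: 10 inputs, 3 output classes, two hidden layers
net = Net(input_dim=10, output_dim=3, hidden_dim=[64, 32])
x = torch.randn(8, 10)                  # a batch of 8 input vectors
probs = net(x)                          # relu through the hidden layers, softmax at the end
print(probs.shape)                      # torch.Size([8, 3]); each row sums to 1
print(len(list(net.parameters())))      # 6: weight + bias for each of the 3 Linear layers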
It is important to use a PyTorch container for these layers rather than a plain Python list, because otherwise the module does not register their parameters.
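To illustrate the difference (a minimal sketch; the class names here are hypothetical): layers stored in a plain Python list are invisible to parameters(), so an optimizer or a .to(device) call would silently skip them, while nn.ModuleList registers them as submodules.

import torch.nn as nn

class PlainListNet(nn.Module):
    def __init__(self):
        super().__init__()
        # Plain list: the Linear layers are NOT registered as submodules
        self.layers = [nn.Linear(10, 10), nn.Linear(10, 2)]

class ModuleListNet(nn.Module):
    def __init__(self):
        super().__init__()
        # ModuleList: the same layers are registered and their parameters tracked
        self.layers = nn.ModuleList([nn.Linear(10, 10), nn.Linear(10, 2)])

print(len(list(PlainListNet().parameters())))   # 0 -> an optimizer would see nothing to train
print(len(list(ModuleListNet().parameters())))  # 4 -> weights and biases of both layers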