1. What is the difference between nn and nn.functional in PyTorch?
The two are largely the same: one is a wrapped class, the other a function you can call directly. We can look at how the two are actually implemented; below I use the 1D convolution Conv1d as an example. Conv1d under torch.nn:
class Conv1d(_ConvNd):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = _single(padding)
        dilation = _single(dilation)
        super(Conv1d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _single(0), groups, bias)

    def forward(self, input):
        return F.conv1d(input, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
conv1d under torch.nn.functional:
def conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1,
           groups=1):
    if input is not None and input.dim() != 3:
        raise ValueError("Expected 3D tensor as input, got {}D tensor instead.".format(input.dim()))
    f = ConvNd(_single(stride), _single(padding), _single(dilation), False,
               _single(0), groups, torch.backends.cudnn.benchmark,
               torch.backends.cudnn.deterministic, torch.backends.cudnn.enabled)
    return f(input, weight, bias)
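To make the relationship concrete, here is a minimal sketch (my own illustration, not from the source above) showing that calling an nn.Conv1d module and calling F.conv1d with that module's stored weight and bias give the same result:

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv1d(in_channels=8, out_channels=16, kernel_size=3, padding=1)
x = torch.randn(4, 8, 32)   # (batch, channels, length)

out_module = conv(x)        # the module holds weight/bias as state
out_functional = F.conv1d(x, conv.weight, conv.bias, stride=1, padding=1)

print(torch.allclose(out_module, out_functional))   # True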
An example that mixes the two styles:

import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(512, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 2)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        # F.dropout knows nothing about model.train()/model.eval(),
        # so the training flag has to be passed explicitly (see the note below).
        x = F.relu(F.dropout(self.fc2(x), 0.5, training=self.training))
        x = F.dropout(self.fc3(x), 0.5, training=self.training)
        return x
What needs to keep state here is mainly the three linear transformations, so three nn.Linear objects are defined when the Module is constructed; operations such as relu and dropout that keep no state can be called directly in forward. Note: dropout has a pitfall — with the functional form you must set the training state yourself (training=self.training, as in the code above).
Which style you prefer is a matter of taste, but the official PyTorch recommendation is: use the nn.Xxx form for layers with learnable parameters (e.g. conv2d, linear, batch_norm), and for operations without learnable parameters (e.g. maxpool, loss functions, activation functions) use either nn.functional.xxx or nn.Xxx as you like. For dropout, however, I strongly recommend the nn.Xxx form, because dropout is normally applied only during training and disabled during evaluation. With dropout defined as nn.Dropout, calling model.eval() turns off every dropout layer in the model; with dropout defined via nn.functional.dropout, calling model.eval() does not turn it off.
Everything in nn.functional is a stateless function with no side effects: a function never holds parameters (Variables) internally, it is purely a transformation from input to output. Things under nn may or may not hold state; they are generally subclasses of nn.Module and can use nn.Module's machinery to conveniently manage whatever variables (state) they need.
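A minimal sketch (my own illustration) of the difference described above: the module form is switched off by model.eval(), while the functional form with default arguments is not:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.ones(1, 10)

m = nn.Sequential(nn.Dropout(0.5))
m.eval()
print(m(x))                               # identical to x: nn.Dropout is disabled in eval mode

print(F.dropout(x, 0.5))                  # still zeroes elements at random: default is training=True
print(F.dropout(x, 0.5, training=False))  # disabled only when the flag is passed explicitly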
2. Several ways to initialize weights in PyTorch
1) Call a weight-initialization helper at the end of __init__ and have it iterate over net.modules():

class discriminator(nn.Module):
    def __init__(self, dataset='mnist'):
        super(discriminator, self).__init__()
        ...
        self.conv = nn.Sequential(
            nn.Conv2d(self.input_dim, 64, 4, 2, 1),
            nn.ReLU(),
        )
        ...
        self.fc = nn.Sequential(
            nn.Linear(32, 64 * (self.input_height // 2) * (self.input_width // 2)),
            nn.BatchNorm1d(64 * (self.input_height // 2) * (self.input_width // 2)),
            nn.ReLU(),
        )
        self.deconv = nn.Sequential(
            nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
            # nn.Sigmoid(),  # EBGAN does not work well when using Sigmoid().
        )
        utils.initialize_weights(self)

    def forward(self, input):
        ...

def initialize_weights(net):
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            m.weight.data.normal_(0, 0.02)
            m.bias.data.zero_()
        elif isinstance(m, nn.ConvTranspose2d):
            m.weight.data.normal_(0, 0.02)
            m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 0.02)
            m.bias.data.zero_()
2) Define an init function and apply it to the whole network with net.apply():

def init_weights(m):
    print(m)
    if type(m) == nn.Linear:
        m.weight.data.fill_(1.0)
        print(m.weight)

net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
net.apply(init_weights)
3) Dispatch on the class name inside the init function and apply it with net.apply():

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)

net.apply(weights_init)
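The same apply() pattern also works with the helpers in torch.nn.init instead of writing into .data directly; a small sketch (the particular init choices here are my own, not from the original text):

import torch.nn as nn
import torch.nn.init as init

def init_weights(m):
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        init.kaiming_normal_(m.weight)
        if m.bias is not None:
            init.zeros_(m.bias)
    elif isinstance(m, nn.Linear):
        init.xavier_uniform_(m.weight)
        init.zeros_(m.bias)
    elif isinstance(m, nn.BatchNorm2d):
        init.ones_(m.weight)
        init.zeros_(m.bias)

net = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU())
net.apply(init_weights)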
3. Points to note in PyTorch
1) Running the model in parallel on GPUs (here this usually means a single machine with multiple cards) can improve throughput; there are roughly two ways to do it.
Putting a Module on the GPU is simple and takes just two steps:
model = model.cuda(): move all of the model's parameters to the GPU
input.cuda(): put the input data on the GPU as well
For parallel computation across multiple GPUs, PyTorch provides two functions that implement simple and efficient parallel GPU computation.
① nn.parallel.data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None)
② class torch.nn.DataParallel(module, device_ids=None, output_device=None, dim=0)
As you can see, their parameters are very similar: device_ids specifies which GPUs to run on, and output_device specifies which GPU the output is gathered to. The only difference is that the former directly performs the parallel computation and returns the result, while the latter returns a new module that automatically parallelizes across multiple GPUs.
# method 1: wrap the model in nn.DataParallel, which returns a new module
new_net = nn.DataParallel(net, device_ids=[0, 1])
output = new_net(input)

# method 2: call the functional form directly on the original module
output = nn.parallel.data_parallel(net, input, device_ids=[0, 1])

# method 3: third-party wrappers from a separate parallel.py helper (not part of torch itself)
from parallel import DataParallelModel, DataParallelCriterion
parallel_model = DataParallelModel(model)               # Encapsulate the model
parallel_loss = DataParallelCriterion(loss_function)    # Encapsulate the loss function

predictions = parallel_model(inputs)        # Parallel forward pass
# "predictions" is a tuple of n_gpu tensors
loss = parallel_loss(predictions, labels)   # Compute loss function in parallel
loss.backward()                             # Backward pass
optimizer.step()                            # Optimizer step
DataParallel works by splitting an input batch into equal chunks and sending each chunk to its corresponding GPU; the gradients computed on the individual GPUs are then accumulated. All data associated with the Module is replicated to each GPU as a shallow copy, so module attributes should be treated as read-only.
model = Model(input_size, output_size)
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # dim = 0: [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    model = nn.DataParallel(model)
if torch.cuda.is_available():
    model.cuda()
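To complete the two steps mentioned above (model.cuda() and input.cuda()), here is a minimal training-step sketch; data_loader, the loss and the optimizer settings are placeholders of my own, not from the original text:

criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for inputs, targets in data_loader:
    if torch.cuda.is_available():
        inputs, targets = inputs.cuda(), targets.cuda()   # move the batch to the GPU as well
    outputs = model(inputs)              # DataParallel splits the batch across GPUs along dim 0
    loss = criterion(outputs, targets)
    optimizer.zero_grad()
    loss.backward()                      # per-GPU gradients are accumulated on the default device
    optimizer.step()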
4. Dynamically adjusting the learning rate in PyTorch
1) Write your own function that sets the learning rate from the epoch.

def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

Example:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
for epoch in range(10):
    adjust_learning_rate(optimizer, epoch)
    train(...)
    validate(...)
2) Set different learning rates for different layers of the model.

import torchvision

model = torchvision.models.resnet101(pretrained=True)
large_lr_layers = list(map(id, model.fc.parameters()))
small_lr_layers = filter(lambda p: id(p) not in large_lr_layers, model.parameters())
optimizer = torch.optim.SGD([
    {"params": model.fc.parameters()},           # the fc layer uses the default lr below
    {"params": small_lr_layers, "lr": 1e-4}
], lr=1e-2, momentum=0.9)

Note: the fc layer (large_lr_layers) trains with lr = 1e-2, the remaining layers (small_lr_layers) with lr = 1e-4, and both parameter groups share the same momentum.
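A quick sanity check (a small sketch of my own) is to print the per-group settings stored in optimizer.param_groups:

for i, group in enumerate(optimizer.param_groups):
    print(i, group['lr'], group['momentum'])
# expected output:
# 0 0.01 0.9
# 1 0.0001 0.9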
A related trick is to freeze part of the model and hand only the trainable parameters to the optimizer:

optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.0001,
                       betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        for p in self.parameters():
            p.requires_grad = False
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

This freezes every parameter defined before the for loop (conv1 and conv2) and trains only the parameters defined after it (fc1, fc2, fc3). Note that the optimizer must then be constructed with filter(lambda p: p.requires_grad, model.parameters()), as in the line above, so that frozen parameters are excluded.
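To check which parameters are actually still trainable after the freeze, something like this (my own small sketch) can be used:

model = Net()
trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print(trainable)   # should list only the fc1/fc2/fc3 weights and biases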
3) Manually set the learning-rate decay intervals.
Example:
def adjust_learning_rate(optimizer, lr):
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

for epoch in range(60):
    lr = 30e-5
    if epoch > 25:
        lr = 15e-5
    if epoch > 30:
        lr = 7.5e-5
    if epoch > 35:
        lr = 3e-5
    if epoch > 40:
        lr = 1e-5
    adjust_learning_rate(optimizer, lr)
4) Cosine annealing.
Example:
from torch.optim import lr_scheduler

epochs = 60
optimizer = optim.SGD(model.parameters(), lr=config.lr, momentum=0.9, weight_decay=1e-4)
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=(epochs // 9) + 1)
for epoch in range(epochs):
    scheduler.step(epoch)
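In recent PyTorch versions, passing the epoch to scheduler.step() is deprecated; the recommended order is to step the scheduler once per epoch after the optimizer has stepped. A minimal sketch (train_one_epoch is a placeholder of my own):

for epoch in range(epochs):
    train_one_epoch(model, optimizer)   # calls optimizer.step() inside
    scheduler.step()                    # then advance the scheduler once per epoch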
5) Decay at fixed epoch milestones with MultiStepLR.

from torch.optim.lr_scheduler import MultiStepLR

scheduler = MultiStepLR(optimizer, milestones=[30, 80], gamma=0.1)
for epoch in range(100):
    train(...)
    validate(...)
    scheduler.step()