Day158: Network Parameters

The parameters method

import torch.nn as nn
from collections import OrderedDict

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # layers registered directly as attributes
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3)
        self.conv2 = nn.Conv2d(64, 64, 3)
        self.maxpool1 = nn.MaxPool2d(2, 2)
        # layers registered inside a named Sequential container
        self.features = nn.Sequential(OrderedDict([
            ('conv3', nn.Conv2d(64, 128, 3)),
            ('conv4', nn.Conv2d(128, 128, 3)),
            ('relu1', nn.ReLU())
        ]))

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.maxpool1(x)
        x = self.features(x)
        return x

m = Model()
for p in m.parameters():
    print(type(p.data), p.size())

The output is:

<class 'torch.Tensor'> torch.Size([64, 3, 3, 3])
<class 'torch.Tensor'> torch.Size([64])
<class 'torch.Tensor'> torch.Size([64, 64, 3, 3])
<class 'torch.Tensor'> torch.Size([64])
<class 'torch.Tensor'> torch.Size([128, 64, 3, 3])
<class 'torch.Tensor'> torch.Size([128])
<class 'torch.Tensor'> torch.Size([128, 128, 3, 3])
<class 'torch.Tensor'> torch.Size([128])
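As a quick sanity check, we can also count the total number of trainable scalars and run a dummy forward pass (a minimal sketch; the 32x32 input size is an arbitrary choice for illustration):

import torch

total = sum(p.numel() for p in m.parameters())
print(total)  # 260160 for the model above

x = torch.randn(1, 3, 32, 32)  # dummy batch with one RGB image
print(m(x).size())             # torch.Size([1, 128, 10, 10])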

The most common use of parameters is to hand the model's parameters to an optimizer:

optimizer = torch.optim.SGD(m1.parameters(), lr=args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)
# m1 (the VGG model discussed further below) and args (command-line
# arguments) are assumed to be defined elsewhere
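Because parameters() simply returns an iterator, you can also pass parameter groups with per-group hyperparameters (a sketch; the learning rates here are placeholder values):

import torch

m = Model()
optimizer = torch.optim.SGD([
    {'params': m.features.parameters(), 'lr': 1e-3},  # smaller lr for this group
    {'params': m.conv1.parameters()},                 # uses the default lr below
    {'params': m.conv2.parameters()},
], lr=1e-2, momentum=0.9)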

The named_parameters method

m = Model()
for k, v in m.named_parameters():
    print(k, v.size())

The output is:

conv1.weight torch.Size([64, 3, 3, 3])
conv1.bias torch.Size([64])
conv2.weight torch.Size([64, 64, 3, 3])
conv2.bias torch.Size([64])
features.conv3.weight torch.Size([128, 64, 3, 3])
features.conv3.bias torch.Size([128])
features.conv4.weight torch.Size([128, 128, 3, 3])
features.conv4.bias torch.Size([128])
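Because the names are exposed, named_parameters makes it easy to, say, freeze every layer outside the features sub-module before fine-tuning (a sketch; which layers to freeze depends on your task):

m = Model()
for k, v in m.named_parameters():
    if not k.startswith('features'):
        v.requires_grad = False  # conv1 and conv2 are now frozen

# only hand the still-trainable parameters to the optimizer
trainable = [p for p in m.parameters() if p.requires_grad]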

Again taking the VGG network from earlier (m1) as an example:

for k, v in m1.named_parameters():
    print(k, v.size())

named_parameters returns key-value pairs, where k is the parameter's name and v is the parameter tensor itself. The output is:

vgg.0.weight torch.Size([64, 3, 3, 3])
vgg.0.bias torch.Size([64])
vgg.2.weight torch.Size([64, 64, 3, 3])
vgg.2.bias torch.Size([64])
vgg.5.weight torch.Size([128, 64, 3, 3])
vgg.5.bias torch.Size([128])
vgg.7.weight torch.Size([128, 128, 3, 3])
vgg.7.bias torch.Size([128])
vgg.10.weight torch.Size([256, 128, 3, 3])
vgg.10.bias torch.Size([256])
vgg.12.weight torch.Size([256, 256, 3, 3])
vgg.12.bias torch.Size([256])
vgg.14.weight torch.Size([256, 256, 3, 3])
vgg.14.bias torch.Size([256])
vgg.17.weight torch.Size([512, 256, 3, 3])
vgg.17.bias torch.Size([512])
vgg.19.weight torch.Size([512, 512, 3, 3])
vgg.19.bias torch.Size([512])
vgg.21.weight torch.Size([512, 512, 3, 3])
vgg.21.bias torch.Size([512])
vgg.24.weight torch.Size([512, 512, 3, 3])
vgg.24.bias torch.Size([512])
vgg.26.weight torch.Size([512, 512, 3, 3])
vgg.26.bias torch.Size([512])
vgg.28.weight torch.Size([512, 512, 3, 3])
vgg.28.bias torch.Size([512])

Parameter names follow the rule: attribute name, then (for container modules such as nn.Sequential) the sub-module's name or index within the container, then weight or bias, joined by dots. This makes it very convenient to assign values to the parameters of specific layers during fine-tuning, as we will see later when loading pretrained models.
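For instance, a common fine-tuning pattern is to copy from a checkpoint only those entries whose names and shapes match the current model (a sketch; pretrained.pth is a hypothetical file whose keys follow the naming rule above):

import torch

m = Model()
pretrained = torch.load('pretrained.pth')  # hypothetical checkpoint path
own = m.state_dict()

# keep only entries whose name and shape both match the current model
filtered = {k: v for k, v in pretrained.items()
            if k in own and v.size() == own[k].size()}
own.update(filtered)
m.load_state_dict(own)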
