【AI】PyTorch Series: Using Pretrained Models


1. Downloading Models

    import re
    import os
    import glob
    import torch
    from torch.hub import download_url_to_file
    from urllib.parse import urlparse
    import torchvision.models as models

    def download_model(url, dst_path):
        parts = urlparse(url)
        filename = os.path.basename(parts.path)

        # weight files are named like "vgg16-397923af.pth"; the hash prefix
        # lets download_url_to_file verify the downloaded file
        HASH_REGEX = re.compile(r'-([a-f0-9]*)\.')
        hash_prefix = HASH_REGEX.search(filename).group(1)
        if os.path.exists(os.path.join(dst_path, filename)):
            return filename
        download_url_to_file(url, os.path.join(dst_path, filename), hash_prefix, True)
        return filename


    def saveToFolder(path):
        # other models can be found in this directory:
        # https://github.com/pytorch/vision/tree/master/torchvision/models
        # model_urls = {
        #     'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
        #     'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
        #     'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
        #     'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
        #     'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
        #     'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
        #     'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
        #     'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
        # }
        model_urls = {
            'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
        }
        if not os.path.exists(path):
            os.makedirs(path)

        for url in model_urls.values():
            download_model(url, path)

    def load_model(model_name, model_dir):
        # build the architecture without re-initializing weights,
        # then load the downloaded state dict into it
        model = getattr(models, model_name)(init_weights=False)
        path_format = os.path.join(model_dir, '%s-[a-z0-9]*.pth' % model_name)
        model_path = glob.glob(path_format)[0]
        model.load_state_dict(torch.load(model_path))
        return model

    def main():
        path = '/home/iot/jupyter/root_dir/liudongdong/pytorch_demo/pretainedpth/vgg16'
        saveToFolder(path)
        model = load_model('vgg16', path)
        print(model)

    if __name__ == "__main__":
        main()

2. Inspecting a Model

    # assumes `resnet` was loaded earlier, e.g. resnet = torchvision.models.resnet50(pretrained=True)
    resnet.fc = torch.nn.Linear(resnet.fc.in_features, 100)
    print(resnet)   # prints every layer; note the print order may not match
                    # the order data actually flows through the network
    # Alternatively, use the torchviz module to visualize the network
    # structure; this generates a PDF graph of the network.
    import torch
    import torchvision
    from torchviz import make_dot
    x = torch.randn(10, 3, 224, 224).requires_grad_(True)
    model50 = torchvision.models.resnet50()
    y = model50(x)
    vis_graph = make_dot(y, params=dict(list(model50.named_parameters()) + [('x', x)]))
    vis_graph.view()
    # save to a file
    vis_graph.render(filename='resnet50', view=False, format='pdf')
  • Option 2: as a layer-by-layer table
    from torchsummary import summary
    summary(model50, (3, 224, 224))  # arguments: the model, then the input shape

3. Model Initialization

Proper weight initialization helps a model train efficiently and optimize well. Poor initialization, by contrast, can cause vanishing or exploding gradients, leaving the network unable to train at all. The scale of each layer's outputs therefore has to be kept under control. PyTorch ships with several effective initialization methods, the most common being the Xavier and Kaiming families, along with special-purpose schemes such as orthogonal initialization.

For a linear layer H = WX, each step of forward propagation multiplies the output variance by a factor of n: D(H) = n · D(W) · D(X), where n is the number of inputs per neuron. To keep the scale of the hidden layer H stable at the scale of the input X no matter how deep the network is, the per-layer output variance must stay at 1, which means initializing the weights with variance D(W) = 1/n. The sketch below illustrates this.
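A minimal illustration of the 1/n rule (my own sketch, not from the original code): with unit-variance weights the per-layer standard deviation grows by roughly sqrt(n), while scaling the weight variance down to 1/n keeps the output scale near 1.

    import torch

    n = 256                                    # inputs per neuron
    x = torch.randn(16, n)                     # input: mean=0, std=1
    w = torch.randn(n, n)                      # naive init: D(W) = 1
    print((x @ w).std())                       # ~sqrt(256) = 16: variance grew by a factor of n
    w_scaled = torch.randn(n, n) / n ** 0.5    # D(W) = 1/n
    print((x @ w_scaled).std())                # ~1: scale preserved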

.1. Xavier Uniform Distribution
    import os
    import torch
    import random
    import numpy as np
    import torch.nn as nn


    def set_seed(seed=1):
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

    set_seed(1)  # set the random seed


    class MLP(nn.Module):
        def __init__(self, neural_num, layers):
            super(MLP, self).__init__()
            self.linears = nn.ModuleList([nn.Linear(neural_num, neural_num, bias=False) for i in range(layers)])
            self.neural_num = neural_num

        def forward(self, x):
            for (i, linear) in enumerate(self.linears):
                x = linear(x)
                x = torch.tanh(x)

                print("layer:{}, std:{}".format(i, x.std()))
                if torch.isnan(x.std()):
                    print("output is nan in {} layers".format(i))
                    break

            return x

        def initialize(self):
            for m in self.modules():
                if isinstance(m, nn.Linear):
                    # Xavier computed by hand
                    a = np.sqrt(6 / (self.neural_num + self.neural_num))
                    tanh_gain = nn.init.calculate_gain('tanh')  # compute the gain
                    a *= tanh_gain
                    nn.init.uniform_(m.weight.data, -a, a)

                    # or call PyTorch's built-in Xavier initialization,
                    # suited to saturating activation functions:
                    # tanh_gain = nn.init.calculate_gain('tanh')
                    # nn.init.xavier_uniform_(m.weight.data, gain=tanh_gain)


    # flag = 0
    flag = 1

    if flag:
        layer_nums = 100
        neural_nums = 256
        batch_size = 16

        net = MLP(neural_nums, layer_nums)
        net.initialize()

        inputs = torch.randn((batch_size, neural_nums))  # normal: mean=0, std=1

        output = net(inputs)
        print(output)

The xavier_uniform_ function in nn.init implements the Xavier uniform initialization algorithm. The parameter is drawn from a uniform distribution U(−a, a), where a equals the gain multiplied by sqrt(6 / (fan_in + fan_out)). The gain is determined by the type of activation function in use.

    e.g. nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
.2. Xavier Normal Distribution

nn.init.xavier_normal_(tensor, gain=1) performs Xavier initialization on a tensor using a normal distribution: the weights are drawn with mean 0 and standard deviation gain × sqrt(2 / (fan_in + fan_out)). A minimal usage sketch follows.
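A small usage sketch (the tensor `w` here is a stand-in for a real weight matrix):

    import torch
    import torch.nn as nn

    w = torch.empty(256, 128)
    nn.init.xavier_normal_(w, gain=nn.init.calculate_gain('tanh'))
    print(w.std())  # close to gain * sqrt(2 / (256 + 128))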

.3. Kaiming Uniform Distribution

nn.init.kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu') initializes a weight matrix with the Kaiming uniform algorithm, sampling from U(−bound, bound) with

bound = sqrt(6 / ((1 + a²) × fan_in))

Its core parameters are the tensor itself, the slope a, the mode, and the chosen nonlinearity:

  • a is the negative-slope coefficient of the activation function; ReLU-type activations have zero slope on the negative side.
  • mode is either 'fan_in' or 'fan_out': 'fan_in' keeps the variance consistent in the forward pass using the number of input channels, while 'fan_out' keeps the variance consistent in the backward pass.
  • nonlinearity may be 'relu' or 'leaky_relu', defaulting to 'leaky_relu'.

When calling the function, specify the tensor, the mode, and the nonlinearity explicitly, as in the sketch below.
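A small usage sketch (`w` is a hypothetical weight tensor):

    import torch
    import torch.nn as nn

    w = torch.empty(256, 128)
    # fan_in mode with ReLU: bound = sqrt(6 / fan_in), since a = 0 for ReLU
    nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')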

    import os
    import torch
    import random
    import numpy as np
    import torch.nn as nn


    def set_seed(seed=1):
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

    set_seed(1)  # set the random seed


    class MLP(nn.Module):
        def __init__(self, neural_num, layers):
            super(MLP, self).__init__()
            self.linears = nn.ModuleList([nn.Linear(neural_num, neural_num, bias=False) for i in range(layers)])
            self.neural_num = neural_num

        def forward(self, x):
            for (i, linear) in enumerate(self.linears):
                x = linear(x)
                x = torch.relu(x)

                print("layer:{}, std:{}".format(i, x.std()))
                if torch.isnan(x.std()):
                    print("output is nan in {} layers".format(i))
                    break

            return x

        def initialize(self):
            for m in self.modules():
                if isinstance(m, nn.Linear):
                    # Kaiming initialization by hand: std = sqrt(2 / fan_in)
                    nn.init.normal_(m.weight.data, std=np.sqrt(2 / self.neural_num))

                    # or PyTorch's built-in Kaiming initialization:
                    # nn.init.kaiming_normal_(m.weight.data)


    # flag = 0
    flag = 1

    if flag:
        layer_nums = 100
        neural_nums = 256
        batch_size = 16

        net = MLP(neural_nums, layer_nums)
        net.initialize()

        inputs = torch.randn((batch_size, neural_nums))  # normal: mean=0, std=1

        output = net(inputs)
        print(output)
.4. Kaiming Normal Distribution

nn.init.kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu') draws weights from a zero-mean normal distribution N(0, std²), where the standard deviation is std = sqrt(2 / ((1 + a²) × fan_in)). Specifically:

  • a is the negative-half-axis slope of the activation function, with a default of 0 (as for ReLU).
  • When choosing mode:
    • 'fan_in' keeps the variance introduced by the weight initialization consistent during the forward pass.
    • 'fan_out' ensures variance consistency during the backward pass.
  • The nonlinearity options are 'relu' and 'leaky_relu', with leaky_relu as the default.
    A typical configuration for this initialization is:
    nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
.5. Uniform Distribution

torch.nn.init.uniform_(tensor, a=0, b=1)

Makes the values follow the uniform distribution U(a, b).

.6. Normal Distribution

torch.nn.init.normal_(tensor, mean=0, std=1) makes the tensor's elements follow a normal distribution with the given mean and standard deviation; the defaults are 0 and 1.
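For example (`w` is a hypothetical weight tensor):

    nn.init.normal_(w, mean=0.0, std=0.01)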

.7. Constant Initialization

torch.nn.init.constant_(tensor, val)

Sets the values to the constant val, e.g. nn.init.constant_(w, 0.3).

.8. Identity Initialization

Calling torch.nn.init.eye_(tensor) sets a 2-D tensor to the identity matrix.
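A small usage sketch (`w` hypothetical):

    w = torch.empty(5, 5)
    nn.init.eye_(w)   # w is now the 5x5 identity matrix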

.9. Orthogonal Initialization

torch.nn.init.orthogonal_(tensor, gain=1) initializes the layer's weights as a (semi-)orthogonal matrix, following the approach proposed and analyzed in Saxe et al. (2013).
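A small usage sketch (`w` hypothetical); for a tall matrix the columns come out orthonormal up to the gain:

    w = torch.empty(256, 128)
    nn.init.orthogonal_(w, gain=1)
    print(w.T @ w)   # approximately the 128x128 identity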

.10. Sparse Initialization

torch.nn.init.sparse_(tensor, sparsity, std=0.01) fills a 2-D tensor as a sparse matrix whose non-zero elements are drawn from the normal distribution N(0, std). The operation zeroes out part of each column, and the sparsity argument controls the fraction of each column set to zero. Use nn.init.sparse_ to initialize a weight tensor w as in the sketch below.
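A small usage sketch (`w` hypothetical):

    w = torch.empty(64, 32)
    nn.init.sparse_(w, sparsity=0.1, std=0.01)  # ~10% of each column is zeroed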

It is important to understand the difference between model.modules() and model.children(): **model.modules()** recursively visits every component of the network, so it traverses the full model structure, while **model.children()** only yields the modules directly contained in the top-level model, so its coverage is shallower. The sketch below shows the difference.
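A small sketch of the difference, using a nested Sequential (my own example):

    import torch.nn as nn

    net = nn.Sequential(
        nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()),
        nn.Linear(8, 2),
    )
    print(len(list(net.children())))  # 2: only the direct sub-modules
    print(len(list(net.modules())))   # 5: net itself plus every nested module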

  • Initializing a specific layer of the network
    from torch.nn import init
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
    init.xavier_uniform_(self.conv1.weight)  # xavier_uniform/constant are deprecated; use the trailing-underscore forms
    init.constant_(self.conv1.bias, 0.1)
  • Initializing the whole network
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            nn.init.xavier_uniform_(m.weight.data)
            # Xavier requires at least 2 dimensions, so the 1-D bias
            # gets a constant instead
            nn.init.constant_(m.bias.data, 0.0)

    net = Net()  # build the network
    net.apply(weights_init)  # apply() recursively searches every module inside the
                             # network and applies the given function to each of them,
                             # so all Conv layers have their weights initialized.
  • Weight initialization (common practice)
    # Common practice for initialization.
    for layer in model.modules():
        if isinstance(layer, torch.nn.Conv2d):
            torch.nn.init.kaiming_normal_(layer.weight, mode='fan_out',
                                          nonlinearity='relu')
            if layer.bias is not None:
                torch.nn.init.constant_(layer.bias, val=0.0)
        elif isinstance(layer, torch.nn.BatchNorm2d):
            torch.nn.init.constant_(layer.weight, val=1.0)
            torch.nn.init.constant_(layer.bias, val=0.0)
        elif isinstance(layer, torch.nn.Linear):
            torch.nn.init.xavier_normal_(layer.weight)
            if layer.bias is not None:
                torch.nn.init.constant_(layer.bias, val=0.0)

    # Initialization with a given tensor.
    layer.weight = torch.nn.Parameter(tensor)
  • Fine-tuning specified layers
    count = 0
    para_optim = []
    for k in model.children():
        count += 1
        # the threshold 6 should be adjusted for the model at hand
        if count > 6:
            for param in k.parameters():
                para_optim.append(param)
        else:
            for param in k.parameters():
                param.requires_grad = False
    optimizer = optim.RMSprop(para_optim, lr)
  • Training with part of the parameters frozen
    # only parameters with requires_grad=True are trained
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3)

    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(1, 6, 5)
            self.conv2 = nn.Conv2d(6, 16, 5)
            # parameters defined above this point are frozen;
            # those defined afterwards stay trainable
            for p in self.parameters():
                p.requires_grad = False
            self.fc1 = nn.Linear(16 * 5 * 5, 120)
            self.fc2 = nn.Linear(120, 84)
            self.fc3 = nn.Linear(84, 10)
  • Optimizing parameter groups with different settings
    optimizer = optim.Adam([
        {'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
         'lr': 2 * args['lr']},
        {'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
         'lr': args['lr'], 'weight_decay': args['weight_decay']}
    ], betas=(args['momentum'], 0.999))
  • Loading partial weights
    # collect the model's parameter keys
    keys = []
    for k, v in densenet.state_dict().items():
        if v.shape:
            keys.append(k)
        print(k, v.shape)
    # load weights from the pretrained file
    state = {}
    pretrained_dict = torch.load('/home/lulu/pytorch/Paper_Code/weights/densenet121-a639ec97.pth')
    for i, (k, v) in enumerate(pretrained_dict.items()):
        if 'classifier' not in k:
            state[keys[i]] = v
    # save the weights
    torch.save(state, '/home/lulu/pytorch/Paper_Code/weights/densenet121.pth')

4. Building Models

  • Sequential: strictly sequential, processing the layers in order
  • ModuleList: for repetition, building repeated layers in a for loop
  • ModuleDict: for selection, running a specific layer under a specific condition
.1. nn.Sequential
    # ============================ Sequential
    class LeNetSequential(nn.Module):
        def __init__(self, classes):
            super(LeNetSequential, self).__init__()
            self.features = nn.Sequential(
                nn.Conv2d(3, 6, 5),
                nn.ReLU(),
                nn.MaxPool2d(kernel_size=2, stride=2),
                nn.Conv2d(6, 16, 5),
                nn.ReLU(),
                nn.MaxPool2d(kernel_size=2, stride=2),)

            self.classifier = nn.Sequential(
                nn.Linear(16*5*5, 120),
                nn.ReLU(),
                nn.Linear(120, 84),
                nn.ReLU(),
                nn.Linear(84, classes),)

        def forward(self, x):
            x = self.features(x)
            x = x.view(x.size()[0], -1)
            x = self.classifier(x)
            return x
.2. nn.ModuleList

Purpose: works like a Python list, wrapping multiple network layers and supporting iteration over them.

append(): appends a layer to the end of the ModuleList

extend(): concatenates another ModuleList onto the current one

insert(): inserts a layer at a given position in the ModuleList; all three are shown in the sketch below
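A small sketch of these three methods (my own example):

    import torch.nn as nn

    ml = nn.ModuleList([nn.Linear(10, 10)])
    ml.append(nn.ReLU())                           # attach at the end
    ml.extend(nn.ModuleList([nn.Linear(10, 5)]))   # concatenate another ModuleList
    ml.insert(0, nn.Flatten())                     # insert at position 0
    print(ml)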

    class ModuleList(nn.Module):
        def __init__(self):
            super(ModuleList, self).__init__()
            self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(20)])

        def forward(self, x):
            for i, linear in enumerate(self.linears):
                x = linear(x)
            return x

    net = ModuleList()
    print(net)
    fake_data = torch.ones((10, 10))
    output = net(fake_data)
    print(output)
.3. nn.ModuleDict

Purpose: works like a Python dict, wrapping multiple network layers with one key per layer. It supports the usual dict operations: clear() empties the ModuleDict, items() yields iterable key-value pairs, keys() returns all keys, values() returns all modules, and pop() removes and returns a key-value pair; see the sketch below.
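A small sketch of those dict-style operations (my own example):

    import torch.nn as nn

    md = nn.ModuleDict({'conv': nn.Conv2d(3, 8, 3), 'act': nn.ReLU()})
    print(list(md.keys()))    # ['conv', 'act']
    print(list(md.items()))   # key-module pairs
    act = md.pop('act')       # remove and return one entry
    md.clear()                # remove everything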

    # ============================ ModuleDict
    class ModuleDict(nn.Module):
        def __init__(self):
            super(ModuleDict, self).__init__()
            self.choices = nn.ModuleDict({
                'conv': nn.Conv2d(10, 10, 3),
                'pool': nn.MaxPool2d(3)
            })
            self.activations = nn.ModuleDict({
                'relu': nn.ReLU(),
                'prelu': nn.PReLU()
            })

        def forward(self, x, choice, act):
            x = self.choices[choice](x)
            x = self.activations[act](x)
            return x

    net = ModuleDict()
    fake_img = torch.randn((4, 10, 32, 32))
    output = net(fake_img, 'conv', 'relu')
    # prelu can produce negative outputs while relu cannot; switching between
    # them and checking for negatives is a quick way to confirm that the
    # intended branch actually ran
    print(output)

5. Using Pretrained Models

.0. Modifying the Pretrained AlexNet
1. Using AlexNet directly, with activation-map visualization
    import os
    import torch
    import torch.nn
    import torchvision.models as models
    import torchvision.transforms as transforms
    import torch.nn.functional as F
    import torchvision.utils as utils
    import cv2
    import matplotlib.pyplot as plt
    import numpy as np
    from PIL import Image
    import argparse

    """
    input commands
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--test_img", type=str, default='whippet.jpg', help="testing image")
    opt = parser.parse_args()

    # function for visualizing the feature maps
    def visualize_activation_maps(input, model):
        I = utils.make_grid(input, nrow=1, normalize=True, scale_each=True)
        img = I.permute((1, 2, 0)).cpu().numpy()

        conv_results = []
        x = input
        for idx, operation in enumerate(model.features):
            x = operation(x)
            if idx in {1, 4, 7, 9, 11}:
                conv_results.append(x)

        for i in range(5):
            conv_result = conv_results[i]
            N, C, H, W = conv_result.size()

            mean_acti_map = torch.mean(conv_result, 1, True)
            mean_acti_map = F.interpolate(mean_acti_map, size=[224, 224], mode='bilinear', align_corners=False)

            map_grid = utils.make_grid(mean_acti_map, nrow=1, normalize=True, scale_each=True)
            map_grid = map_grid.permute((1, 2, 0)).mul(255).byte().cpu().numpy()
            map_grid = cv2.applyColorMap(map_grid, cv2.COLORMAP_JET)
            map_grid = cv2.cvtColor(map_grid, cv2.COLOR_BGR2RGB)
            map_grid = np.float32(map_grid) / 255

            visual_acti_map = 0.6 * img + 0.4 * map_grid
            tensor_visual_acti_map = torch.from_numpy(visual_acti_map).permute(2, 0, 1)

            file_name_visual_acti_map = 'conv{}_activation_map.jpg'.format(i + 1)
            utils.save_image(tensor_visual_acti_map, file_name_visual_acti_map)

        return 0

    # main
    if __name__ == "__main__":
        """
        data transforms, for pre-processing the input testing image before feeding into the net
        """
        data_transforms = transforms.Compose([
            transforms.Resize((224, 224)),      # resize the input to 224x224
            transforms.ToTensor(),              # put the input to tensor format
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # normalize the input
            # the normalization is based on images from ImageNet
        ])

        # obtain the file path of the testing image
        test_image_dir = './alexnet_images'
        test_image_filepath = os.path.join(test_image_dir, opt.test_img)
        # print(test_image_filepath)

        # open the testing image
        img = Image.open(test_image_filepath)
        print("original image's shape: " + str(img.size))
        # pre-process the input
        transformed_img = data_transforms(img)
        print("transformed image's shape: " + str(transformed_img.shape))
        # form a batch with only one image; unsqueeze returns a new tensor
        # with a dimension of size one inserted at the specified position
        batch_img = torch.unsqueeze(transformed_img, 0)
        print("image batch's shape: " + str(batch_img.shape))

        # load pre-trained AlexNet model
        print("\nfeed the input into the pre-trained alexnet to get the output")
        alexnet = models.alexnet(pretrained=True)
        # put the model to eval mode for testing
        alexnet.eval()

        # obtain the output of the model
        output = alexnet(batch_img)
        print("output vector's shape: " + str(output.shape))

        # obtain the activation maps
        visualize_activation_maps(batch_img, alexnet)

        # map the class no. to the corresponding label
        with open('class_names_ImageNet.txt') as labels:
            classes = [i.strip() for i in labels.readlines()]

        # print the first 5 classes to see the labels
        print("\nprint the first 5 classes to see the labels")
        for i in range(5):
            print("class " + str(i) + ": " + str(classes[i]))

        # sort the probability vector in descending order
        sorted, indices = torch.sort(output, descending=True)
        percentage = F.softmax(output, dim=1)[0] * 100.0
        # obtain the first 5 classes (with the highest probability) the input belongs to
        results = [(classes[i], percentage[i].item()) for i in indices[0][:5]]
        print("\nprint the first 5 classes the testing image belongs to")
        for i in range(5):
            print('{}: {:.4f}%'.format(results[i][0], results[i][1]))
2. Modifying AlexNet's last layer
    import torch.nn as nn
    import torchvision.models as models

    model = models.AlexNet()
    print(model)
    # change the first conv layer to take 4 input channels,
    # and the classifier to predict 10 classes
    model.features[0] = nn.Conv2d(4, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
    model.classifier[6] = nn.Linear(4096, 10)

    print(model)
    model = cifar10_cnn.CIFAR10_Nettest()
    pretrained_dict = torch.load('models/cifar10_statedict.pkl')
    model_dict = model.state_dict()
    # keep only the pretrained entries whose keys exist in the new model
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

    print(model)
    new_model_dict = model.state_dict()
    dict_name = list(new_model_dict)
    for i, p in enumerate(dict_name):
        print(i, p)

    print('before change:\n', new_model_dict['classifier.5.bias'])
    model.classifier[5] = nn.Linear(1024, 17)

    change_model_dict = model.state_dict()
    new_dict_name = list(change_model_dict)
    print('after change:\n', change_model_dict['classifier.5.bias'])
    import torch.nn as nn
    from torchvision import models

    class BuildAlexNet(nn.Module):
        def __init__(self, model_type, n_output):
            super(BuildAlexNet, self).__init__()
            self.model_type = model_type
            if model_type == 'pre':
                model = models.alexnet(pretrained=True)
                self.features = model.features
                # reuse the pretrained weights for the first two classifier
                # layers, then attach a fresh output layer
                fc1 = nn.Linear(9216, 4096)
                fc1.bias = model.classifier[1].bias
                fc1.weight = model.classifier[1].weight

                fc2 = nn.Linear(4096, 4096)
                fc2.bias = model.classifier[4].bias
                fc2.weight = model.classifier[4].weight

                self.classifier = nn.Sequential(
                        nn.Dropout(),
                        fc1,
                        nn.ReLU(inplace=True),
                        nn.Dropout(),
                        fc2,
                        nn.ReLU(inplace=True),
                        nn.Linear(4096, n_output))
                # alternatively:
                # model.classifier[6] = nn.Linear(4096, n_output)
                # self.classifier = model.classifier
            if model_type == 'new':
                self.features = nn.Sequential(
                        nn.Conv2d(3, 64, 11, 4, 2),
                        nn.ReLU(inplace=True),
                        nn.MaxPool2d(3, 2, 0),
                        nn.Conv2d(64, 192, 5, 1, 2),
                        nn.ReLU(inplace=True),
                        nn.MaxPool2d(3, 2, 0),
                        nn.Conv2d(192, 384, 3, 1, 1),
                        nn.ReLU(inplace=True),
                        nn.Conv2d(384, 256, 3, 1, 1),
                        nn.ReLU(inplace=True),
                        nn.MaxPool2d(3, 2, 0))
                self.classifier = nn.Sequential(
                        nn.Dropout(),
                        nn.Linear(9216, 4096),
                        nn.ReLU(inplace=True),
                        nn.Dropout(),
                        nn.Linear(4096, 4096),
                        nn.ReLU(inplace=True),
                        nn.Linear(4096, n_output))

        def forward(self, x):
            x = self.features(x)
            x = x.view(x.size(0), -1)
            out = self.classifier(x)
            return out
    import numpy as np
    import torch

    if __name__ == '__main__':
        model_type = 'pre'
        n_output = 10
        alexnet = BuildAlexNet(model_type, n_output)
        print(alexnet)

        x = np.random.rand(1, 3, 224, 224)
        x = x.astype(np.float32)
        x_in = torch.from_numpy(x)  # torch.autograd.Variable is deprecated; plain tensors work directly
        y = alexnet(x_in)
.1. Modifying ResNet Parameters

The fully connected (fc) layer at the end of ResNet classifies among the 1000 ImageNet categories, but the dataset used here has only 9 classes.

    # coding=UTF-8
    import torch.nn as nn
    import torchvision.models as models
    # load the model
    model = models.resnet50(pretrained=True)
    # read the input dimension of the existing fc layer
    fc_features = model.fc.in_features
    # change the number of classes to 9
    model.fc = nn.Linear(fc_features, 9)
.2. Adding and Removing Convolution Layers

1. First, build your own network on top of the pretrained model (without this base there is nothing to fine-tune).
2. Next, discard the pretrained parameters that have no counterpart in your own design.
3. Finally, load the remaining parameters into the target network as its initialization, which completes the fine-tuning setup.

    # -*- coding:utf-8 -*-
    #####################
    # build your own network model `net` here
    #####################

    # Then read out the pretrained parameters (resnet152 as an example);
    # the weights here were downloaded beforehand and stored in a folder
    # rather than fetched by the program.
    pretrained_dict = torch.load(save_path)
    model_dict = net.state_dict()   # read the new network's parameters so they can be updated and used for initialization

    # drop the keys that do not belong to model_dict
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}

    # update the existing model_dict with the pretrained values
    model_dict.update(pretrained_dict)

    # load the parameters the model needs
    net.load_state_dict(model_dict)
.3. Computing Multi-Layer Convolution Features with an ImageNet Model
    class FeatureExtractor(torch.nn.Module):
        """Helper class to extract several convolution features from the given
        pre-trained model.
        Attributes:
            _model, torch.nn.Module.
            _layers_to_extract, list<str> or set<str>
        Example:
            >>> model = torchvision.models.resnet152(pretrained=True)
            >>> model = torch.nn.Sequential(collections.OrderedDict(
                    list(model.named_children())[:-1]))
            >>> conv_representation = FeatureExtractor(
                    pretrained_model=model,
                    layers_to_extract={'layer1', 'layer2', 'layer3', 'layer4'})(image)
        """
        def __init__(self, pretrained_model, layers_to_extract):
            torch.nn.Module.__init__(self)
            self._model = pretrained_model
            self._model.eval()
            self._layers_to_extract = set(layers_to_extract)

        def forward(self, x):
            with torch.no_grad():
                conv_representation = []
                for name, layer in self._model.named_children():
                    x = layer(x)
                    if name in self._layers_to_extract:
                        conv_representation.append(x)
                return conv_representation
.4. Training Specific Layers, Freezing the Others

Keep part of the model's pretrained weights fixed and fine-tune the later layers to learn new weights. The best combination of frozen and retrained layers is found by repeated experiment; how much of the pretrained model to reuse depends mainly on the size of the new dataset and how similar it is to the data the model was pretrained on. The mechanism: lock parameters by setting requires_grad=False, then hand the optimizer only the parameters that still require gradients, e.g. filter(lambda p: p.requires_grad, model.parameters()).

    import math
    import torch.nn as nn
    import torchvision.models as models
    from torchvision.models.resnet import Bottleneck

    # first define a new network of your own
    class CNN(nn.Module):
        def __init__(self, block, layers, num_classes=9):
            # The new CNN keeps largely the same structure as ResNet: apart
            # from the newly added layers, layer names match ResNet's, so
            # pretrained weights can later be copied over by name.
            self.inplanes = 64
            super(CNN, self).__init__()
            self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
            self.bn1 = nn.BatchNorm2d(64)
            self.relu = nn.ReLU(inplace=True)
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            self.layer1 = self._make_layer(block, 64, layers[0])
            self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
            self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
            self.avgpool = nn.AvgPool2d(7, stride=1)

            # a newly added transposed-convolution layer
            self.convtranspose1 = nn.ConvTranspose2d(2048, 2048, kernel_size=3, stride=1, padding=1,
                                                     output_padding=0, groups=1, bias=False, dilation=1)

            # a newly added max-pooling layer
            self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)

            # replace the original fc layer with fclass
            # (original: self.fc = nn.Linear(512 * block.expansion, num_classes))
            self.fclass = nn.Linear(2048, num_classes)

            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()

        def _make_layer(self, block, planes, blocks, stride=1):
            downsample = None
            if stride != 1 or self.inplanes != planes * block.expansion:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(planes * block.expansion),
                )
            layers = []
            layers.append(block(self.inplanes, planes, stride, downsample))
            self.inplanes = planes * block.expansion
            for i in range(1, blocks):
                layers.append(block(self.inplanes, planes))
            return nn.Sequential(*layers)

        def forward(self, x):
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            x = self.maxpool(x)
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)
            x = self.avgpool(x)

            # forward pass of the three new layers; convtranspose1 expects a
            # 4-D input with 2048 channels, so flatten only afterwards
            x = self.convtranspose1(x)
            x = self.maxpool2(x)
            x = x.view(x.size(0), -1)

            # the following self.fclass layer expects 2048 input features
            x = self.fclass(x)
            return x

    # load the model
    resnet50 = models.resnet50(pretrained=True)
    cnn = CNN(Bottleneck, [3, 4, 6, 3])  # create an instance of the newly defined network
    # fine-tune only the fully connected layer
    model = torchvision.models.resnet18(pretrained=True)
    for param in model.parameters():
        param.requires_grad = False
    model.fc = nn.Linear(512, 100)  # replace the last fc layer
    optimizer = torch.optim.SGD(model.fc.parameters(), lr=1e-2, momentum=0.9, weight_decay=1e-4)

    # fine-tune the fc layer with a larger learning rate
    # and the conv layers with a smaller one
    model = torchvision.models.resnet18(pretrained=True)
    finetuned_parameters = list(map(id, model.fc.parameters()))
    conv_parameters = (p for p in model.parameters() if id(p) not in finetuned_parameters)
    parameters = [{'params': conv_parameters, 'lr': 1e-3},
                  {'params': model.fc.parameters()}]
    optimizer = torch.optim.SGD(parameters, lr=1e-2, momentum=0.9, weight_decay=1e-4)
.5. Using the keypointrcnn_resnet50_fpn Model
    import torch
    import torchvision
    import torch.nn as nn

    def get_model(num_kpts, train_kptHead=False, train_fpn=True):
        is_available = torch.cuda.is_available()
        device = torch.device('cuda:0' if is_available else 'cpu')
        dtype = torch.cuda.FloatTensor if is_available else torch.FloatTensor
        model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)

        # freeze everything first
        for i, param in enumerate(model.parameters()):
            param.requires_grad = False

        # optionally unfreeze the last `train_kptHead` blocks of the keypoint head
        if train_kptHead != False:
            for i, param in enumerate(model.roi_heads.keypoint_head.parameters()):
                if i / 2 >= model.roi_heads.keypoint_head.__len__() / 2 - train_kptHead:
                    param.requires_grad = True

        # optionally unfreeze the FPN
        if train_fpn == True:
            for param in model.backbone.fpn.parameters():
                param.requires_grad = True

        # replace the keypoint predictor so it outputs num_kpts keypoints
        out = nn.ConvTranspose2d(512, num_kpts, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
        model.roi_heads.keypoint_predictor.kps_score_lowres = out

        return model, device, dtype

    # model, device, dtype = get_model(2)
