
Remote Sensing Image Detection Based on an Object Detection Model


In our actual business scenarios, course resource construction plays an important role on online education platforms. Course resources come in many types, but in past teaching practice, course development has focused mainly on system design and development, with relatively little work on intelligent data analysis. To improve teaching outcomes, this project uses a publicly available remote sensing dataset for hands-on object detection.

Let's look at the results first:

The remote sensing images here differ from those I have worked with before: after preprocessing they are rendered in black and white. They are mainly used here to detect and recognize passing ships.

A quick look at the dataset:

The model cfg file is as follows:

    [net]
    # Training
    batch=32
    subdivisions=2
    width=320
    height=320
    channels=3
    momentum=0.9
    decay=0.0005
    angle=0
    saturation = 1.5
    exposure = 1.5
    hue=.1

    learning_rate=0.001
    burn_in=1000
    max_batches = 50200
    policy=steps
    steps=40000,45000
    scales=.1,.1

    [convolutional]
    filters=8
    size=3
    groups=1
    stride=2
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=8
    size=3
    groups=8
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=4
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [convolutional]
    filters=24
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=24
    size=3
    groups=24
    stride=2
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=6
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [convolutional]
    filters=36
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=36
    size=3
    groups=36
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=6
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [shortcut]
    from=-4
    activation=linear

    [convolutional]
    filters=36
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=36
    size=3
    groups=36
    stride=2
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=8
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [convolutional]
    filters=48
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=48
    size=3
    groups=48
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=8
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [shortcut]
    from=-4
    activation=linear

    [convolutional]
    filters=48
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=48
    size=3
    groups=48
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=8
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [shortcut]
    from=-4
    activation=linear
    ######################

    [convolutional]
    filters=48
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=48
    size=3
    groups=48
    stride=2
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=16
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [convolutional]
    filters=96
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=96
    size=3
    groups=96
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=16
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [shortcut]
    from=-4
    activation=linear

    [convolutional]
    filters=96
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=96
    size=3
    groups=96
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=16
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [shortcut]
    from=-4
    activation=linear

    [convolutional]
    filters=96
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=96
    size=3
    groups=96
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=16
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [shortcut]
    from=-4
    activation=linear

    [convolutional]
    filters=96
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=96
    size=3
    groups=96
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=24
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [convolutional]
    filters=144
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=144
    size=3
    groups=144
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=24
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [shortcut]
    from=-4
    activation=linear

    [convolutional]
    filters=144
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=144
    size=3
    groups=144
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=24
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [shortcut]
    from=-4
    activation=linear

    [convolutional]
    filters=144
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=144
    size=3
    groups=144
    stride=2
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=40
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [convolutional]
    filters=240
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=240
    size=3
    groups=240
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=40
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [shortcut]
    from=-4
    activation=linear

    [convolutional]
    filters=240
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=240
    size=3
    groups=240
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=40
    size=1
    groups=1
    stride=1
    pad=1
    batch_normalize=1
    activation=linear

    [shortcut]
    from=-4
    activation=linear

    ### SPP ###
    [maxpool]
    stride=1
    size=3

    [route]
    layers=-2

    [maxpool]
    stride=1
    size=5

    [route]
    layers=-4

    [maxpool]
    stride=1
    size=9

    [route]
    layers=-1,-3,-5,-6
    ##############################

    [convolutional]
    filters=144
    size=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=144
    size=3
    groups=144
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=48
    size=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=144
    size=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    size=1
    stride=1
    pad=1
    filters=18
    activation=linear

    [yolo]
    mask = 3,4,5
    anchors = 26, 48,  67, 84,  72,175, 189,126, 137,236, 265,259
    classes=1
    num=6
    jitter=.1
    ignore_thresh = .5
    truth_thresh = 1
    random=0
    #################
    scale_x_y = 1.1
    iou_thresh=0.213
    cls_normalizer=1.0
    iou_normalizer=0.07
    iou_loss=ciou
    nms_kind=greedynms
    beta_nms=0.6

    ##################################
    [route]
    layers= 64

    [upsample]
    stride=2

    [route]
    layers=-1,47
    #################################
    [convolutional]
    filters=40
    size=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=96
    size=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=96
    size=3
    groups=96
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=48
    size=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    filters=96
    size=1
    stride=1
    pad=1
    batch_normalize=1
    activation=relu

    [convolutional]
    size=1
    stride=1
    pad=1
    filters=18
    activation=linear

    [yolo]
    mask = 0,1,2
    anchors = 26, 48,  67, 84,  72,175, 189,126, 137,236, 265,259
    classes=1
    num=6
    jitter=.1
    ignore_thresh = .5
    truth_thresh = 1
    random=0
    #################
    scale_x_y = 1.05
    iou_thresh=0.213
    cls_normalizer=1.0
    iou_normalizer=0.07
    iou_loss=ciou
    nms_kind=greedynms
    beta_nms=0.6

As in previous projects, there are two places in the cfg that need to be modified:

[First location]

[Second location]

The changes only involve the classes and filters fields. classes is the number of target categories to detect, which is 1 here.

The value of filters is computed as filters = (classes + 5) * 3, where 3 is the number of anchors assigned to each [yolo] layer by its mask line; here that gives (1 + 5) * 3 = 18.
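As a quick sanity check, here is a small Python helper (my addition, not part of the original project) that reproduces this arithmetic:

    def yolo_filters(num_classes: int, masks_per_layer: int = 3) -> int:
        # Each anchor predicts 4 box offsets + 1 objectness score + one
        # confidence per class; masks_per_layer is the number of anchors
        # assigned to the head by its mask= line (3 in the cfg above).
        return (num_classes + 5) * masks_per_layer

    assert yolo_filters(1) == 18  # matches filters=18 before each [yolo] layer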

Next, write the yaogan.names and yaogan.data files that the framework needs for training.

yaogan.names is as follows:

    ship

This is single-class detection, so the object list contains only ship.

yaogan.data is as follows:

    classes= 1
    train  = /home/objDet/yaogan/train.txt
    valid  = /home/objDet/yaogan/test.txt
    names = /home/objDet/yaogan/yaogan.names
    backup = /home/objDet/yaogan/model/yolo
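The train.txt and test.txt files referenced above each list one image path per line, which is the format darknet expects; darknet then looks for a same-named .txt label file next to each image, with lines of the form "<class> <x_center> <y_center> <width> <height>" normalized to [0, 1]. Below is a minimal Python sketch for producing the split; the images subdirectory and the 90/10 ratio are my assumptions, not part of the original project:

    import random
    from pathlib import Path

    root = Path("/home/objDet/yaogan")                # dataset root from yaogan.data
    images = sorted((root / "images").glob("*.jpg"))  # assumed image folder

    random.seed(0)
    random.shuffle(images)
    split = int(len(images) * 0.9)                    # assumed 90/10 train/test split

    (root / "train.txt").write_text("\n".join(map(str, images[:split])) + "\n")
    (root / "test.txt").write_text("\n".join(map(str, images[split:])) + "\n")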

The training commands are as follows:

    chmod +x darknet
    nohup ./darknet detector train yaogan.data yolo.cfg >> yolo.out &
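Training runs in the background and appends its log to yolo.out; checkpoints land in the backup directory configured in yaogan.data. Once a checkpoint is available, the network can be spot-checked outside of darknet. The following is a minimal sketch using OpenCV's DNN module; the weights and test image file names are placeholders, and the input size (320x320) and thresholds mirror the cfg above:

    import cv2
    import numpy as np

    # Placeholder file names; substitute your actual cfg/weights/image paths.
    net = cv2.dnn.readNetFromDarknet("yolo.cfg", "yolo_final.weights")

    img = cv2.imread("test.jpg")
    h, w = img.shape[:2]

    # Darknet-style preprocessing: scale pixels to [0, 1], resize to the
    # network input (width/height from the [net] section), swap BGR -> RGB.
    blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (320, 320), swapRB=True, crop=False)
    net.setInput(blob)

    boxes, scores = [], []
    for out in net.forward(net.getUnconnectedOutLayersNames()):
        for det in out:                    # det = [cx, cy, bw, bh, obj, cls...]
            conf = float(det[4] * det[5:].max())
            if conf > 0.5:
                cx, cy, bw, bh = det[:4] * np.array([w, h, w, h])
                boxes.append([int(cx - bw / 2), int(cy - bh / 2), int(bw), int(bh)])
                scores.append(conf)

    # Greedy NMS over the surviving boxes, mirroring nms_kind=greedynms.
    for i in np.array(cv2.dnn.NMSBoxes(boxes, scores, 0.5, 0.4)).flatten():
        x, y, bw, bh = boxes[i]
        cv2.rectangle(img, (x, y), (x + bw, y + bh), (0, 255, 0), 2)

    cv2.imwrite("result.jpg", img)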

Results on randomly selected test data are as follows:

This is a relatively simple exercise. When time permits, I will organize the models for the corresponding scenarios and upload them for others to use.
