RT-DETR Optimization: Multi-Scale Feature Extraction with ASF-YOLO | New Results from December 2023

2023-12-15 11:37:00

🚀🚀🚀 This article's improvement: ASF-YOLO, a new feature-fusion network architecture built from two main component networks that provide complementary information for small-object segmentation: (1) the SSFF module, which combines global or high-level semantic information from multi-scale images; (2) the TFE module, which captures the local fine details of small objects.

🚀🚀🚀 YOLOv8 improvements column: http://t.csdnimg.cn/hGhVK

Learn YOLOv8 from the basics through to your own innovations, and breeze through your research;

1. Introduction to ASF-YOLO

Paper: https://arxiv.org/ftp/arxiv/papers/2312/2312.06458.pdf

Abstract: We propose a novel Attentional Scale Sequence Fusion based You Only Look Once (YOLO) framework (ASF-YOLO) that combines spatial and scale features for accurate and fast cell instance segmentation. Built on the YOLO segmentation framework, it employs the Scale Sequence Feature Fusion (SSFF) module to enhance the network's multi-scale information extraction and the Triple Feature Encoder (TFE) module to fuse feature maps of different scales and add detailed information. We further introduce a Channel and Position Attention Mechanism (CPAM) to integrate the SSFF and TFE modules, which focuses on informative channels and spatial positions related to small objects to improve detection and segmentation performance. Experimental validation on two cell datasets shows that the ASF-YOLO model achieves remarkable segmentation accuracy and speed. It reaches a box mAP of 0.91, a mask mAP of 0.887, and an inference speed of 47.3 FPS on the 2018 Data Science Bowl dataset, outperforming state-of-the-art methods.

Figure 3 shows an overview of the proposed ASF-YOLO framework, which combines spatial and multi-scale features for cell instance segmentation. We developed a new feature-fusion network architecture built from two main component networks that provide complementary information for small-object segmentation: (1) the SSFF module, which combines global or high-level semantic information from multi-scale images; (2) the TFE module, which captures the local fine details of small objects. Combining local and global feature information yields more accurate segmentation maps. We fuse the P3, P4, and P5 output features extracted from the backbone network. First, the SSFF module is designed to effectively fuse the P3, P4, and P5 feature maps, which capture the different spatial scales covering the various sizes and shapes of different cell types. In SSFF, the P3, P4, and P5 feature maps are normalized to the same size, upsampled, and then stacked together as the input to a 3D convolution that combines the multi-scale features. Second, the TFE module is developed to capture detailed information about small objects by concatenating features of three sizes (large, medium, and small) along the spatial dimension, enhancing small-object detection in dense cells. The detailed feature information from the TFE module is then integrated into each feature branch through the PANet structure and combined with the multi-scale information from the SSFF module in the P3 branch. We further introduce the Channel and Position Attention Mechanism (CPAM) in the P3 branch to exploit both the high-level multi-scale features and the detailed features. The channel attention and position attention mechanisms in CPAM capture informative channels and refine the spatial localization of small objects such as cells, respectively, improving detection and segmentation accuracy.
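
To make the SSFF stacking concrete, here is a minimal shape-flow sketch (dummy tensors only; the 256-channel width and 80×80 resolution are illustrative assumptions, and the full module appears in section 2.1):

import torch
import torch.nn as nn
import torch.nn.functional as F

# P3/P4/P5 after projection to a common channel width, at three resolutions.
p3 = torch.randn(1, 256, 80, 80)
p4 = F.interpolate(torch.randn(1, 256, 40, 40), size=(80, 80), mode='nearest')
p5 = F.interpolate(torch.randn(1, 256, 20, 20), size=(80, 80), mode='nearest')

# Stack along a new depth axis -> (1, 256, 3, 80, 80), fuse with a 1x1x1 3D conv,
# then collapse the depth axis again with a (3, 1, 1) max pool.
stacked = torch.cat([p.unsqueeze(2) for p in (p3, p4, p5)], dim=2)
fused = nn.Conv3d(256, 256, kernel_size=(1, 1, 1))(stacked)
out = nn.MaxPool3d(kernel_size=(3, 1, 1))(fused).squeeze(2)
print(out.shape)  # torch.Size([1, 256, 80, 80])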

2. Adding ASF-YOLO to RT-DETR

2.1 Create ultralytics/nn/block/ASF_YOLO.py

import torch
import torch.nn as nn
import torch.nn.functional as F
import math

from ..modules.conv import Conv


class Zoom_cat(nn.Module):
    def __init__(self):
        super().__init__()
        # self.conv_l_post_down = Conv(in_dim, 2*in_dim, 3, 1, 1)

    def forward(self, x):
        """x = [l, m, s]: large, medium and small scale feature maps, fused at the medium (m) scale."""
        l, m, s = x[0], x[1], x[2]
        tgt_size = m.shape[2:]
        l = F.adaptive_max_pool2d(l, tgt_size) + F.adaptive_avg_pool2d(l, tgt_size)
        # l = self.conv_l_post_down(l)
        # m = self.conv_m(m)
        # s = self.conv_s_pre_up(s)
        s = F.interpolate(s, m.shape[2:], mode='nearest')
        # s = self.conv_s_post_up(s)
        lms = torch.cat([l, m, s], dim=1)
        return lms

class ScalSeq(nn.Module):
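    """SSFF: project P3/P4/P5 to a common channel width, align them to the P3 size, stack along a depth axis, and fuse with a 3D conv."""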
    def __init__(self, inc, channel):
        super().__init__()
        if channel != inc[0]:
            self.conv0 = Conv(inc[0], channel, 1)
        self.conv1 = Conv(inc[1], channel, 1)
        self.conv2 = Conv(inc[2], channel, 1)
        self.conv3d = nn.Conv3d(channel, channel, kernel_size=(1, 1, 1))
        self.bn = nn.BatchNorm3d(channel)
        self.act = nn.LeakyReLU(0.1)
        self.pool_3d = nn.MaxPool3d(kernel_size=(3, 1, 1))

    def forward(self, x):
        p3, p4, p5 = x[0], x[1], x[2]
        if hasattr(self, 'conv0'):
            p3 = self.conv0(p3)
        # Project P4/P5 to the common width and upsample them to the P3 resolution.
        p4_2 = self.conv1(p4)
        p4_2 = F.interpolate(p4_2, p3.size()[2:], mode='nearest')
        p5_2 = self.conv2(p5)
        p5_2 = F.interpolate(p5_2, p3.size()[2:], mode='nearest')
        # Stack the three maps along a new depth axis: (N, C, 3, H, W).
        p3_3d = torch.unsqueeze(p3, -3)
        p4_3d = torch.unsqueeze(p4_2, -3)
        p5_3d = torch.unsqueeze(p5_2, -3)
        combine = torch.cat([p3_3d, p4_3d, p5_3d], dim=2)
        # Fuse across scales with the 3D conv, then collapse the depth axis.
        conv_3d = self.conv3d(combine)
        bn = self.bn(conv_3d)
        act = self.act(bn)
        x = self.pool_3d(act)
        x = torch.squeeze(x, 2)
        return x


class Add(nn.Module):
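    """Element-wise sum of a list of same-shape feature maps (merges the SSFF output into a branch)."""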
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return torch.sum(torch.stack(x, dim=0), dim=0)


class channel_att(nn.Module):
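    """ECA-style channel attention: a 1D conv over the pooled channel descriptor, with kernel size adapted to the channel count."""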
    def __init__(self, channel, b=1, gamma=2):
        super(channel_att, self).__init__()
        kernel_size = int(abs((math.log(channel, 2) + b) / gamma))
        kernel_size = kernel_size if kernel_size % 2 else kernel_size + 1

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        y = self.avg_pool(x)
        y = y.squeeze(-1)
        y = y.transpose(-1, -2)
        y = self.conv(y).transpose(-1, -2).unsqueeze(-1)
        y = self.sigmoid(y)
        return x * y.expand_as(x)


class local_att(nn.Module):
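    """Coordinate-attention-style positional attention: pool along H and W separately, then reweight each axis."""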
    def __init__(self, channel, reduction=16):
        super(local_att, self).__init__()

        self.conv_1x1 = nn.Conv2d(in_channels=channel, out_channels=channel // reduction, kernel_size=1, stride=1,
                                  bias=False)

        self.relu = nn.ReLU()
        self.bn = nn.BatchNorm2d(channel // reduction)

        self.F_h = nn.Conv2d(in_channels=channel // reduction, out_channels=channel, kernel_size=1, stride=1,
                             bias=False)
        self.F_w = nn.Conv2d(in_channels=channel // reduction, out_channels=channel, kernel_size=1, stride=1,
                             bias=False)

        self.sigmoid_h = nn.Sigmoid()
        self.sigmoid_w = nn.Sigmoid()

    def forward(self, x):
        _, _, h, w = x.size()

        x_h = torch.mean(x, dim=3, keepdim=True).permute(0, 1, 3, 2)
        x_w = torch.mean(x, dim=2, keepdim=True)

        x_cat_conv_relu = self.relu(self.bn(self.conv_1x1(torch.cat((x_h, x_w), 3))))

        x_cat_conv_split_h, x_cat_conv_split_w = x_cat_conv_relu.split([h, w], 3)

        s_h = self.sigmoid_h(self.F_h(x_cat_conv_split_h.permute(0, 1, 3, 2)))
        s_w = self.sigmoid_w(self.F_w(x_cat_conv_split_w))

        out = x * s_h.expand_as(x) * s_w.expand_as(x)
        return out


class attention_model(nn.Module):
    # CPAM: channel attention on the first input, add the second input, then positional attention
    def __init__(self, ch=256):
        super().__init__()
        self.channel_att = channel_att(ch)
        self.local_att = local_att(ch)

    def forward(self, x):
        input1, input2 = x[0], x[1]
        input1 = self.channel_att(input1)
        x = input1 + input2
        x = self.local_att(x)
        return x
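
Before wiring the modules into a config, a quick shape sanity check helps. This is a minimal sketch, assuming the file above is saved as ultralytics/nn/block/ASF_YOLO.py inside an editable ultralytics checkout; all channel counts are illustrative:

import torch
from ultralytics.nn.block.ASF_YOLO import Zoom_cat, ScalSeq, Add, attention_model

# Three scales with a shared channel width for Zoom_cat (TFE-style fusion at the middle scale).
l, m, s = torch.randn(1, 512, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 512, 20, 20)
print(Zoom_cat()([l, m, s]).shape)  # torch.Size([1, 1536, 40, 40])

# SSFF: P3/P4/P5 with backbone-like channel counts, fused to 256 channels at P3 resolution.
p3, p4, p5 = torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 512, 20, 20)
ssff = ScalSeq([256, 512, 512], 256)
print(ssff([p3, p4, p5]).shape)  # torch.Size([1, 256, 80, 80])

# Merge the SSFF output with the P3 detail branch, then apply CPAM.
merged = Add()([p3, ssff([p3, p4, p5])])
print(attention_model(256)([merged, p3]).shape)  # torch.Size([1, 256, 80, 80])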

2.2 Modify ultralytics/nn/tasks.py

Register the new modules:

from ultralytics.nn.block.ASF_YOLO import Zoom_cat, ScalSeq, Add

Then modify def parse_model(d, ch, verbose=True): # model_dict, input_channels(3) by adding the following branches to its module elif chain:

        ###ASF-YOLO
        elif m is Zoom_cat:
            c2 = sum(ch[x] for x in f)  # concatenation: output channels = sum of inputs
        elif m is Add:
            c2 = ch[f[-1]]  # element-wise sum keeps the channel count
        elif m is ScalSeq:
            c1 = [ch[x] for x in f]  # channel counts of the P3/P4/P5 inputs
            c2 = make_divisible(args[0] * width, 8)
            args = [c1, c2]
        ###ASF-YOLO
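
As a concrete check against the config in section 2.3 (exact values depend on the chosen scale): layer 24 is ScalSeq with from=[4, 6, 8] and args=[256], so c1 collects the channel counts of the P3/P4/P5 backbone outputs and c2 = make_divisible(256 * width, 8); layer 25 is Add with from=[17, -1], so c2 = ch[f[-1]], i.e. it inherits the channels of the ScalSeq output and matches the P3 branch it is summed with.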

2.3 rtdetr-l-ASF-YOLO.yaml

# Ultralytics YOLO 🚀, AGPL-3.0 license
# rtdetr-l-ASF-YOLO: detection model with ASF-YOLO feature-fusion modules (Zoom_cat, ScalSeq, Add)

# Parameters
nc: 80  # number of classes
scales: # model compound scaling constants; the 'l' scale applies to rtdetr-l-ASF-YOLO
  # [depth, width, max_channels]
  n: [0.33, 0.25, 1024]
  s: [0.33, 0.50, 1024]
  m: [0.67, 0.75, 768]
  l: [1.00, 1.00, 512]
  x: [1.00, 1.25, 512]

# ASF-YOLO backbone
backbone:
  # [from, number, module, args]
  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
  - [-1, 3, C2f, [128, True]]
  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
  - [-1, 6, C2f, [256, True]]
  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
  - [-1, 9, C2f, [512, True]]
  - [-1, 1, Conv, [1024, 3, 2]]  # 7-P5/32
  - [-1, 3, C2f, [1024, True]]
  - [-1, 1, SPPF, [1024, 5]]  # 9


# ASF-YOLO head
head:
  - [-1, 1, Conv, [512, 1, 1]] #10
  - [4, 1, Conv, [512, 1, 1]] #11
  - [[-1, 6, -2], 1, Zoom_cat, []]  # 12 cat backbone P4
  - [-1, 3, C2f, [512]]  # 13

  - [-1, 1, Conv, [256, 1, 1]] #14
  - [2, 1, Conv, [256, 1, 1]] #15
  - [[-1, 4, -2], 1, Zoom_cat, []]  #16  cat backbone P3
  - [-1, 3, C2f, [256]]  # 17 (P3/8-small)

  - [-1, 1, Conv, [256, 3, 2]] #18
  - [[-1, 14], 1, Concat, [1]]  #19 cat head P4
  - [-1, 3, C2f, [512]]  # 20 (P4/16-medium)

  - [-1, 1, Conv, [512, 3, 2]] #21
  - [[-1, 10], 1, Concat, [1]]  #22 cat head P5
  - [-1, 3, C2f, [1024]]  # 23 (P5/32-large)

  - [[4, 6, 8], 1, ScalSeq, [256]] #24 SSFF over P3/P4/P5; args: [out_channels]
  - [[17, -1], 1, Add, []] #25
  
  - [[25, 20, 23], 1, Detect, [nc]]  # Detect(P3, P4, P5)
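
With the modules registered, the config can be built and trained like any other Ultralytics model. A minimal sketch, assuming the yaml above is saved as ultralytics/cfg/models/rt-detr/rtdetr-l-ASF-YOLO.yaml (the path and the dataset yaml are placeholders; since this config uses a Detect head, the standard YOLO loader is used here):

from ultralytics import YOLO

# Build the model from the modified config and print the parsed layer summary
# to confirm that Zoom_cat / ScalSeq / Add resolve correctly.
model = YOLO('ultralytics/cfg/models/rt-detr/rtdetr-l-ASF-YOLO.yaml')
model.info()

# Train as usual.
model.train(data='coco128.yaml', epochs=100, imgsz=640)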
  

Source: https://blog.csdn.net/CV_20231007/article/details/135009961