If you can't even do NMS, what Detection are you doing!
The Non-Maximum Suppression (NMS) Algorithm
NMS principle:
- Start with the set of all predicted boxes B, their corresponding scores Scores, and the NMS (IoU) threshold T.
- Define a set H to hold the selected boxes (initially empty). Sort Scores, take the box with the highest score as maxBox, and move maxBox from set B into set H, so that B no longer contains maxBox.
- Compute the IoU between maxBox and every box remaining in B, and delete from B every box whose IoU is greater than T (those boxes are considered to overlap maxBox). A small worked IoU sketch follows this list.
- Repeat steps 2-3 until B is empty; the boxes stored in H are the NMS result. The repeated steps are: (1) sort the scores of the boxes remaining in B, take the box with the highest score as maxBox, and move it from B into H; (2) compute the IoU between maxBox and each box in B, and delete from B every box whose IoU exceeds the threshold.
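The suppression decision above relies entirely on the IoU between two boxes. Below is a minimal plain-Python sketch of that computation on two made-up boxes in (x1, y1, x2, y2) format (the helper name iou_xyxy and the example boxes are illustrative, not from the original post):

def iou_xyxy(a, b):
    # intersection rectangle: max of the top-left corners, min of the bottom-right corners
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    iw, ih = max(ix2 - ix1, 0.0), max(iy2 - iy1, 0.0)  # clamp to 0 for non-overlapping boxes
    inter = iw * ih
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter)

print(iou_xyxy((0, 0, 10, 10), (5, 5, 15, 15)))  # 25 / (100 + 100 - 25) ≈ 0.143, below T = 0.5, so not suppressed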
NMS Code Implementations
1. PyTorch implementation
from torch import Tensor
import torch


def box_area(boxes: Tensor) -> Tensor:
    """
    Computes the area of a set of bounding boxes, which are specified by its
    (x1, y1, x2, y2) coordinates.
    Arguments:
        boxes (Tensor[N, 4]): boxes for which the area will be computed. They
            are expected to be in (x1, y1, x2, y2) format
    Returns:
        area (Tensor[N]): area for each box
    """
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])


def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        boxes1 (Tensor[N, 4])
        boxes2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2
    """
    area1 = box_area(boxes1)  # area of each box, shape (N,)
    area2 = box_area(boxes2)  # (M,)
    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]  each of the N boxes compared with all M boxes, hence N x M pairs
    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]
    wh = (rb - lt).clamp(min=0)  # [N,M,2]  clamp negative widths/heights to 0 (non-overlapping boxes)
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]
    iou = inter / (area1[:, None] + area2 - inter)
    return iou  # NxM, IoU between every box in boxes1 and every box in boxes2


def nms(boxes: Tensor, scores: Tensor, iou_threshold: float):
    """
    :param boxes: [N, 4], boxes passed in here are assumed to be pre-filtered (e.g. top-K by score) before NMS
    :param scores: [N]
    :param iou_threshold: e.g. 0.7
    :return: indices (into boxes) of the boxes kept by NMS
    """
    keep = []  # indices (into boxes) of the boxes that are finally kept
    idxs = scores.argsort()  # indices sorted by score in ascending order
    while idxs.numel() > 0:  # loop until empty; numel(): number of elements
        # index of the current highest-scoring box, and its coordinates
        max_score_index = idxs[-1]
        max_score_box = boxes[max_score_index][None, :]  # [1, 4]
        keep.append(max_score_index)
        if idxs.size(0) == 1:  # only one box left
            break
        idxs = idxs[:-1]  # drop the max-score box from the indices; the remaining boxes are compared against it
        other_boxes = boxes[idxs]  # [?, 4]
        ious = box_iou(max_score_box, other_boxes)  # one box against all remaining boxes: [1, M]
        idxs = idxs[ious[0] <= iou_threshold]  # keep only the boxes whose IoU with the max-score box is <= threshold
    keep = idxs.new(keep)  # convert the kept indices to a Tensor
    return keep
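A minimal usage sketch of the function above (the boxes and scores are made up for illustration):

boxes = torch.tensor([[10., 10., 50., 50.],
                      [12., 12., 48., 48.],
                      [100., 100., 150., 150.]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep = nms(boxes, scores, iou_threshold=0.5)
print(keep)         # indices of the kept boxes, e.g. tensor([0, 2]) here
print(boxes[keep])  # the surviving boxes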
2. PyTorch implementation (second version)
import torch


def nms(boxes, scores, overlap=0.7, top_k=200):
    """
    Inputs:
        boxes: all predicted boxes of one image. [num_positive, 4].
        scores: confidence scores. For multi-class detection, wrap this nms call in a loop over classes. [num_positive].
        overlap: IoU threshold used for suppression.
        top_k: keep only the top_k highest-scoring boxes before running NMS.
    Returns:
        indices of the boxes that remain after NMS, and their count.
    """
    keep = scores.new(scores.size(0)).zero_().long()
    # holds the indices of the kept boxes [num_positive]
    # new(): builds a tensor with the same data type
    # if the input boxes are empty, return the empty result
    if boxes.numel() == 0:
        return keep
    x1 = boxes[:, 0]  # x1 coordinates
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)  # areas of all boxes, computed in one vectorized call
    v, idx = scores.sort(0)  # sort in ascending order
    idx = idx[-top_k:]  # indices of the top-k scores, from smallest to largest
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()  # new() without arguments creates an empty tensor of the same type
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()
    count = 0
    while idx.numel() > 0:
        i = idx[-1]  # index of the current highest-scoring box
        keep[count] = i  # store it in keep
        count += 1
        if idx.size(0) == 1:  # exit condition: all boxes have been processed
            break
        idx = idx[:-1]  # remove the last (highest-scoring) index
        # gather the coordinates of the remaining boxes into xx, yy
        torch.index_select(x1, 0, idx, out=xx1)  # select the entries of x1 at indices idx along dim 0 and write them to xx1
        torch.index_select(y1, 0, idx, out=yy1)  # torch.index_select(): pick values from a tensor along a given dimension by index
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # compute the intersection of the current max-score box with the remaining boxes
        # (clamp is used here as an elementwise max/min against a scalar, which is easy to misread)
        xx1 = torch.clamp(xx1, min=x1[i])  # max(x1, xx1): larger of the two left coordinates
        yy1 = torch.clamp(yy1, min=y1[i])  # max(y1, yy1)
        xx2 = torch.clamp(xx2, max=x2[i])  # min(x2, xx2): smaller of the two right coordinates
        yy2 = torch.clamp(yy2, max=y2[i])  # min(y2, yy2)
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1  # w = min(x2, xx2) - max(x1, xx1)
        h = yy2 - yy1  # h = min(y2, yy2) - max(y1, yy1)
        w = torch.clamp(w, min=0.0)  # max(w, 0)
        h = torch.clamp(h, min=0.0)  # max(h, 0)
        inter = w * h
        # IoU of the current max-score box with each remaining box
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # areas of the remaining boxes
        union = rem_areas + area[i] - inter  # union
        IoU = inter / union  # compute IoU
        # keep only the boxes with IoU <= overlap (note the use of le(): less-than-or-equal, returns a bool mask)
        idx = idx[IoU.le(overlap)]  # drop the boxes whose IoU exceeds overlap
    return keep, count
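A minimal usage sketch of this version (hypothetical boxes and scores; note that only the first count entries of keep are valid indices):

boxes = torch.tensor([[10., 10., 50., 50.],
                      [12., 12., 48., 48.],
                      [100., 100., 150., 150.]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep, count = nms(boxes, scores, overlap=0.5, top_k=200)
print(keep[:count])          # indices of the kept boxes, e.g. tensor([0, 2]) here
print(boxes[keep[:count]])   # the surviving boxes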
Adapted from: link
3. NumPy implementation
import numpy as np
from numpy import array


def box_area(boxes: array):
    """
    :param boxes: [N, 4]
    :return: [N]
    """
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])


def box_iou(box1: array, box2: array):
    """
    :param box1: [N, 4]
    :param box2: [M, 4]
    :return: [N, M]
    """
    area1 = box_area(box1)  # N
    area2 = box_area(box2)  # M
    # broadcasting: trailing dimensions of the two arrays must either match or be 1
    lt = np.maximum(box1[:, np.newaxis, :2], box2[:, :2])
    rb = np.minimum(box1[:, np.newaxis, 2:], box2[:, 2:])
    wh = rb - lt
    wh = np.maximum(0, wh)  # [N, M, 2]
    inter = wh[:, :, 0] * wh[:, :, 1]
    iou = inter / (area1[:, np.newaxis] + area2 - inter)
    return iou  # NxM


def numpy_nms(boxes: array, scores: array, iou_threshold: float):
    idxs = scores.argsort()  # indices sorted by score in ascending order [N]
    keep = []
    while idxs.size > 0:  # size: number of elements in the array
        max_score_index = idxs[-1]
        max_score_box = boxes[max_score_index][None, :]
        keep.append(max_score_index)
        if idxs.size == 1:
            break
        idxs = idxs[:-1]  # drop the max-score box from the indices; the remaining boxes are compared against it
        other_boxes = boxes[idxs]  # [?, 4]
        ious = box_iou(max_score_box, other_boxes)  # one box against all remaining boxes: [1, M]
        idxs = idxs[ious[0] <= iou_threshold]
    keep = np.array(keep)  # convert the kept indices to an ndarray
    return keep
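A minimal usage sketch of numpy_nms (hypothetical boxes and scores, for illustration only):

boxes = np.array([[10., 10., 50., 50.],
                  [12., 12., 48., 48.],
                  [100., 100., 150., 150.]])
scores = np.array([0.9, 0.8, 0.7])
keep = numpy_nms(boxes, scores, iou_threshold=0.5)
print(keep)         # indices of the kept boxes, e.g. [0 2] here
print(boxes[keep])  # the surviving boxes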
Soft-NMS: Principle and Code
The principle of Soft-NMS
Principle, step by step:
- Start with the set of all predicted boxes B, their corresponding scores Scores, and the NMS (IoU) threshold T.
- Define a set H to hold the selected boxes (initially empty). Sort Scores, take the box with the highest score as maxBox, and move maxBox from set B into set H, so that B no longer contains maxBox.
- Compute the IoU between maxBox and every box remaining in B. For every box whose IoU is greater than T, lower its score in some way instead of deleting it.
- Repeat steps 2-3 until B is empty; the boxes stored in H are the Soft-NMS result.
The repeated steps are: (1) sort the scores of the boxes remaining in B (re-sorting is required because the scores keep changing), take the box with the highest score as maxBox, and move it from B into H; (2) compute the IoU between maxBox and each box in B, and lower the scores of the boxes whose IoU exceeds the threshold.
- Soft-NMS returns both the boxes and their scores (the scores after Soft-NMS suppression). In plain terms, it suppresses the scores of overlapping boxes rather than removing them, so a score threshold is needed when using the results.
Form of the Soft-NMS weighting function
$M$ denotes the box with the highest score, and $b_i$ denotes each of the remaining boxes once the highest-scoring box has been removed.
The original NMS can be described as follows: the scores of all boxes whose IoU with $M$ is greater than or equal to the threshold are set to 0.
$$s_i = \begin{cases} s_i, & iou(M, b_i) < T \\ 0, & iou(M, b_i) \ge T \end{cases}$$
(1) Linear weighting of the suppressed score
$$s_i = \begin{cases} s_i, & iou(M, b_i) < T \\ s_i\,\bigl(1 - iou(M, b_i)\bigr), & iou(M, b_i) \ge T \end{cases}$$
(2) Gaussian weighting of the suppressed score
$$s_i = s_i\, e^{-\frac{iou(M, b_i)^2}{\sigma}}, \quad \forall b_i \notin D$$
(here $D$ is the set of boxes already selected as detections)
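To make the three update rules concrete, here is a minimal plain-Python sketch (the function name rescore, the example numbers, and the default sigma are illustrative assumptions, not from the original post):

import math

def rescore(score, iou, method="gaussian", iou_threshold=0.5, sigma=0.5):
    """Update the score s_i of one box b_i given its IoU with the current max-score box M."""
    if method == "hard":      # classic NMS: s_i = 0 when iou >= T, unchanged otherwise
        return 0.0 if iou >= iou_threshold else score
    if method == "linear":    # linear weighting: s_i *= (1 - iou) when iou >= T
        return score * (1.0 - iou) if iou >= iou_threshold else score
    return score * math.exp(-(iou ** 2) / sigma)  # Gaussian weighting, applied to every remaining box

# For a box with score 0.8 and IoU 0.6 against M:
# hard     -> 0.0
# linear   -> 0.8 * (1 - 0.6) = 0.32
# gaussian -> 0.8 * exp(-0.36 / 0.5) ≈ 0.39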
Soft-NMS Implementation Code
PyTorch code, unverified: there was no suitable reference implementation to compare against [corrections welcome]
from torch import Tensor
import torch


def box_area(boxes: Tensor) -> Tensor:
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])


def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
    area1 = box_area(boxes1)  # area of each box, shape (N,)
    area2 = box_area(boxes2)  # (M,)
    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]  each of the N boxes compared with all M boxes
    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]
    wh = (rb - lt).clamp(min=0)  # [N,M,2]  clamp to 0 where the boxes do not intersect
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]
    iou = inter / (area1[:, None] + area2 - inter)
    return iou  # NxM, IoU between every box in boxes1 and every box in boxes2


def soft_nms(boxes: Tensor, scores: Tensor, soft_threshold=0.01, iou_threshold=0.7, weight_method=2, sigma=0.5):
    """
    :param boxes: [N, 4], boxes passed in here are assumed to be pre-filtered (top-K by score)
    :param scores: [N]
    :param iou_threshold: e.g. 0.7
    :param soft_threshold: score threshold used by Soft-NMS to drop boxes whose score has become too low (set manually)
    :param weight_method: weighting method: 1 = linear, 2 = Gaussian
    :param sigma: sigma of the Gaussian weighting
    :return: the surviving boxes and their (suppressed) scores
    """
    keep = []
    idxs = scores.argsort()
    while idxs.numel() > 0:  # loop until empty; numel(): number of elements
        # the scores change during Soft-NMS, so re-sort every iteration to find the current maximum
        idxs = scores.argsort()  # sort by score
        if idxs.size(0) == 1:  # only one box left
            keep.append(idxs[-1])
            break
        keep_len = len(keep)
        max_score_index = idxs[-(keep_len + 1)]
        max_score_box = boxes[max_score_index][None, :]  # [1, 4]
        idxs = idxs[:-(keep_len + 1)]
        other_boxes = boxes[idxs]  # [?, 4]
        keep.append(max_score_index)  # note: this append must stay exactly here
        ious = box_iou(max_score_box, other_boxes)  # one box against all remaining boxes: [1, M]
        # Soft-NMS step: suppress the scores of the boxes whose IoU with the max-score box exceeds the threshold
        if weight_method == 1:  # linear suppression; the whole procedure only modifies scores
            ge_threshold_bool = ious[0] >= iou_threshold
            ge_threshold_idxs = idxs[ge_threshold_bool]
            scores[ge_threshold_idxs] *= (1. - ious[0][ge_threshold_bool])  # boxes below the IoU threshold keep their scores
            # idxs = idxs[scores[idxs] >= soft_threshold]  # optionally drop boxes below soft_threshold here; the scores only shrink over the iterations
        elif weight_method == 2:  # Gaussian suppression: every box is re-weighted, regardless of the IoU threshold
            scores[idxs] *= torch.exp(-(ious[0] * ious[0]) / sigma)  # weight in (0, 1]
            # idxs = idxs[scores[idxs] >= soft_threshold]
        # else:  # classic NMS
        #     idxs = idxs[ious[0] <= iou_threshold]
    # keep = scores[scores > soft_threshold].int()
    keep = idxs.new(keep)  # convert the kept indices to a Tensor
    keep = keep[scores[keep] > soft_threshold]  # apply the score threshold at the very end
    boxes = boxes[keep]  # the boxes that survive
    scores = scores[keep]  # their scores after Soft-NMS suppression
    return boxes, scores
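A minimal usage sketch of soft_nms (hypothetical boxes and scores; note that the function modifies scores in place, hence the clone() calls):

boxes = torch.tensor([[10., 10., 50., 50.],
                      [12., 12., 48., 48.],
                      [100., 100., 150., 150.]])
scores = torch.tensor([0.9, 0.8, 0.7])
kept_boxes, kept_scores = soft_nms(boxes.clone(), scores.clone(),
                                   soft_threshold=0.3, iou_threshold=0.5,
                                   weight_method=2, sigma=0.5)
print(kept_boxes)   # boxes whose suppressed score stays above soft_threshold
print(kept_scores)  # their scores after Soft-NMS suppression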
The corresponding code and tests are in the NMS_SoftNMS project; corrections are welcome!
Copyright notice: This article is an original work by CSDN blogger 「挡不住三千问的BlueCat」 and is released under the CC 4.0 BY-SA license. Please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/qq_23944915/article/details/115107566