Reference: the usage of the colon ":" in Python and PyTorch (Norstc's blog, CSDN): https://blog.csdn.net/a493823882/article/details/120126949
The colon selects a slice of a list by index: given a list a, a[st:ed] takes the elements with indices from st to ed-1, i.e. the half-open interval [st, ed). If st or ed is omitted, st defaults to 0 and ed defaults to len(a); so a[st:] takes everything from index st onward, and a[:ed] takes the elements from index 0 through ed-1.
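A quick illustration of these slicing rules:

a = [10, 20, 30, 40, 50]
a[1:3]   # [20, 30]  - indices 1 and 2, i.e. [1, 3)
a[:2]    # [10, 20]  - st defaults to 0
a[3:]    # [40, 50]  - ed defaults to len(a)
a[:]     # [10, 20, 30, 40, 50]  - the whole list

The same notation works on tensor dimensions; for example, boxes[250, 250, 0, :] further below selects all four coordinates of one anchor box.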
d2l.set_figsize()

def bbox_to_rect(bbox, color):
    # Convert the bounding box from the corner format
    # (upper-left x, upper-left y, lower-right x, lower-right y)
    # into the matplotlib format: ((upper-left x, upper-left y), width, height)
    return d2l.plt.Rectangle(
        xy=(bbox[0], bbox[1]), width=bbox[2]-bbox[0], height=bbox[3]-bbox[1],
        fill=False, edgecolor=color, linewidth=2)

The Rectangle is specified by its upper-left (x, y) corner plus its width and height.

fig = d2l.plt.imshow(img)
fig.axes.add_patch(bbox_to_rect(dog_bbox, 'blue'))
fig.axes.add_patch(bbox_to_rect(cat_bbox, 'red'));
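The cell above assumes that img, dog_bbox, and cat_bbox are already defined. A minimal setup in the style of the d2l example (the exact coordinate values here are illustrative; boxes are in pixels, corner format):

img = d2l.plt.imread('../img/catdog.jpg')
dog_bbox = [60.0, 45.0, 378.0, 516.0]    # (x1, y1, x2, y2) of the dog
cat_bbox = [400.0, 112.0, 655.0, 493.0]  # (x1, y1, x2, y2) of the cat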
4. Q&A
Q: Which annotation tools/platforms are recommended?
A: From the bullet comments: Labeling, Makesense, Vott, and similar tools. You can also annotate the data yourself, or reduce the labeling effort with semi-supervised learning, transfer learning, and so on.
More Q&A: see 哇咔咔负负得正's blog on CSDN (CV, Pytorch, databases): https://blog.csdn.net/qq_39906884?type=blog (Note: the dataset is not in the book.)
Related post: Pytorch 目标检测和数据集 (哇咔咔负负得正, CSDN): https://blog.csdn.net/qq_39906884/article/details/124619593?spm=1001.2014.3001.5502
Video: 42 锚框【动手学深度学习v2】 (跟李沐学AI, bilibili): https://www.bilibili.com/video/BV1aB4y1K7za?p=2&vd_source=eba877d881f216d635d2dfec9dc10379. Course homepage: https://courses.d2l.ai/zh-v2/; textbook: https://zh-v2.d2l.ai/
Note: the screenshot here is wrong, it is not Python's np; the d2l code is torch.set_printoptions(2)  # trim the printed output precision to two decimal places
def multibox_prior(data, sizes, ratios):
    """Generate anchor boxes with different shapes centered on each pixel"""
    in_height, in_width = data.shape[-2:]
    device, num_sizes, num_ratios = data.device, len(sizes), len(ratios)
    boxes_per_pixel = (num_sizes + num_ratios - 1)
    size_tensor = torch.tensor(sizes, device=device)
    ratio_tensor = torch.tensor(ratios, device=device)
    # Offsets are required to move the anchor to the center of a pixel.
    # Since a pixel has height 1 and width 1, we choose to offset our centers by 0.5
    offset_h, offset_w = 0.5, 0.5
    steps_h = 1.0 / in_height  # Scaled steps in the y axis
    steps_w = 1.0 / in_width  # Scaled steps in the x axis

    # Generate all center points for the anchor boxes
    center_h = (torch.arange(in_height, device=device) + offset_h) * steps_h
    center_w = (torch.arange(in_width, device=device) + offset_w) * steps_w
    shift_y, shift_x = torch.meshgrid(center_h, center_w)
    shift_y, shift_x = shift_y.reshape(-1), shift_x.reshape(-1)

    # Generate 'boxes_per_pixel' heights and widths that are later used to
    # create the anchor box corner coordinates (xmin, xmax, ymin, ymax)
    w = torch.cat((size_tensor * torch.sqrt(ratio_tensor[0]),
                   sizes[0] * torch.sqrt(ratio_tensor[1:])))\
                   * in_height / in_width  # Handle rectangular inputs
    h = torch.cat((size_tensor / torch.sqrt(ratio_tensor[0]),
                   sizes[0] / torch.sqrt(ratio_tensor[1:])))
    # Divide by 2 to get half the height and half the width
    anchor_manipulations = torch.stack((-w, -h, w, h)).T.repeat(
                                        in_height * in_width, 1) / 2

    # Each center point will have 'boxes_per_pixel' anchor boxes, so generate
    # a grid of all anchor box centers, repeated 'boxes_per_pixel' times
    out_grid = torch.stack([shift_x, shift_y, shift_x, shift_y],
                           dim=1).repeat_interleave(boxes_per_pixel, dim=0)
    output = out_grid + anchor_manipulations
    return output.unsqueeze(0)
img = d2l.plt.imread('../img/catdog.jpg')
h, w = img.shape[:2]
print(h, w)

X = torch.rand(size=(1, 3, h, w))  # batch size 1; the RGB contents don't matter here
Y = multibox_prior(X, sizes=[0.75, 0.5, 0.25], ratios=[1, 2, 0.5])
# anchor box sizes are 75%, 50%, and 25% of the image; aspect ratios are 1:1, 2:1, and 1:2
Y.shape

The image is 561 x 728, i.e. roughly 400,000 pixels. Y.shape is (1, 2042040, 4): batch size 1, about two million anchor boxes (561 x 728 x 5 = 2,042,040), and 4 coordinates for each anchor box.

boxes = Y.reshape(h, w, 5, 4)  # 5 anchor boxes are generated per pixel (3 + 3 - 1)
boxes[250, 250, 0, :]  # the ':' selects all four coordinates of the first anchor centered at pixel (250, 250)
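As a quick sanity check of the 5 = 3 + 3 - 1 anchors per pixel: the combinations used are every size with ratios[0], plus sizes[0] with the remaining ratios. Their widths and heights (in normalized coordinates, before the in_height / in_width rescaling for rectangular images) can be reproduced with this standalone snippet, which is an illustration rather than part of the book's code:

s = torch.tensor([0.75, 0.5, 0.25])
r = torch.tensor([1.0, 2.0, 0.5])
w = torch.cat((s * torch.sqrt(r[0]), 0.75 * torch.sqrt(r[1:])))
h = torch.cat((s / torch.sqrt(r[0]), 0.75 / torch.sqrt(r[1:])))
print(torch.stack((w, h), dim=1))
# (w, h) pairs: (0.75, 0.75), (0.50, 0.50), (0.25, 0.25),
#               (0.75*sqrt(2), 0.75/sqrt(2)) ~ (1.06, 0.53),
#               (0.75/sqrt(2), 0.75*sqrt(2)) ~ (0.53, 1.06)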
def show_bboxes(axes, bboxes, labels=None, colors=None):
    """Show all bounding boxes"""
    def _make_list(obj, default_values=None):
        if obj is None:
            obj = default_values
        elif not isinstance(obj, (list, tuple)):
            obj = [obj]
        return obj

    labels = _make_list(labels)
    colors = _make_list(colors, ['b', 'g', 'r', 'm', 'c'])
    for i, bbox in enumerate(bboxes):
        color = colors[i % len(colors)]
        rect = d2l.bbox_to_rect(bbox.detach().numpy(), color)
        axes.add_patch(rect)
        if labels and len(labels) > i:
            text_color = 'k' if color == 'w' else 'w'
            axes.text(rect.xy[0], rect.xy[1], labels[i],
                      va='center', ha='center', fontsize=9, color=text_color,
                      bbox=dict(facecolor=color, lw=0))
Draw all the anchor boxes centered on the pixel (250, 250).
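The corresponding plotting call, roughly as in the d2l book (bbox_scale maps the normalized [0, 1] coordinates back to pixel coordinates and is reused in the later cells):

d2l.set_figsize()
bbox_scale = torch.tensor((w, h, w, h))
fig = d2l.plt.imshow(img)
show_bboxes(fig.axes, boxes[250, 250, :, :] * bbox_scale,
            ['s=0.75, r=1', 's=0.5, r=1', 's=0.25, r=1', 's=0.75, r=2',
             's=0.75, r=0.5'])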
def box_iou(boxes1, boxes2):
    """Compute pairwise IoU across two lists of anchor or bounding boxes"""
    box_area = lambda boxes: ((boxes[:, 2] - boxes[:, 0]) *
                              (boxes[:, 3] - boxes[:, 1]))
    # Shapes of boxes1, boxes2, areas1, areas2:
    # boxes1: (no. of boxes1, 4),
    # boxes2: (no. of boxes2, 4),
    # areas1: (no. of boxes1,),
    # areas2: (no. of boxes2,)
    areas1 = box_area(boxes1)
    areas2 = box_area(boxes2)
    # Shapes of inter_upperlefts, inter_lowerrights, inters:
    # (no. of boxes1, no. of boxes2, 2)
    inter_upperlefts = torch.max(boxes1[:, None, :2], boxes2[:, :2])
    inter_lowerrights = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])
    inters = (inter_lowerrights - inter_upperlefts).clamp(min=0)
    # Shapes of inter_areas and union_areas: (no. of boxes1, no. of boxes2)
    inter_areas = inters[:, :, 0] * inters[:, :, 1]
    union_areas = areas1[:, None] + areas2 - inter_areas
    return inter_areas / union_areas
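A quick hand check with made-up boxes: two 2x2 boxes that overlap in a 1x1 region have intersection area 1 and union area 4 + 4 - 1 = 7, so the IoU should be 1/7, about 0.14:

b1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
b2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
box_iou(b1, b2)  # tensor([[0.14]])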
def assign_anchor_to_bbox(ground_truth, anchors, device, iou_threshold=0.5):
    """Assign the closest ground-truth bounding box to each anchor box"""
    num_anchors, num_gt_boxes = anchors.shape[0], ground_truth.shape[0]
    # Element x_ij in row i and column j is the IoU of anchor box i and
    # ground-truth bounding box j
    jaccard = box_iou(anchors, ground_truth)
    # Tensor holding the ground-truth bounding box assigned to each anchor box
    anchors_bbox_map = torch.full((num_anchors,), -1, dtype=torch.long,
                                  device=device)
    # Assign ground-truth bounding boxes according to the threshold
    max_ious, indices = torch.max(jaccard, dim=1)
    anc_i = torch.nonzero(max_ious >= iou_threshold).reshape(-1)
    box_j = indices[max_ious >= iou_threshold]
    anchors_bbox_map[anc_i] = box_j
    col_discard = torch.full((num_anchors,), -1)
    row_discard = torch.full((num_gt_boxes,), -1)
    for _ in range(num_gt_boxes):
        max_idx = torch.argmax(jaccard)  # Largest remaining IoU
        box_idx = (max_idx % num_gt_boxes).long()
        anc_idx = (max_idx / num_gt_boxes).long()
        anchors_bbox_map[anc_idx] = box_idx
        jaccard[:, box_idx] = col_discard
        jaccard[anc_idx, :] = row_discard
    return anchors_bbox_map
#@save
def offset_boxes(anchors, assigned_bb, eps=1e-6):
    """Transform for anchor box offsets"""
    c_anc = d2l.box_corner_to_center(anchors)
    c_assigned_bb = d2l.box_corner_to_center(assigned_bb)
    offset_xy = 10 * (c_assigned_bb[:, :2] - c_anc[:, :2]) / c_anc[:, 2:]
    offset_wh = 5 * torch.log(eps + c_assigned_bb[:, 2:] / c_anc[:, 2:])
    offset = torch.cat([offset_xy, offset_wh], axis=1)
    return offset
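multibox_detection further below relies on the inverse of this transform, offset_inverse, which these notes do not copy; the book's version looks roughly like this (decoding predicted offsets back into corner-format boxes):

def offset_inverse(anchors, offset_preds):
    """Predict bounding boxes from anchor boxes with predicted offsets"""
    anc = d2l.box_corner_to_center(anchors)
    pred_bbox_xy = (offset_preds[:, :2] * anc[:, 2:] / 10) + anc[:, :2]
    pred_bbox_wh = torch.exp(offset_preds[:, 2:] / 5) * anc[:, 2:]
    pred_bbox = torch.cat((pred_bbox_xy, pred_bbox_wh), axis=1)
    predicted_bbox = d2l.box_center_to_corner(pred_bbox)
    return predicted_bbox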
ground_truth = torch.tensor([[0, 0.1, 0.08, 0.52, 0.92],
                             [1, 0.55, 0.2, 0.9, 0.88]])
anchors = torch.tensor([[0, 0.1, 0.2, 0.3], [0.15, 0.2, 0.4, 0.4],
                        [0.63, 0.05, 0.88, 0.98], [0.66, 0.45, 0.8, 0.8],
                        [0.57, 0.3, 0.92, 0.9]])

fig = d2l.plt.imshow(img)
show_bboxes(fig.axes, ground_truth[:, 1:] * bbox_scale, ['dog', 'cat'], 'k')
show_bboxes(fig.axes, anchors * bbox_scale, ['0', '1', '2', '3', '4']);
multibox_target(anchors, labels): this function (defined in full in the d2l book; it builds on assign_anchor_to_bbox and offset_boxes) labels anchor boxes using the ground-truth bounding boxes and returns (offsets, mask, class labels). In the example it is called as

labels = multibox_target(anchors.unsqueeze(dim=0), ground_truth.unsqueeze(dim=0))

labels[2] is the class label of each anchor box: tensor([[0, 1, 2, 0, 2]]), where 0 is the background class.
labels[1] is the mask: tensor([[0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 1., 1., 1., 1.]]); its length is the number of anchor boxes times 4, and background anchors get mask 0.
labels[0] is the offsets: tensor([[-0.00e+00, -0.00e+00, -0.00e+00, -0.00e+00, 1.40e+00, 1.00e+01, 2.59e+00, 7.18e+00, -1.20e+00, 2.69e-01, 1.68e+00, -1.57e+00, -0.00e+00, -0.00e+00, -0.00e+00, -0.00e+00, -5.71e-01, -1.00e+00, 4.17e-06, 6.26e-01]]); four offsets per anchor box, with the offsets of background anchors set to zero.
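A small illustration of what the mask is for (sketched here; this is how the masked offsets are later consumed when training a detector): multiplying the offsets by the mask zeroes out the background anchors, so they do not contribute to the box regression loss.

bbox_offset, bbox_mask, class_labels = labels[0], labels[1], labels[2]
masked_offsets = bbox_offset * bbox_mask  # offsets of background anchors become 0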
def nms(boxes, scores, iou_threshold):
    """Sort predicted bounding boxes by their confidence scores"""
    B = torch.argsort(scores, dim=-1, descending=True)
    keep = []  # Indices of the predicted bounding boxes that will be kept
    while B.numel() > 0:
        i = B[0]
        keep.append(i)
        if B.numel() == 1:
            break
        iou = box_iou(boxes[i, :].reshape(-1, 4),
                      boxes[B[1:], :].reshape(-1, 4)).reshape(-1)
        inds = torch.nonzero(iou <= iou_threshold).reshape(-1)
        B = B[inds + 1]
    return torch.tensor(keep, device=boxes.device)
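A tiny standalone check of nms with made-up boxes: the second box overlaps the first with an IoU of about 0.81 and gets suppressed, while the disjoint third box is kept.

boxes = torch.tensor([[0.0, 0.0, 1.0, 1.0],
                      [0.1, 0.1, 1.0, 1.0],
                      [2.0, 2.0, 3.0, 3.0]])
scores = torch.tensor([0.9, 0.8, 0.7])
nms(boxes, scores, iou_threshold=0.5)  # expected: tensor([0, 2])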
def multibox_detection(cls_probs, offset_preds, anchors, nms_threshold=0.5,
                       pos_threshold=0.009999999):
    """Predict bounding boxes using non-maximum suppression"""
    device, batch_size = cls_probs.device, cls_probs.shape[0]
    anchors = anchors.squeeze(0)
    num_classes, num_anchors = cls_probs.shape[1], cls_probs.shape[2]
    out = []
    for i in range(batch_size):
        cls_prob, offset_pred = cls_probs[i], offset_preds[i].reshape(-1, 4)
        conf, class_id = torch.max(cls_prob[1:], 0)
        predicted_bb = offset_inverse(anchors, offset_pred)
        keep = nms(predicted_bb, conf, nms_threshold)

        # Find all non-keep indices and set the class to background
        all_idx = torch.arange(num_anchors, dtype=torch.long, device=device)
        combined = torch.cat((keep, all_idx))
        uniques, counts = combined.unique(return_counts=True)
        non_keep = uniques[counts == 1]
        all_id_sorted = torch.cat((keep, non_keep))
        class_id[non_keep] = -1
        class_id = class_id[all_id_sorted]
        conf, predicted_bb = conf[all_id_sorted], predicted_bb[all_id_sorted]
        # pos_threshold is a threshold for positive (non-background) predictions
        below_min_idx = (conf < pos_threshold)
        class_id[below_min_idx] = -1
        conf[below_min_idx] = 1 - conf[below_min_idx]
        pred_info = torch.cat((class_id.unsqueeze(1),
                               conf.unsqueeze(1),
                               predicted_bb), dim=1)
        out.append(pred_info)
    return torch.stack(out)
anchors = torch.tensor([[0.1, 0.08, 0.52, 0.92], [0.08, 0.2, 0.56, 0.95],
                        [0.15, 0.3, 0.62, 0.91], [0.55, 0.2, 0.9, 0.88]])
offset_preds = torch.tensor([0] * anchors.numel())
cls_probs = torch.tensor([[0] * 4,  # Predicted background probabilities
                          [0.9, 0.8, 0.7, 0.1],  # Predicted dog probabilities
                          [0.1, 0.2, 0.3, 0.9]])  # Predicted cat probabilities

fig = d2l.plt.imshow(img)
show_bboxes(fig.axes, anchors * bbox_scale,
            ['dog=0.9', 'dog=0.8', 'dog=0.7', 'cat=0.9'])
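The plotting loop below uses output, which in the book is produced by running multibox_detection on these predictions, roughly:

output = multibox_detection(cls_probs.unsqueeze(dim=0),
                            offset_preds.unsqueeze(dim=0),
                            anchors.unsqueeze(dim=0),
                            nms_threshold=0.5)

Each row of output[0] is (class index, confidence, x1, y1, x2, y2), with class -1 marking boxes removed as background.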
fig = d2l.plt.imshow(img)
for i in output[0].detach().numpy():
    if i[0] == -1:
        continue
    label = ('dog=', 'cat=')[int(i[0])] + str(i[1])
    show_bboxes(fig.axes, [torch.tensor(i[2:]) * bbox_scale], label)
This concludes the notes on colon usage, video 41, 13.3 Object Detection and Bounding Boxes (Q&A), and 13.4 Anchor Boxes.