物体检测-Faster R-CNN
物体检测-Faster R-CNN
物体检测是计算机视觉中的一个重要的研究领域,在人流检测,行人跟踪,自动驾驶,医学影像等领域有着广泛的应用。不同于简单的图像分类,物体检测旨在对图像中的目标进行精确识别,包括物体的位置和分类,因此能够应用于更多高层视觉处理的场景。例如在自动驾驶领域,需要辨识摄像头拍摄的图像中的车辆、行人、交通指示牌及其位置,以便进一步根据这些数据决定驾驶策略。上一期学习案例中,我们聚焦于YOLO算法,YOLO(You Only Look Once)是一种one-stage物体检测算法,在本期案例中,我们介绍一种two-stage算法——Faster R-CNN,将目标区域检测和类别识别分为两个任务进行物体检测。
注意事项:
-
本案例使用框架**:** Pytorch-1.0.0
-
本案例使用硬件规格**:** 8 vCPU + 64 GiB + 1 x Tesla V100-PCIE-32GB
-
进入运行环境方法:点此链接进入AI Gallery,点击Run in ModelArts按钮进入ModelArts运行环境,如需使用GPU,您可以在ModelArts JupyterLab运行界面右边的工作区进行切换
-
运行代码方法**:** 点击本页面顶部菜单栏的三角形运行按钮或按Ctrl+Enter键 运行每个方块中的代码
-
JupyterLab的详细用法: 请参考《ModelArts JupyterLab使用指导》
-
碰到问题的解决办法: 请参考《ModelArts JupyterLab常见问题解决办法》
1.数据准备
首先,我们将需要的代码和数据下载到Notebook。
本案例我们使用PASCAL VOC 2007数据集训练模型,共20个类别的物体。
import os
from modelarts.session import Session

sess = Session()

# Select the OBS bucket path for the current region; the dataset archive is
# only hosted in cn-north-1 and cn-north-4.
if sess.region_name == 'cn-north-1':
    bucket_path = "modelarts-labs/notebook/DL_object_detection_faster/fasterrcnn.tar.gz"
elif sess.region_name == 'cn-north-4':
    bucket_path = "modelarts-labs-bj4/notebook/DL_object_detection_faster/fasterrcnn.tar.gz"
else:
    # FIX: the original left bucket_path undefined on this branch, so the
    # download below raised NameError instead of just printing this message.
    bucket_path = None
    print("请更换地区到北京一或北京四")

# Download once: skip when the code has already been extracted, or when no
# bucket is available for this region.
if bucket_path is not None and not os.path.exists('./experiments'):
    sess.download_data(bucket_path=bucket_path, path="./fasterrcnn.tar.gz")

if os.path.exists('./fasterrcnn.tar.gz'):
    # Unpack the archive.
    os.system("tar -xf ./fasterrcnn.tar.gz")
    # Remove the archive to free disk space.
    os.system("rm -r ./fasterrcnn.tar.gz")
2.安装依赖并引用
# Install the Python packages this notebook depends on (pinned versions
# known to work with the Pytorch-1.0.0 kernel).
!pip install pycocotools==2.0.0
!pip install torchvision==0.4.0
!pip install protobuf==3.9.0
Requirement already satisfied: pycocotools==2.0.0 in /home/ma-user/anaconda3/envs/Pytorch-1.0.0/lib/python3.6/site-packages
[33mYou are using pip version 9.0.1, however version 20.3.3 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.[0m
Requirement already satisfied: torchvision==0.4.0 in /home/ma-user/anaconda3/envs/Pytorch-1.0.0/lib/python3.6/site-packages
Requirement already satisfied: torch==1.2.0 in /home/ma-user/anaconda3/envs/Pytorch-1.0.0/lib/python3.6/site-packages (from torchvision==0.4.0)
Requirement already satisfied: pillow>=4.1.1 in /home/ma-user/anaconda3/envs/Pytorch-1.0.0/lib/python3.6/site-packages (from torchvision==0.4.0)
Requirement already satisfied: numpy in /home/ma-user/anaconda3/envs/Pytorch-1.0.0/lib/python3.6/site-packages (from torchvision==0.4.0)
Requirement already satisfied: six in /home/ma-user/anaconda3/envs/Pytorch-1.0.0/lib/python3.6/site-packages (from torchvision==0.4.0)
[33mYou are using pip version 9.0.1, however version 20.3.3 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.[0m
Requirement already satisfied: protobuf==3.9.0 in /home/ma-user/anaconda3/envs/Pytorch-1.0.0/lib/python3.6/site-packages
Requirement already satisfied: setuptools in /home/ma-user/anaconda3/envs/Pytorch-1.0.0/lib/python3.6/site-packages (from protobuf==3.9.0)
Requirement already satisfied: six>=1.9 in /home/ma-user/anaconda3/envs/Pytorch-1.0.0/lib/python3.6/site-packages (from protobuf==3.9.0)
[33mYou are using pip version 9.0.1, however version 20.3.3 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.[0m
import tools._init_paths
%matplotlib inline
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorboardX as tb
from datasets.factory import get_imdb
from model.train_val import get_training_roidb, train_net
from model.config import cfg, cfg_from_file, cfg_from_list, get_output_dir, get_output_tb_dir
import roi_data_layer.roidb as rdl_roidb
from roi_data_layer.layer import RoIDataLayer
import utils.timer
import pickle
import torch
import torch.optim as optim
from nets.vgg16 import vgg16
import numpy as np
import os
import sys
import glob
import time
3.神经网络搭建
3.1模型训练超参设置
为了减少训练时间,我们在预训练模型的基础上进行训练。这里,我们使用VGG16作为FasterRCNN的主干网络。
# Dataset splits: train on VOC 2007 trainval, validate on VOC 2007 test.
imdb_name = "voc_2007_trainval"
imdbval_name = "voc_2007_test"
# Location of the ImageNet-pretrained VGG16 weights used to initialise the backbone.
weight = "./data/imagenet_weights/vgg16.pth"
# Number of training iterations (kept small for this demo).
max_iters = 100
# Path of the model configuration (YAML) file.
cfg_file = './experiments/cfgs/vgg16.yml'
# Optional list of extra config overrides; none are used here.
set_cfgs = None
if cfg_file is not None:
    cfg_from_file(cfg_file)
if set_cfgs is not None:
    cfg_from_list(set_cfgs)
print('Using config:')
print(cfg)
Using config:
{'TRAIN': {'LEARNING_RATE': 0.001, 'MOMENTUM': 0.9, 'WEIGHT_DECAY': 0.0001, 'GAMMA': 0.1, 'STEPSIZE': [30000], 'DISPLAY': 20, 'DOUBLE_BIAS': True, 'TRUNCATED': False, 'BIAS_DECAY': False, 'USE_GT': False, 'ASPECT_GROUPING': False, 'SNAPSHOT_KEPT': 3, 'SUMMARY_INTERVAL': 180, 'SCALES': [600], 'MAX_SIZE': 1000, 'IMS_PER_BATCH': 1, 'BATCH_SIZE': 256, 'FG_FRACTION': 0.25, 'FG_THRESH': 0.5, 'BG_THRESH_HI': 0.5, 'BG_THRESH_LO': 0.0, 'USE_FLIPPED': True, 'BBOX_REG': True, 'BBOX_THRESH': 0.5, 'SNAPSHOT_ITERS': 5000, 'SNAPSHOT_PREFIX': 'vgg16_faster_rcnn', 'BBOX_NORMALIZE_TARGETS': True, 'BBOX_INSIDE_WEIGHTS': [1.0, 1.0, 1.0, 1.0], 'BBOX_NORMALIZE_TARGETS_PRECOMPUTED': True, 'BBOX_NORMALIZE_MEANS': [0.0, 0.0, 0.0, 0.0], 'BBOX_NORMALIZE_STDS': [0.1, 0.1, 0.2, 0.2], 'PROPOSAL_METHOD': 'gt', 'HAS_RPN': True, 'RPN_POSITIVE_OVERLAP': 0.7, 'RPN_NEGATIVE_OVERLAP': 0.3, 'RPN_CLOBBER_POSITIVES': False, 'RPN_FG_FRACTION': 0.5, 'RPN_BATCHSIZE': 256, 'RPN_NMS_THRESH': 0.7, 'RPN_PRE_NMS_TOP_N': 12000, 'RPN_POST_NMS_TOP_N': 2000, 'RPN_BBOX_INSIDE_WEIGHTS': [1.0, 1.0, 1.0, 1.0], 'RPN_POSITIVE_WEIGHT': -1.0, 'USE_ALL_GT': True}, 'TEST': {'SCALES': [600], 'MAX_SIZE': 1000, 'NMS': 0.3, 'SVM': False, 'BBOX_REG': True, 'HAS_RPN': True, 'PROPOSAL_METHOD': 'gt', 'RPN_NMS_THRESH': 0.7, 'RPN_PRE_NMS_TOP_N': 6000, 'RPN_POST_NMS_TOP_N': 300, 'MODE': 'nms', 'RPN_TOP_N': 5000}, 'RESNET': {'MAX_POOL': False, 'FIXED_BLOCKS': 1}, 'MOBILENET': {'REGU_DEPTH': False, 'FIXED_LAYERS': 5, 'WEIGHT_DECAY': 4e-05, 'DEPTH_MULTIPLIER': 1.0}, 'PIXEL_MEANS': array([[[102.9801, 115.9465, 122.7717]]]), 'RNG_SEED': 3, 'ROOT_DIR': '/home/ma-user/work', 'DATA_DIR': '/home/ma-user/work/data', 'MATLAB': 'matlab', 'EXP_DIR': 'vgg16', 'USE_GPU_NMS': True, 'POOLING_MODE': 'align', 'POOLING_SIZE': 7, 'ANCHOR_SCALES': [8, 16, 32], 'ANCHOR_RATIOS': [0.5, 1, 2], 'RPN_CHANNELS': 512}
3.2定义读取数据集函数
数据集的标注格式是PASCAL VOC格式。
def combined_roidb(imdb_names):
    """Load one or more datasets and return the combined (imdb, roidb).

    Args:
        imdb_names: dataset name, or several names joined with '+'
            (e.g. "voc_2007_trainval+voc_2012_trainval").

    Returns:
        (imdb, roidb): the image database object and the list of ROI
        annotation records used for training.
    """
    def get_roidb(imdb_name):
        # Load a single dataset by name.
        imdb = get_imdb(imdb_name)
        print('Loaded dataset `{:s}` for training'.format(imdb.name))
        # Use ground truth as the proposal strategy (cfg PROPOSAL_METHOD is 'gt').
        imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
        print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
        roidb = get_training_roidb(imdb)
        return roidb

    roidbs = [get_roidb(s) for s in imdb_names.split('+')]
    roidb = roidbs[0]
    if len(roidbs) > 1:
        # Concatenate the roidbs of all datasets into one list.
        for r in roidbs[1:]:
            roidb.extend(r)
        tmp = get_imdb(imdb_names.split('+')[1])
        # FIX: the original referenced `datasets.imdb.imdb` although only
        # `get_imdb` was imported from `datasets.factory`, which raised
        # NameError on this branch; import the module explicitly.
        import datasets.imdb
        imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
    else:
        imdb = get_imdb(imdb_names)
    return imdb, roidb
3.3设置模型训练参数
# Fix the random seed for reproducibility.
np.random.seed(cfg.RNG_SEED)
# Load the training dataset.
imdb, roidb = combined_roidb(imdb_name)
print('{:d} roidb entries'.format(len(roidb)))
# Directory where model snapshots will be written.
output_dir = get_output_dir(imdb,None)
print('Output will be saved to `{:s}`'.format(output_dir))
# Directory where summary logs will be written.
tb_dir = get_output_tb_dir(imdb, None)
print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))
# Load the validation dataset with horizontal flipping disabled,
# then restore the flag for training.
orgflip = cfg.TRAIN.USE_FLIPPED
cfg.TRAIN.USE_FLIPPED = False
_, valroidb = combined_roidb(imdbval_name)
print('{:d} validation roidb entries'.format(len(valroidb)))
cfg.TRAIN.USE_FLIPPED = orgflip
# Create the backbone network.
# This case uses VGG16; other backbones (e.g. ResNet) can be tried as well.
net = vgg16()
Loaded dataset `voc_2007_trainval` for training
Set proposal method: gt
Appending horizontally-flipped training examples...
voc_2007_trainval gt roidb loaded from /home/ma-user/work/data/cache/voc_2007_trainval_gt_roidb.pkl
done
Preparing training data...
done
10022 roidb entries
Output will be saved to `/home/ma-user/work/output/vgg16/voc_2007_trainval/default`
TensorFlow summaries will be saved to `/home/ma-user/work/tensorboard/vgg16/voc_2007_trainval/default`
Loaded dataset `voc_2007_test` for training
Set proposal method: gt
Preparing training data...
voc_2007_test gt roidb loaded from /home/ma-user/work/data/cache/voc_2007_test_gt_roidb.pkl
done
4952 validation roidb entries
from model.train_val import filter_roidb, SolverWrapper

# Filter out invalid ROI entries from both splits.
roidb = filter_roidb(roidb)
valroidb = filter_roidb(valroidb)

# Bundle network, data and output locations into the training solver.
sw = SolverWrapper(
    net,
    imdb,
    roidb,
    valroidb,
    output_dir,
    tb_dir,
    pretrained_model=weight)
print('Solving...')
Filtered 0 roidb entries: 10022 -> 10022
Filtered 0 roidb entries: 4952 -> 4952
Solving...
# Show all attributes stored on the solver wrapper.
sw.__dict__.keys()
dict_keys(['net', 'imdb', 'roidb', 'valroidb', 'output_dir', 'tbdir', 'tbvaldir', 'pretrained_model'])
# sw.net is the backbone network (VGG16).
print(sw.net)
vgg16()
3.4定义神经网络结构
使用PyTorch搭建神经网络。
部分实现细节可以去相应的文件夹查看源码。
# Attach ROI data layers to the solver: one for training, one (randomised)
# for validation.
sw.data_layer = RoIDataLayer(sw.roidb, sw.imdb.num_classes)
sw.data_layer_val = RoIDataLayer(sw.valroidb, sw.imdb.num_classes, random=True)
# Build the network graph: adds the ROI and classifier parts on top of VGG16.
lr, train_op = sw.construct_graph()
# Look for previous snapshots so training can resume from a checkpoint.
lsf, nfiles, sfiles = sw.find_previous()
if lsf == 0:
    # No snapshot found: initialise from the pretrained weights.
    lr, last_snapshot_iter, stepsizes, np_paths, ss_paths = sw.initialize()
else:
    # Resume from the most recent snapshot.
    lr, last_snapshot_iter, stepsizes, np_paths, ss_paths = sw.restore(str(sfiles[-1]), str(nfiles[-1]))
# NOTE(review): `iter` shadows the Python builtin of the same name; the
# training loop below relies on this variable, so it is kept as-is.
iter = last_snapshot_iter + 1
last_summary_time = time.time()
# Continue the learning-rate schedule from where the snapshot left off.
stepsizes.append(max_iters)
stepsizes.reverse()
next_stepsize = stepsizes.pop()
# Switch the network to training mode and move it to its device.
print("网络结构:")
sw.net.train()
sw.net.to(sw.net._device)
Restoring model snapshots from /home/ma-user/work/output/vgg16/voc_2007_trainval/default/vgg16_faster_rcnn_iter_100.pth
Restored.
网络结构:
vgg16(
(vgg): VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace=True)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace=True)
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace=True)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace=True)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace=True)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace=True)
(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(18): ReLU(inplace=True)
(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace=True)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace=True)
(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(25): ReLU(inplace=True)
(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(27): ReLU(inplace=True)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace=True)
(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
(classifier): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace=True)
(2): Dropout(p=0.5, inplace=False)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace=True)
(5): Dropout(p=0.5, inplace=False)
)
)
(rpn_net): Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
(rpn_cls_score_net): Conv2d(512, 18, kernel_size=[1, 1], stride=(1, 1))
(rpn_bbox_pred_net): Conv2d(512, 36, kernel_size=[1, 1], stride=(1, 1))
(cls_score_net): Linear(in_features=4096, out_features=21, bias=True)
(bbox_pred_net): Linear(in_features=4096, out_features=84, bias=True)
)
3.5开始训练
# Main training loop (indentation reconstructed from the notebook export —
# structure follows the standard tf-faster-rcnn train_model loop; verify
# against the original repository if behaviour looks off).
while iter < max_iters + 1:
    if iter == next_stepsize + 1:
        # Learning-rate step boundary: snapshot, then decay the LR by GAMMA.
        sw.snapshot(iter)
        lr *= cfg.TRAIN.GAMMA
        scale_lr(sw.optimizer, cfg.TRAIN.GAMMA)
        next_stepsize = stepsizes.pop()
    utils.timer.timer.tic()
    # Pull the next minibatch from the ROI data layer.
    blobs = sw.data_layer.forward()
    now = time.time()
    if iter == 1 or now - last_summary_time > cfg.TRAIN.SUMMARY_INTERVAL:
        # Periodic step WITH summaries: compute losses, train, and log
        # training + validation summaries.
        rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss, summary = \
            sw.net.train_step_with_summary(blobs, sw.optimizer)
        for _sum in summary:
            sw.writer.add_summary(_sum, float(iter))
        # One validation batch for the validation summary writer.
        blobs_val = sw.data_layer_val.forward()
        summary_val = sw.net.get_summary(blobs_val)
        for _sum in summary_val:
            sw.valwriter.add_summary(_sum, float(iter))
        last_summary_time = now
    else:
        # Regular training step without summaries.
        rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, total_loss = \
            sw.net.train_step(blobs, sw.optimizer)
    utils.timer.timer.toc()
    if iter % (cfg.TRAIN.DISPLAY) == 0:
        # Print the loss breakdown every DISPLAY iterations.
        print('iter: %d / %d, total loss: %.6f\n >>> rpn_loss_cls: %.6f\n '
              '>>> rpn_loss_box: %.6f\n >>> loss_cls: %.6f\n >>> loss_box: %.6f\n >>> lr: %f' % \
              (iter, max_iters, total_loss, rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, lr))
        print('speed: {:.3f}s / iter'.format(
            utils.timer.timer.average_time()))
    # Periodically store a snapshot for checkpoint/resume.
    if iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
        last_snapshot_iter = iter
        ss_path, np_path = sw.snapshot(iter)
        np_paths.append(np_path)
        ss_paths.append(ss_path)
        # Keep only the most recent SNAPSHOT_KEPT snapshots.
        if len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT:
            sw.remove_snapshot(np_paths, ss_paths)
    iter += 1
# Final snapshot if the last iteration was not already saved.
if last_snapshot_iter != iter - 1:
    sw.snapshot(iter - 1)
sw.writer.close()
sw.valwriter.close()
4.测试部分
在这部分中,我们利用训练得到的模型进行推理测试。
%matplotlib inline
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# 将路径转入lib
import tools._init_paths
from model.config import cfg
from model.test import im_detect
from torchvision.ops import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import os, cv2
import argparse
from nets.vgg16 import vgg16
from nets.resnet_v1 import resnetv1
from model.bbox_transform import clip_boxes, bbox_transform_inv
import torch
4.1参数定义
# The 20 PASCAL VOC object categories, preceded by the background class;
# the tuple index of each name is its class id.
CLASSES = (
    '__background__',
    'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
    'bus', 'car', 'cat', 'chair', 'cow',
    'diningtable', 'dog', 'horse', 'motorbike', 'person',
    'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor',
)

# Snapshot filename templates, keyed by backbone name ('%d' is the iteration).
NETS = {
    'vgg16': ('vgg16_faster_rcnn_iter_%d.pth',),
    'res101': ('res101_faster_rcnn_iter_%d.pth',),
}

# Training-split names, keyed by dataset identifier.
DATASETS = {
    'pascal_voc': ('voc_2007_trainval',),
    'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',),
}
4.2结果绘制
将预测的标签和边界框绘制在原图上。
def vis_detections(im, class_dets, thresh=0.5):
    """Draw detected bounding boxes.

    Args:
        im: image as loaded by cv2 (BGR channel order).
        class_dets: dict mapping class name -> array of detections, each row
            being [x1, y1, x2, y2, score].
        thresh: minimum score for a detection to be drawn.
    """
    # cv2 loads BGR; reverse the channel axis so matplotlib shows true colours.
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for class_name, dets in class_dets.items():
        # Keep only the rows whose score passes the threshold.
        keep = np.where(dets[:, -1] >= thresh)[0]
        if len(keep) == 0:
            continue
        for idx in keep:
            x1, y1, x2, y2 = dets[idx, :4]
            score = dets[idx, -1]
            ax.add_patch(
                plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
                              fill=False, edgecolor='red', linewidth=3.5)
            )
            ax.text(x1, y1 - 2,
                    '{:s} {:.3f}'.format(class_name, score),
                    bbox=dict(facecolor='blue', alpha=0.5),
                    fontsize=14, color='white')
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
4.3准备测试图片
我们将测试图片传到test文件夹下,我们准备了两张图片进行测试,大家也可以通过notebook的upload按钮上传自己的测试数据。注意,测试数据需要是图片,并且放在test文件夹下。
# Directory containing the test images to run inference on.
test_file = "./test"
4.4模型推理
这里我们加载一个预先训练好的模型,也可以选择案例中训练的模型。
import cv2
from utils.timer import Timer
from model.test import im_detect
from torchvision.ops import nms

cfg.TEST.HAS_RPN = True  # Use RPN for proposals
# Model checkpoint location.
# Here we load a model trained for 110000 iterations; point this at your own
# checkpoint to use the model trained in this notebook instead.
saved_model = "./models/vgg16-voc0712/vgg16_faster_rcnn_iter_110000.pth"
print('trying to load weights from ', saved_model)
# Build the VGG16 backbone.
net = vgg16()
# Construct the full detection network (21 classes = 20 VOC classes + background).
net.create_architecture(21, tag='default', anchor_scales=[8, 16, 32])
# Load the checkpoint weights onto CPU storage.
net.load_state_dict(torch.load(saved_model, map_location=lambda storage, loc: storage))
net.eval()
# Move the network to its inference device.
net.to(net._device)
print('Loaded network {:s}'.format(saved_model))

# Run detection on every image in the test directory
# (indentation reconstructed from the notebook export).
for file in os.listdir(test_file):
    # Skip macOS resource-fork files ("._*").
    if file.startswith("._") == False:
        file_path = os.path.join(test_file, file)
        print(file_path)
        # Read the test image.
        im = cv2.imread(file_path)
        # Time the forward pass.
        timer = Timer()
        timer.tic()
        # Detect: scores and boxes are per-proposal, per-class.
        scores, boxes = im_detect(net, im)
        print(scores.shape, boxes.shape)
        timer.toc()
        print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time(), boxes.shape[0]))
        # Score threshold for display and IoU threshold for NMS.
        CONF_THRESH = 0.7
        NMS_THRESH = 0.3
        cls_dets = {}
        # Per-class non-maximum suppression to filter overlapping boxes.
        for cls_ind, cls in enumerate(CLASSES[1:]):
            cls_ind += 1  # skip background (class 0)
            cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
            cls_scores = scores[:, cls_ind]
            dets = np.hstack((cls_boxes,
                              cls_scores[:, np.newaxis])).astype(np.float32)
            keep = nms(torch.from_numpy(cls_boxes), torch.from_numpy(cls_scores), NMS_THRESH)
            dets = dets[keep.numpy(), :]
            if len(dets) > 0:
                if cls in cls_dets:
                    cls_dets[cls] = np.vstack([cls_dets[cls], dets])
                else:
                    cls_dets[cls] = dets
        # Draw the surviving detections on the image.
        vis_detections(im, cls_dets, thresh=CONF_THRESH)
        plt.show()
trying to load weights from ./models/vgg16-voc0712/vgg16_faster_rcnn_iter_110000.pth
Loaded network ./models/vgg16-voc0712/vgg16_faster_rcnn_iter_110000.pth
./test/test_image_1.jpg
(300, 21) (300, 84)
Detection took 0.042s for 300 object proposals
./test/test_image_0.jpg
(300, 21) (300, 84)
Detection took 0.039s for 300 object proposals
- 点赞
- 收藏
- 关注作者
评论(0)