Mosaic, mixup, and CutMix Data Augmentation
Contents
mixup data augmentation
Python OpenCV code
PyTorch classification code
CutMix data augmentation
Python OpenCV CutMix code for classification
PyTorch CutMix code
mixup data augmentation:
Mix the two images at a ratio of 0.5.
Python OpenCV code:
import cv2

image1 = cv2.imread('aaa.jpg')
image1 = cv2.resize(image1, (600, 600))
image2 = cv2.imread('bbb.jpg')
image2 = cv2.resize(image2, (600, 600))

# blend the two images pixel-wise at a 0.5 : 0.5 ratio
new_image = image1 * 0.5 + image2 * 0.5

# the blend produces a float array, so cast back to uint8 before writing
cv2.imwrite('mixup.jpg', new_image.astype('uint8'))
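As a side note, the same 0.5/0.5 blend can also be written with cv2.addWeighted, which keeps the result in uint8 without an explicit cast:

# equivalent blend using OpenCV's built-in weighted sum
new_image = cv2.addWeighted(image1, 0.5, image2, 0.5, 0)
cv2.imwrite('mixup.jpg', new_image)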
Random mixing ratio alpha:
import numpy as np

# alpha controls the Beta distribution from which the mixing ratio is drawn
lam = np.random.beta(alpha, alpha)
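For intuition, here is a small sketch (the alpha values are just examples, not from the original post) showing how alpha shapes the sampled lam:

import numpy as np

for alpha in (0.2, 1.0, 5.0):              # example values
    lam = np.random.beta(alpha, alpha, size=5)
    print(alpha, np.round(lam, 2))
# small alpha  -> lam mostly near 0 or 1 (one image dominates the mix)
# alpha = 1.0  -> lam uniform on (0, 1)
# large alpha  -> lam concentrated around 0.5 (strong mixing)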
PyTorch classification code:
alpha = 1.0  # defaults to 1
criterion = nn.CrossEntropyLoss()

for (inputs, labels) in train_loader:
    lam = np.random.beta(alpha, alpha)
    index = torch.randperm(inputs.size(0))
    images_a, images_b = inputs, inputs[index]
    labels_a, labels_b = labels, labels[index]
    # blend the batch with a shuffled copy of itself
    mixed_images = lam * images_a + (1 - lam) * images_b
    outputs = model(mixed_images)
    _, preds = torch.max(outputs, 1)
    # weight the loss against the two label sets by the mixing ratio
    loss = lam * criterion(outputs, labels_a) + (1 - lam) * criterion(outputs, labels_b)
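The loop above does not show how training accuracy is tracked with mixed labels. One common convention (my addition, not from the original post) is to credit predictions against both label sets, weighted by the same lam, inside the loop:

    # inside the training loop: weight correctness against both label sets by lam
    correct = (lam * preds.eq(labels_a).sum().float()
               + (1 - lam) * preds.eq(labels_b).sum().float())
    accuracy = correct / inputs.size(0)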
Source: https://blog.csdn.net/qq_42198461/article/details/126106442
CutMix data augmentation
PyTorch implementation of CutMix
I found a complete PyTorch project implementing CutMix on GitHub and extracted the part that does the actual work. I had been wondering how the labels should be handled; the code makes it clear: the loss is computed against both labels, and the two terms are weighted according to the size of the cropped patch.
for i, (input, target) in enumerate(train_loader):
    # measure data loading time
    # input has shape (batch_size, channels, H, W); target holds the class indices
    data_time.update(time.time() - end)

    input = input.cuda()
    target = target.cuda()

    r = np.random.rand(1)
    if args.beta > 0 and r < args.cutmix_prob:
        # generate mixed sample
        # lam controls the size of the pasted patch and later weights the two loss terms
        lam = np.random.beta(args.beta, args.beta)
        rand_index = torch.randperm(input.size()[0]).cuda()
        target_a = target
        target_b = target[rand_index]
        bbx1, bby1, bbx2, bby2 = rand_bbox(input.size(), lam)
        # cut the patch from the shuffled batch and paste it into the original batch
        input[:, :, bbx1:bbx2, bby1:bby2] = input[rand_index, :, bbx1:bbx2, bby1:bby2]
        # adjust lambda to exactly match pixel ratio
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (input.size()[-1] * input.size()[-2]))
        # compute output
        output = model(input)
        # weight the two loss terms by lam
        loss = criterion(output, target_a) * lam + criterion(output, target_b) * (1. - lam)
    else:
        # compute output
        output = model(input)
        loss = criterion(output, target)
Source: https://blog.csdn.net/qq_41804380/article/details/115266264
Python OpenCV CutMix code for classification:
import random

import numpy as np
import cv2


def rand_bbox(size, lamb):
    """
    Generate a random bounding box.
    :param size: shape of a single image (the demo resizes everything to square
                 600x600 images, so the order of the first two entries does not matter)
    :param lamb: mixing ratio sampled for this batch
    :return: box corners (bbx1, bby1, bbx2, bby2)
    """
    W = size[0]
    H = size[1]

    # ratio of the box side length to the image side length
    cut_ratio = np.sqrt(1.0 - lamb)
    cut_w = int(W * cut_ratio)
    cut_h = int(H * cut_ratio)

    # random center point of the box
    cx = np.random.randint(W)
    cy = np.random.randint(H)

    # clip the box so it stays inside the image
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)

    return bbx1, bby1, bbx2, bby2


def cutmix(image_batch, image_batch_labels, alpha=1.0):
    # the box size is controlled by lam, drawn from a Beta distribution
    lam = np.random.beta(alpha, alpha)
    # the author then overrides lam with a value uniform in roughly [0.3, 0.8)
    lam = random.random() / 2 + 0.3
    print("lam", lam)

    # permutation: for an integer input x, this returns a shuffled range(x)
    rand_index = np.random.permutation(len(image_batch))

    # these correspond to y_a and y_b in the CutMix formula
    target_a = image_batch_labels
    target_b = image_batch_labels[rand_index]

    # generate a random box from the image size
    bbx1, bby1, bbx2, bby2 = rand_bbox(image_batch[0].shape, lam)

    image_batch_updated = image_batch.copy()

    # image_batch has shape batch x height x width x channels (OpenCV order);
    # replace the box region of every image with the same region from a randomly
    # chosen other image; rand_index pairs each image with its donor
    image_batch_updated[:, bbx1:bbx2, bby1:bby2, :] = image_batch[rand_index, bbx1:bbx2, bby1:bby2, :]

    # recompute lam as 1 minus the fraction of the image covered by the box
    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1)) / (image_batch.shape[1] * image_batch.shape[2])
    # mix the labels with the same weights
    label = target_a * lam + target_b * (1. - lam)

    return image_batch_updated, label


if __name__ == '__main__':
    cat = cv2.imread("data/123.png")
    dog = cv2.imread("data/images/bus.jpg")

    cat = cv2.resize(cat, (600, 600))
    dog = cv2.resize(dog, (600, 600))

    for i in range(10):
        updated_img, label = cutmix(np.array([cat, dog]), np.array([[0, 1], [1, 0]]), 0.5)
        print(label)

        cv2.imshow("updated_img0", updated_img[0])
        cv2.imshow("updated_img1", updated_img[1])
        cv2.waitKey()

    # fig, axs = plt.subplots(nrows=1, ncols=2, squeeze=False)
    # ax1 = axs[0, 0]
    # ax2 = axs[0, 1]
    # ax1.imshow(updated_img[0])
    # ax2.imshow(updated_img[1])
    # plt.show()
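The cutmix function above returns soft one-hot labels rather than class indices. A minimal sketch of how such soft targets could be used for training; soft_cross_entropy is a hypothetical helper, not part of the original post:

import torch
import torch.nn.functional as F

# soft cross-entropy: targets are class probabilities, not class indices
def soft_cross_entropy(logits, soft_targets):
    log_probs = F.log_softmax(logits, dim=1)
    return -(soft_targets * log_probs).sum(dim=1).mean()

# usage (shapes are illustrative):
# logits       = model(images)                    # (batch, num_classes)
# soft_targets = torch.from_numpy(label).float()  # (batch, num_classes) from cutmix()
# loss         = soft_cross_entropy(logits, soft_targets)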
PyTorch CutMix code
Code: https://github.com/clovaai/CutMix-PyTorch
Generating the crop region
"""Inputs: the sample size and the randomly drawn lambda value"""
def rand_bbox(size, lam):
    W = size[2]
    H = size[3]

    """1. Equation 2 in the paper: compute the box size rw, rh"""
    cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)   # np.int is deprecated; the built-in int works the same
    cut_h = int(H * cut_rat)

    # uniform
    """2. Equation 2 in the paper: compute the box center rx, ry"""
    cx = np.random.randint(W)
    cy = np.random.randint(H)

    # clip the coordinates so the box does not exceed the sample size
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)

    """3. Return the corner coordinates of the crop region B"""
    return bbx1, bby1, bbx2, bby2
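A quick, hypothetical sanity check (not part of the repository) that the adjusted lambda tracks the actual pixel ratio:

import numpy as np

size = (8, 3, 224, 224)                      # (batch, channels, H, W), example values
lam = np.random.beta(1.0, 1.0)
bbx1, bby1, bbx2, bby2 = rand_bbox(size, lam)

# adjusted lambda = 1 - (box area / image area); close to the sampled lam
# unless the box was clipped at the image border
adjusted = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (size[-1] * size[-2]))
print(lam, adjusted)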
Overall training flow
"""train.py, lines 220-244"""
for i, (input, target) in enumerate(train_loader):
    # measure data loading time
    data_time.update(time.time() - end)

    input = input.cuda()
    target = target.cuda()

    r = np.random.rand(1)
    if args.beta > 0 and r < args.cutmix_prob:
        # generate mixed sample
        """1. Draw lambda from a Beta distribution"""
        lam = np.random.beta(args.beta, args.beta)

        """2. Pair each sample with a random other sample"""
        rand_index = torch.randperm(input.size()[0]).cuda()
        target_a = target              # labels of the original batch
        target_b = target[rand_index]  # labels of the shuffled batch

        """3. Generate the crop region B"""
        bbx1, bby1, bbx2, bby2 = rand_bbox(input.size(), lam)

        """4. Replace region B of sample A with region B of sample B"""
        input[:, :, bbx1:bbx2, bby1:bby2] = input[rand_index, :, bbx1:bbx2, bby1:bby2]

        # adjust lambda to exactly match pixel ratio
        """5. Recompute lam from the actual box coordinates"""
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (input.size()[-1] * input.size()[-2]))

        # compute output
        """6. Feed the mixed samples to the model"""
        output = model(input)

        """7. Weight the two loss terms by lam"""
        loss = criterion(output, target_a) * lam + criterion(output, target_b) * (1. - lam)
    else:
        # compute output
        output = model(input)
        loss = criterion(output, target)
Mosaic data augmentation
What is the Mosaic data augmentation method?
YOLOv4's Mosaic augmentation was inspired by CutMix, and the two are conceptually similar.
CutMix splices two images together, whereas Mosaic stitches four images into a single training image; a minimal sketch of the idea is given below.
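A minimal sketch of the Mosaic idea, assuming four equally sized images joined at a random split point. This is a simplified illustration, not the YOLOv4 implementation, which also rescales the source images and remaps their bounding boxes:

import cv2
import numpy as np

def simple_mosaic(img1, img2, img3, img4, out_size=600):
    # resize all four inputs to the output size
    imgs = [cv2.resize(im, (out_size, out_size)) for im in (img1, img2, img3, img4)]

    # random split point that divides the canvas into four quadrants
    cx = np.random.randint(int(0.3 * out_size), int(0.7 * out_size))
    cy = np.random.randint(int(0.3 * out_size), int(0.7 * out_size))

    canvas = np.zeros((out_size, out_size, 3), dtype=np.uint8)
    canvas[:cy, :cx] = imgs[0][:cy, :cx]   # top-left
    canvas[:cy, cx:] = imgs[1][:cy, cx:]   # top-right
    canvas[cy:, :cx] = imgs[2][cy:, :cx]   # bottom-left
    canvas[cy:, cx:] = imgs[3][cy:, cx:]   # bottom-right
    return canvas

For detection, the ground-truth boxes of each source image would additionally have to be cropped and shifted into the corresponding quadrant.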
Article source: blog.csdn.net; author: AI视觉网奇; copyright belongs to the original author. Please contact the author for reprint permission.
Original link: blog.csdn.net/jacke121/article/details/106989253