I. Data Formats

1. MS COCO

coco2017: dataset root directory
├── train2017: all training images (118,287 images)
├── val2017: all validation images (5,000 images)
└── annotations: annotation files
    ├── instances_train2017.json: training-set annotations for object detection and segmentation
    ├── instances_val2017.json: validation-set annotations for object detection and segmentation
    ├── captions_train2017.json: training-set annotations for image captioning
    ├── captions_val2017.json: validation-set annotations for image captioning
    ├── person_keypoints_train2017.json: training-set annotations for human keypoint detection
    └── person_keypoints_val2017.json: validation-set annotations for human keypoint detection
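
A quick way to sanity-check this layout (the root path is an assumption; adjust it to your own setup):

import os

coco_root = "/data/coco2017"  # assumed dataset root

print(len(os.listdir(os.path.join(coco_root, "train2017"))))       # expect 118287
print(len(os.listdir(os.path.join(coco_root, "val2017"))))         # expect 5000
print(sorted(os.listdir(os.path.join(coco_root, "annotations"))))  # the six json files listed above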
# Read the annotation json file
import json

json_path = "/data/coco2017/annotations/instances_val2017.json"
json_labels = json.load(open(json_path, "r"))
print(json_labels["info"])

"""
The json file contains five top-level fields:
{
  "info": {dict: 6},            # dataset description
  "licenses": [list: 8],        # each element is a dict
  "images": [list: 5000],       # each element is a dict with file_name, height, width; "id" uniquely identifies the image
  "annotations": [list: 36781], # one element per annotated object in the dataset; each element is a dict holding
                                # the segmentation (polygons), the bounding box [x, y, width, height]
                                # (top-left corner plus width and height), the object area,
                                # the image id and the category id
  "categories": [list: 80],     # each element is a dict with the category id, category name and supercategory
}
"""

# The "segmentation" field is stored either as polygons or as RLE (run-length encoding).
# Polygon annotations can be decoded into binary masks with pycocotools:
import torch
from pycocotools import mask as coco_mask

def convert_coco_poly_mask(segmentations, height, width):
    masks = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        mask = coco_mask.decode(rles)  # binary mask: background pixels are 0, foreground pixels are 1
        if len(mask.shape) < 3:
            mask = mask[..., None]
        mask = torch.as_tensor(mask, dtype=torch.uint8)
        mask = mask.any(dim=2)  # True wherever any element along the channel dimension is 1
        masks.append(mask)
    if masks:
        masks = torch.stack(masks, dim=0)
    else:
        # no objects in this image: return an empty all-zero mask
        masks = torch.zeros((0, height, width), dtype=torch.uint8)
    return masks
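
A minimal usage sketch, assuming json_labels from the snippet above is still in scope and that the chosen annotation stores its segmentation as polygons (not RLE):

ann = json_labels["annotations"][0]
img = next(i for i in json_labels["images"] if i["id"] == ann["image_id"])

masks = convert_coco_poly_mask([ann["segmentation"]], img["height"], img["width"])
print(masks.shape)  # (1, height, width); each mask is a boolean tensor after .any()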

Note: in COCO format the segmentation labels live in the json annotation file, whereas in VOC format the segmentation labels are .png images. In a VOC annotation image (.png), the pixel value of each object follows the order of the objects in the corresponding xml file: if the xml lists three objects, their pixels are labeled 1, 2 and 3 respectively.
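
For comparison, a small sketch of reading such a VOC-style instance mask (the file path is a placeholder):

import numpy as np
from PIL import Image

# placeholder path to a VOC SegmentationObject annotation image
mask_png = np.array(Image.open("VOCdevkit/VOC2012/SegmentationObject/2007_000032.png"))

obj_ids = np.unique(mask_png)
obj_ids = obj_ids[(obj_ids != 0) & (obj_ids != 255)]  # 0 is background, 255 marks the ignored object borders

# one binary mask per object, in the same order as the objects in the xml file
binary_masks = [(mask_png == obj_id) for obj_id in obj_ids]
print(len(binary_masks))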

2. YOLO

  • yolo/
    • images
      • .jpg  # the image files
    • labels
      • .txt  # one label file per image: class id, x_center, y_center, w, h (pixel values divided by the image width and height); see the sample below
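
For example, the label file for a 640×480 image might contain the single line below; converting such a line back to pixel corner coordinates is straightforward (the values are made up for illustration):

# one line of a YOLO label file: "class x_center y_center w h", all normalized to [0, 1]
line = "0 0.5 0.5 0.25 0.5"
img_w, img_h = 640, 480

cls, xc, yc, w, h = line.split()
xc, yc, w, h = float(xc) * img_w, float(yc) * img_h, float(w) * img_w, float(h) * img_h

# back to corner format (x1, y1, x2, y2)
x1, y1 = xc - w / 2, yc - h / 2
x2, y2 = xc + w / 2, yc + h / 2
print(int(cls), x1, y1, x2, y2)  # 0 240.0 120.0 400.0 360.0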
# Convert a COCO-format dataset to a YOLO-format dataset
# --json_path  path of the input COCO json file
# --save_path  folder where the output label files are saved (e.g. labels under the current directory)

import os
import json
from tqdm import tqdm
import argparse

parser = argparse.ArgumentParser()
# replace with the location of your own json file
parser.add_argument('--json_path',
                    default=r'your path', type=str,
                    help="input: coco format(json)")
# where the .txt label files will be saved
parser.add_argument('--save_path', default=r'your path', type=str,
                    help="specify where to save the output dir of labels")
arg = parser.parse_args()

def convert(size, box):
    # size: (img_width, img_height); box: COCO bbox [x, y, width, height]
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = box[0] + box[2] / 2.0  # box center x in pixels
    y = box[1] + box[3] / 2.0  # box center y in pixels
    w = box[2]
    h = box[3]
    # round() controls the number of decimal places of the normalized (x, y, w, h)
    x = round(x * dw, 6)
    w = round(w * dw, 6)
    y = round(y * dh, 6)
    h = round(h * dh, 6)
    return (x, y, w, h)


if __name__ == '__main__':
    json_file = arg.json_path          # COCO object-instance annotations
    ana_txt_save_path = arg.save_path  # output directory

    data = json.load(open(json_file, 'r'))
    if not os.path.exists(ana_txt_save_path):
        os.makedirs(ana_txt_save_path)

    id_map = {}  # COCO category ids are not contiguous, so remap them before writing
    with open(os.path.join(ana_txt_save_path, 'classes.txt'), 'w') as f:
        # write classes.txt
        for i, category in enumerate(data['categories']):
            f.write(f"{category['name']}\n")
            id_map[category['id']] = i
    # print(id_map)

    # change this to wherever you want the list of relative image paths to be written
    list_file = open(os.path.join(ana_txt_save_path, 'train2017.txt'), 'w')
    for img in tqdm(data['images']):
        filename = img["file_name"]
        img_width = img["width"]
        img_height = img["height"]
        img_id = img["id"]
        head, tail = os.path.splitext(filename)
        ana_txt_name = head + ".txt"  # txt file with the same base name as the image
        f_txt = open(os.path.join(ana_txt_save_path, ana_txt_name), 'w')
        for ann in data['annotations']:
            if ann['image_id'] == img_id:
                box = convert((img_width, img_height), ann["bbox"])
                f_txt.write("%s %s %s %s %s\n" % (id_map[ann["category_id"]], box[0], box[1], box[2], box[3]))
        f_txt.close()
        list_file.write("./images/train2017/%s\n" % filename)  # record the relative image path
    list_file.close()
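
A typical invocation, assuming the script above is saved as coco2yolo.py (the script name and the paths are placeholders):

python coco2yolo.py --json_path /data/coco2017/annotations/instances_train2017.json --save_path ./labels/train2017

After it finishes, the save path contains classes.txt, train2017.txt (the list of relative image paths) and one .txt label file per image.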

II. IoU and NMS
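
IoU (Intersection over Union) measures how much two boxes overlap: IoU(A, B) = area(A ∩ B) / area(A ∪ B), ranging from 0 (no overlap) to 1 (identical boxes). NMS (Non-Maximum Suppression) repeatedly keeps the highest-confidence box and discards the remaining boxes of the same class whose IoU with it exceeds a threshold. Both are implemented below.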

import torch

def iou(box, boxes, box_format="corners"):
    """
    Calculates intersection over union.

    Parameters:
        box (tensor): (1, 4)
        boxes (tensor): (N, 4)
        corners format: (x1, y1, x2, y2)

    Returns:
        tensor: intersection over union for all examples
    """
    box1_x1 = box[..., 0:1]
    box1_y1 = box[..., 1:2]
    box1_x2 = box[..., 2:3]
    box1_y2 = box[..., 3:4]
    box2_x1 = boxes[..., 0:1]
    box2_y1 = boxes[..., 1:2]
    box2_x2 = boxes[..., 2:3]
    box2_y2 = boxes[..., 3:4]

    # coordinates of the intersection rectangle
    x1 = torch.max(box1_x1, box2_x1)
    y1 = torch.max(box1_y1, box2_y1)
    x2 = torch.min(box1_x2, box2_x2)
    y2 = torch.min(box1_y2, box2_y2)

    # clamp(0) handles the case where the boxes do not overlap at all
    intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)

    box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
    box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))

    return intersection / (box1_area + box2_area - intersection + 1e-6)

def nms(boxes, iou_threshold):
    """
    Non-Maximum Suppression over a list of boxes.

    Parameters:
        boxes (list): each element is [class, conf, x1, y1, x2, y2]
        iou_threshold (float)

    Returns:
        list: boxes kept after NMS
    """
    # sort by confidence, highest first
    boxes = sorted(boxes, key=lambda x: x[1], reverse=True)
    boxes_after_nms = []
    while boxes:
        chosen_box = boxes[0]

        # keep a box only if it belongs to a different class
        # or does not overlap the chosen box too much
        boxes = [
            box
            for box in boxes
            if box[0] != chosen_box[0]
            or iou(
                torch.tensor(chosen_box[2:]),
                torch.tensor(box[2:]),
            )
            < iou_threshold
        ]

        boxes_after_nms.append(chosen_box)
    return boxes_after_nms
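
A minimal sanity check of the two functions (the box values are made up for illustration):

if __name__ == "__main__":
    # each box is [class, confidence, x1, y1, x2, y2] in corner format
    candidate_boxes = [
        [0, 0.9, 10, 10, 50, 50],
        [0, 0.8, 12, 12, 52, 48],      # same class, heavy overlap with the first box -> suppressed
        [1, 0.7, 100, 100, 150, 150],  # different class and no overlap -> kept
    ]
    print(iou(torch.tensor([10, 10, 50, 50]), torch.tensor([12, 12, 52, 48])))  # ~0.82
    print(nms(candidate_boxes, iou_threshold=0.5))  # keeps the first and the third box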