Step 1: Rename the dataset images

Create a new script rename_mot.py and insert the following code (adjust the paths as needed):

import os
import numpy as np
import time


ori_train_lists = [
                'D:/MOT20/train/MOT20-01/img1',
               'D:/MOT20/train/MOT20-02/img1',
               'D:/MOT20/train/MOT20-03/img1',
               'D:/MOT20/train/MOT20-05/img1',
            ]

ori_test_lists = ['D:/MOT20/test/MOT20-04/img1',
               'D:/MOT20/test/MOT20-06/img1',
               'D:/MOT20/test/MOT20-07/img1',
               'D:/MOT20/test/MOT20-08/img1',
            ]

voc_img_dir = 'D:/MOT20-Det/voc/JPEGImages'
voc_imgsets_dir = 'D:/MOT20-Det/voc/ImageSets/Main'

if not os.path.exists(voc_img_dir):
    os.makedirs(voc_img_dir)
if not os.path.exists(voc_imgsets_dir):
    os.makedirs(voc_imgsets_dir)

def img_rename_move(ori_path,new_path,fp):
    filelists = os.listdir(ori_path)                          # list all images under the source directory
    for file in filelists:
        src = os.path.join(os.path.abspath(ori_path),file)    # absolute path of the source file
        ori_name = os.path.basename(src)                      # file name without the directory part
        # print(ori_name)
        new_name = ori_path[-7:-5] + ori_name                 # prefix the two-digit sequence id (e.g. '01' from .../MOT20-01/img1), so 000001.jpg becomes 01000001.jpg
        txt_name = new_name[:-4]
        fp.write(txt_name+'\n')                               # append the name without its extension to the split txt file
        # print(new_name)
        dst = os.path.join(os.path.abspath(new_path),new_name)
        os.rename(src,dst)                                    # note: this moves (not copies) the image into JPEGImages

txt_train= voc_imgsets_dir + '/train.txt'
fp_train = open(txt_train,'a+')
txt_test = voc_imgsets_dir +'/test.txt'
fp_test = open(txt_test,'a+')

# img_rename_move(test_dir,test_outdir)
start_time = time.time()

for ori_path in ori_train_lists:
    print('processing {}'.format(ori_path))
    img_rename_move(ori_path,voc_img_dir,fp_train)
fp_train.close()

for ori_path in ori_test_lists:
    print('processing {}'.format(ori_path))
    img_rename_move(ori_path,voc_img_dir,fp_test)
fp_test.close()

end_time = time.time()
print('done, total cost {:.2f}s'.format(end_time - start_time))
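
After this step, JPEGImages contains files named like 01000001.jpg (the two-digit sequence id followed by the original frame name), and ImageSets/Main/train.txt and test.txt list those names without the extension. Since the images are moved rather than copied, run the script on a copy of MOT20 if you want to keep the original folder layout.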

Step 2: Generate VOC-format XML annotation files

Create a new script get_xml.py. This step produces the VOC dataset itself. Note: use det.txt, not gt.txt; gt.txt contains many errors.
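
For reference, each line of det.txt follows the MOTChallenge layout frame, id, bb_left, bb_top, bb_width, bb_height, conf, ...: boxes are stored as the top-left corner plus width and height, which is why the script below adds the width/height columns to bb_left/bb_top to obtain xmax/ymax. A minimal sketch for peeking at a few lines (the path is just an example):

import itertools

det_path = 'D:/MOT20/train/MOT20-01/det/det.txt'  # example path, adjust as needed
with open(det_path) as f:
    for line in itertools.islice(f, 3):
        # first seven det.txt fields: frame, id, bb_left, bb_top, bb_width, bb_height, conf
        frame, obj_id, left, top, w, h, conf = line.split(',')[:7]
        print(frame, left, top, w, h, conf)

The get_xml.py script itself: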

import os
import cv2
import codecs
import time


ori_gt_lists = ['D:/MOT20/train/MOT20-01/det/det.txt',
                'D:/MOT20/train/MOT20-02/det/det.txt',
                'D:/MOT20/train/MOT20-03/det/det.txt',
                'D:/MOT20/train/MOT20-05/det/det.txt',
                'D:/MOT20/test/MOT20-04/det/det.txt',
                'D:/MOT20/test/MOT20-06/det/det.txt',
                'D:/MOT20/test/MOT20-07/det/det.txt',
                'D:/MOT20/test/MOT20-08/det/det.txt']

img_dir = 'D:/MOT20-Det/voc/JPEGImages/'
annotation_dir = 'D:/MOT20-Det/voc/Annotations/'
if not os.path.exists(annotation_dir):
    os.makedirs(annotation_dir)
for each_dir in ori_gt_lists:

    start_time = time.time()

    fp = open(each_dir, 'r')
    userlines = fp.readlines()
    fp.close()
    fram_list = []
    for line in userlines:
        e_fram = int(line.split(',')[0])
        fram_list.append(e_fram)
    max_index = max(fram_list)
    print(each_dir + ' max frame index:', max_index)

    for i in range(1, max_index + 1):  # +1 so the last frame is included
        clear_name = each_dir[-14:-12] + format(str(i), '0>6s')
        format_name = clear_name + '.jpg'
        detail_dir = img_dir + format_name
        print(detail_dir)
        img = cv2.imread(detail_dir)
        shape_img = img.shape
        height = shape_img[0]
        width = shape_img[1]
        depth = shape_img[2]
        # list.index() would only return the first matching position, so use
        # enumerate to collect the indices of every detection belonging to frame i
        each_index = [num for num,x in enumerate(fram_list) if x == (i)]

        with codecs.open(annotation_dir + clear_name + '.xml', 'w') as xml:
            xml.write('<?xml version="1.0" encoding="UTF-8"?>\n')
            xml.write('<annotation>\n')
            xml.write('\t<folder>' + 'voc' + '</folder>\n')
            xml.write('\t<filename>' + format_name + '</filename>\n')
            # xml.write('\t<path>' + path + "/" + info1 + '</path>\n')
            xml.write('\t<source>\n')
            xml.write('\t\t<database> The MOT20-Det </database>\n')
            xml.write('\t</source>\n')
            xml.write('\t<size>\n')
            xml.write('\t\t<width>' + str(width) + '</width>\n')
            xml.write('\t\t<height>' + str(height) + '</height>\n')
            xml.write('\t\t<depth>' + str(depth) + '</depth>\n')
            xml.write('\t</size>\n')
            xml.write('\t<segmented>0</segmented>\n')
            for j in range(len(each_index)):
                num = each_index[j]

                # det.txt columns 2-5 are bb_left, bb_top, bb_width, bb_height
                x1 = int(float(userlines[num].split(',')[2]))
                y1 = int(float(userlines[num].split(',')[3]))
                w = int(float(userlines[num].split(',')[4]))
                h = int(float(userlines[num].split(',')[5]))

                xml.write('\t<object>\n')
                xml.write('\t\t<name>person</name>\n')
                xml.write('\t\t<pose>Unspecified</pose>\n')
                xml.write('\t\t<truncated>0</truncated>\n')
                xml.write('\t\t<difficult>0</difficult>\n')
                xml.write('\t\t<bndbox>\n')
                xml.write('\t\t\t<xmin>' + str(x1) + '</xmin>\n')
                xml.write('\t\t\t<ymin>' + str(y1) + '</ymin>\n')
                xml.write('\t\t\t<xmax>' + str(x1 + w) + '</xmax>\n')
                xml.write('\t\t\t<ymax>' + str(y1 + h) + '</ymax>\n')
                xml.write('\t\t</bndbox>\n')
                xml.write('\t</object>\n')

            xml.write('</annotation>')

    end_time = time.time()
    print('process {} cost time:{}s'.format(each_dir,(end_time-start_time)))

print('finished processing all det files')

If you only want a VOC-format dataset, you can stop here.
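
Before moving on, it is worth spot-checking one of the generated annotations by drawing its boxes back onto the image. A minimal sketch, assuming the paths used above; 01000001 is just an example name produced by step 1:

import cv2
import xml.etree.ElementTree as ET

name = '01000001'  # example file name, pick any image that exists
img = cv2.imread('D:/MOT20-Det/voc/JPEGImages/' + name + '.jpg')
tree = ET.parse('D:/MOT20-Det/voc/Annotations/' + name + '.xml')
for obj in tree.findall('object'):
    box = obj.find('bndbox')
    x1, y1, x2, y2 = [int(box.find(tag).text) for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imwrite('check_' + name + '.jpg', img)  # open this image to verify the boxes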

Step 3: Convert the VOC-format dataset to a Labelme-annotated dataset

This makes it easy to review the annotations in Labelme and fix any boxes you are not satisfied with.
Create a new script voc2labelme.py and insert the code below.
The script reads 1.json, an existing Labelme annotation file used as a template; if you do not have one, open any image in Labelme, draw an arbitrary box and save it, or write a minimal template first, as in the sketch that follows.
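
A minimal sketch of such a template, containing only the keys the conversion script actually reads (shapes, version, flags, lineColor, fillColor); the colour values are just Labelme 3.x-style placeholders:

import json

# write a bare 1.json template for the script below
template = {
    "version": "3.16.4",
    "flags": {},
    "shapes": [],
    "lineColor": [0, 255, 0, 128],
    "fillColor": [255, 0, 0, 128],
}
with open('1.json', 'w') as f:
    json.dump(template, f, indent=2)

With 1.json in place, the voc2labelme.py code is as follows: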

import sys
import os.path as osp
import io
from labelme.logger import logger
from labelme import PY2
from labelme import QT4
import PIL.Image
import base64
from labelme import utils
import os
import cv2
import xml.etree.ElementTree as ET

module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)
import json
from PIL import Image

Image.MAX_IMAGE_PIXELS = None
imageroot = 'D:/MOT20-Det/voc'  # root of the VOC dataset created in steps 1 and 2

def load_image_file(filename):
    try:
        image_pil = PIL.Image.open(filename)
    except IOError:
        logger.error('Failed opening image file: {}'.format(filename))
        return

    # apply orientation to image according to exif
    image_pil = utils.apply_exif_orientation(image_pil)

    with io.BytesIO() as f:
        ext = osp.splitext(filename)[1].lower()
        if PY2 and QT4:
            format = 'PNG'
        elif ext in ['.jpg', '.jpeg']:
            format = 'JPEG'
        else:
            format = 'PNG'
        image_pil.save(f, format=format)
        f.seek(0)
        return f.read()


def dict_json(flags, imageData, shapes, imagePath, fillColor=None, lineColor=None, imageHeight=100, imageWidth=100):
    '''
    :param imageData: str
    :param shapes: list
    :param imagePath: str
    :param fillColor: list
    :param lineColor: list
    :return: dict
    '''
    return {"version": "3.16.4", "flags": flags, "shapes": shapes, 'lineColor': lineColor, "fillColor": fillColor,
            'imagePath': imagePath.split('/')[-1], "imageData": imageData, 'imageHeight': imageHeight,
            'imageWidth': imageWidth}


data = json.load(open('1.json'))

xmlpathName = imageroot  + '/Annotations/'
imagepath = imageroot  + '/JPEGImages'
resultFile = os.listdir(xmlpathName)
for file in resultFile:
    print(file)
    imagePH = imagepath + '/' + file.split('.')[0] + '.jpg'
    print(imagePH)
    tree = ET.parse(xmlpathName + '/' + file)
    image = cv2.imread(imagePH)
    shapes = data["shapes"]
    version = data["version"]
    flags = data["flags"]
    lineColor = data["lineColor"]
    fillColor = data['fillColor']
    newshapes = []
    for elem in tree.iter():
        if 'object' in elem.tag:
            name = ''
            xminNode = 0
            yminNode = 0
            xmaxNode = 0
            ymaxNode = 0
            for attr in list(elem):
                if 'name' in attr.tag:
                    name = attr.text
                if 'bndbox' in attr.tag:
                    for dim in list(attr):
                        if 'xmin' in dim.tag:
                            xminNode = int(round(float(dim.text)))
                        if 'ymin' in dim.tag:
                            yminNode = int(round(float(dim.text)))
                        if 'xmax' in dim.tag:
                            xmaxNode = int(round(float(dim.text)))
                        if 'ymax' in dim.tag:
                            ymaxNode = int(round(float(dim.text)))
            line_color = None
            fill_color = None
            newPoints = [[float(xminNode), float(yminNode)], [float(xmaxNode), float(ymaxNode)]]
            shape_type = 'rectangle'
            newshapes.append(
                    {"label": name, "line_color": line_color, "fill_color": fill_color, "points": newPoints,
                     "shape_type": shape_type, "flags": flags})
    imageData_90 = load_image_file(imagePH)
    imageData_90 = base64.b64encode(imageData_90).decode('utf-8')
    imageHeight = image.shape[0]
    imageWidth = image.shape[1]
    data_90 = dict_json(flags, imageData_90, newshapes, imagePH, fillColor, lineColor, imageHeight, imageWidth)
    json_file = imagePH[:-4] + '.json'
    with open(json_file, 'w') as f:
        json.dump(data_90, f, indent=2)
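
The generated .json files are written next to the images, so you can now open the folder in Labelme (for example: labelme D:/MOT20-Det/voc/JPEGImages) to review and correct the boxes before converting to YOLO format.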

Step 4: Convert the Labelme-annotated dataset into a dataset that YOLO models (YOLOv4/v5/v6/v7, etc.) can train on

Create a new script labelme2yolo.py and insert the following code:

import os
import shutil

import numpy as np
import json
from glob import glob
import cv2
from sklearn.model_selection import train_test_split
from os import getcwd


def convert(size, box):
    # convert a VOC-style box (xmin, xmax, ymin, ymax) in pixels
    # into the normalized YOLO format (x_center, y_center, width, height)
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)


def change_2_yolo5(files, txt_Name):
    imag_name=[]
    for json_file_ in files:
        json_filename = labelme_path + json_file_ + ".json"
        out_file = open('%s/%s.txt' % (labelme_path, json_file_), 'w')
        json_file = json.load(open(json_filename, "r", encoding="utf-8"))
        # image_path = labelme_path + json_file['imagePath']
        imag_name.append(json_file_+'.jpg')
        height, width, channels = cv2.imread(labelme_path + json_file_ + ".jpg").shape
        for multi in json_file["shapes"]:
            points = np.array(multi["points"])
            xmin = min(points[:, 0]) if min(points[:, 0]) > 0 else 0
            xmax = max(points[:, 0]) if max(points[:, 0]) > 0 else 0
            ymin = min(points[:, 1]) if min(points[:, 1]) > 0 else 0
            ymax = max(points[:, 1]) if max(points[:, 1]) > 0 else 0
            label = multi["label"].lower()
            if xmax <= xmin:
                pass
            elif ymax <= ymin:
                pass
            else:
                cls_id = classes.index(label)
                b = (float(xmin), float(xmax), float(ymin), float(ymax))
                bb = convert((width, height), b)
                out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
                # print(json_filename, xmin, ymin, xmax, ymax, cls_id)
    return imag_name

def image_txt_copy(files,scr_path,dst_img_path,dst_txt_path):
    """
    :param files: 图片名字组成的list
    :param scr_path: 图片的路径
    :param dst_img_path: 图片复制到的路径
    :param dst_txt_path: 图片对应的txt复制到的路径
    :return:
    """
    for file in files:
        img_path=scr_path+file
        print(file)
        shutil.copy(img_path, dst_img_path+file)
        scr_txt_path=scr_path+file.split('.')[0]+'.txt'
        shutil.copy(scr_txt_path, dst_txt_path + file.split('.')[0]+'.txt')


if __name__ == '__main__':
    classes = ['person']
    # directory holding the images and the Labelme json files generated in step 3
    # (run this script from D:/MOT20-Det/voc, or adjust the path)
    labelme_path = "JPEGImages/"
    isUseTest = True  # whether to create a test split
    # collect the json files to process
    files = glob(labelme_path + "*.json")

    files = [i.replace("\\", "/").split("/")[-1].split(".json")[0] for i in files]
    for i in files:
        print(i)
    trainval_files, test_files = train_test_split(files, test_size=0.1, random_state=55)
    # split
    train_files, val_files = train_test_split(trainval_files, test_size=0.1, random_state=55)
    train_name_list=change_2_yolo5(train_files, "train")
    print(train_name_list)
    val_name_list=change_2_yolo5(val_files, "val")
    test_name_list=change_2_yolo5(test_files, "test")
    # create the output dataset directories
    file_List = ["train", "val", "test"]
    for file in file_List:
        if not os.path.exists('./VOC/images/%s' % file):
            os.makedirs('./VOC/images/%s' % file)
        if not os.path.exists('./VOC/labels/%s' % file):
            os.makedirs('./VOC/labels/%s' % file)
    image_txt_copy(train_name_list,labelme_path,'./VOC/images/train/','./VOC/labels/train/')
    image_txt_copy(val_name_list, labelme_path, './VOC/images/val/', './VOC/labels/val/')
    image_txt_copy(test_name_list, labelme_path, './VOC/images/test/', './VOC/labels/test/')
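
To train a YOLOv5-style model on the result you still need a small dataset config that points at the folders created above. A minimal sketch; data_mot20.yaml is just an example name, and the exact keys may differ between YOLO versions:

# write a simple YOLOv5-style dataset config for the ./VOC folders created above
yaml_text = (
    "train: ./VOC/images/train\n"
    "val: ./VOC/images/val\n"
    "test: ./VOC/images/test\n"
    "nc: 1\n"
    "names: ['person']\n"
)
with open('data_mot20.yaml', 'w') as f:
    f.write(yaml_text)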


Reposted from: https://wanghao.blog.csdn.net/article/details/127374841