1. Dataset conversion

VisDrone2019-DET-train download address: http://aiskyeye.com/download/object-detection/

After extraction there are only two folders, `images` and `annotations`. Use the conversion tool visdrone2yolo.py below: you only need to modify `root_dir` and it can be run directly. Install the required pip packages before running. If no labels folder is generated, manually create a `labels` folder under `root_dir`.
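For reference, each line of a VisDrone annotation txt is one comma-separated object record. The script below relies on the standard VisDrone field order (bbox_left, bbox_top, bbox_width, bbox_height, score, object_category, truncation, occlusion); the following line is an illustrative example, not taken from the dataset:

684,8,273,116,0,0,0,0

So line[0..3] describe the box, line[5] indexes into class_name, line[6] is the truncation flag, and line[7] (occlusion) is written into VOC's <difficult> field.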
import os
from os import getcwd
from PIL import Image
import xml.etree.ElementTree as ET
import random

# root_dir = "train/"
root_dir = "/home/lhw/Gradute/VirDrone/yolov5/VisDrone2019-DET-train/"
annotations_dir = root_dir + "annotations/"
image_dir = root_dir + "images/"
label_dir = root_dir + "labels/"
# label_dir = root_dir + "images/"  # YOLO can also keep the labels next to the images
xml_dir = root_dir + "annotations_voc/"  # create this folder first; once the run finishes, the original annotations folder is no longer needed and annotations_voc can be renamed to annotations
data_split_dir = root_dir + "train_namelist/"
sets = ['train', 'test', 'val']
class_name = ['ignored regions', 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor', 'others']
def visdrone2voc(annotations_dir, image_dir, xml_dir):
    # convert each VisDrone annotation txt into a Pascal VOC xml file
    for filename in os.listdir(annotations_dir):
        fin = open(annotations_dir + filename, 'r')
        image_name = filename.split('.')[0]
        img = Image.open(image_dir + image_name + ".jpg")  # opened only to read the image size
        xml_name = xml_dir + image_name + '.xml'
        with open(xml_name, 'w') as fout:
            fout.write('<annotation>' + '\n')
            fout.write('\t' + '<folder>VOC2007</folder>' + '\n')
            fout.write('\t' + '<filename>' + image_name + '.jpg' + '</filename>' + '\n')
            fout.write('\t' + '<source>' + '\n')
            fout.write('\t\t' + '<database>' + 'VisDrone2018 Database' + '</database>' + '\n')
            fout.write('\t\t' + '<annotation>' + 'VisDrone2018' + '</annotation>' + '\n')
            fout.write('\t\t' + '<image>' + 'flickr' + '</image>' + '\n')
            fout.write('\t\t' + '<flickrid>' + 'Unspecified' + '</flickrid>' + '\n')
            fout.write('\t' + '</source>' + '\n')
            fout.write('\t' + '<owner>' + '\n')
            fout.write('\t\t' + '<flickrid>' + 'Haipeng Zhang' + '</flickrid>' + '\n')
            fout.write('\t\t' + '<name>' + 'Haipeng Zhang' + '</name>' + '\n')
            fout.write('\t' + '</owner>' + '\n')
            fout.write('\t' + '<size>' + '\n')
            fout.write('\t\t' + '<width>' + str(img.size[0]) + '</width>' + '\n')
            fout.write('\t\t' + '<height>' + str(img.size[1]) + '</height>' + '\n')
            fout.write('\t\t' + '<depth>' + '3' + '</depth>' + '\n')
            fout.write('\t' + '</size>' + '\n')
            fout.write('\t' + '<segmented>' + '0' + '</segmented>' + '\n')
            for line in fin.readlines():
                line = line.split(',')
                fout.write('\t' + '<object>' + '\n')
                fout.write('\t\t' + '<name>' + class_name[int(line[5])] + '</name>' + '\n')
                fout.write('\t\t' + '<pose>' + 'Unspecified' + '</pose>' + '\n')
                fout.write('\t\t' + '<truncated>' + line[6] + '</truncated>' + '\n')
                fout.write('\t\t' + '<difficult>' + str(int(line[7])) + '</difficult>' + '\n')
                fout.write('\t\t' + '<bndbox>' + '\n')
                fout.write('\t\t\t' + '<xmin>' + line[0] + '</xmin>' + '\n')
                fout.write('\t\t\t' + '<ymin>' + line[1] + '</ymin>' + '\n')
                # pay attention to this point! (0-based)
                fout.write('\t\t\t' + '<xmax>' + str(int(line[0]) + int(line[2]) - 1) + '</xmax>' + '\n')
                fout.write('\t\t\t' + '<ymax>' + str(int(line[1]) + int(line[3]) - 1) + '</ymax>' + '\n')
                fout.write('\t\t' + '</bndbox>' + '\n')
                fout.write('\t' + '</object>' + '\n')
            fin.close()
            fout.write('</annotation>')
def data_split(xml_dir, data_split_dir):
    # split the dataset into name lists; with these percentages, 80% of the
    # images end up in train.txt, 18% in test.txt and 2% in val.txt
    trainval_percent = 0.2
    train_percent = 0.9
    total_xml = os.listdir(xml_dir)
    if not os.path.exists(data_split_dir):
        os.makedirs(data_split_dir)
    num = len(total_xml)
    indices = range(num)
    tv = int(num * trainval_percent)
    tr = int(tv * train_percent)
    trainval = random.sample(indices, tv)
    train = random.sample(trainval, tr)
    ftrainval = open(data_split_dir + 'trainval.txt', 'w')
    ftest = open(data_split_dir + 'test.txt', 'w')
    ftrain = open(data_split_dir + 'train.txt', 'w')
    fval = open(data_split_dir + 'val.txt', 'w')
    for i in indices:
        name = total_xml[i][:-4] + '\n'
        if i in trainval:
            ftrainval.write(name)
            if i in train:
                ftest.write(name)
            else:
                fval.write(name)
        else:
            ftrain.write(name)
    ftrainval.close()
    ftrain.close()
    fval.close()
    ftest.close()
def convert(size, box):
    # VOC box (xmin, xmax, ymin, ymax) -> normalized YOLO (x_center, y_center, w, h)
    dw = 1. / size[0]
    dh = 1. / size[1]
    x = (box[0] + box[1]) / 2.0
    y = (box[2] + box[3]) / 2.0
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)
def convert_annotation_voc(xml_dir, label_dir, image_name):
    in_file = open(xml_dir + '%s.xml' % (image_name))
    out_file = open(label_dir + '%s.txt' % (image_name), 'w')
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in class_name or int(difficult) == 1:
            continue
        cls_id = class_name.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
             float(xmlbox.find('ymax').text))
        bb = convert((w, h), b)
        if cls_id != 0:  # skip class 0 (ignored regions)
            if cls_id != 11:  # skip class 11 (others)
                out_file.write(str(cls_id - 1) + " " + " ".join([str(a) for a in bb]) + '\n')  # shift the remaining ids down by 1; adapt this to your own needs
    in_file.close()
    out_file.close()
def voc2yolo(xml_dir, image_dir, label_dir):
    wd = getcwd()
    print(wd)
    for image_set in sets:
        if not os.path.exists(label_dir):
            os.makedirs(label_dir)
        image_names = open(data_split_dir + '%s.txt' % (image_set)).read().strip().split()
        list_file = open(root_dir + '%s.txt' % (image_set), 'w')
        for image_name in image_names:
            list_file.write(image_dir + '%s.jpg\n' % (image_name))
            convert_annotation_voc(xml_dir, label_dir, image_name)
        list_file.close()
if __name__ == '__main__':
    visdrone2voc(annotations_dir, image_dir, xml_dir)  # convert VisDrone annotations to VOC xml
    data_split(xml_dir, data_split_dir)  # split the dataset into train, val and test name lists
    voc2yolo(xml_dir, image_dir, label_dir)  # convert the VOC xml to YOLO-format txt labels
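After the script finishes, a quick sanity check helps confirm that label files were generated and that every class id is inside the expected range. This is a minimal sketch, assuming the same root_dir as above:

import os

root_dir = "/home/lhw/Gradute/VirDrone/yolov5/VisDrone2019-DET-train/"
label_dir = root_dir + "labels/"

count = 0
for txt in os.listdir(label_dir):
    with open(label_dir + txt) as f:
        for line in f:
            cls_id = int(line.split()[0])
            # after dropping classes 0 and 11 and shifting by -1, ids must be 0..9
            assert 0 <= cls_id <= 9, "unexpected class id %d in %s" % (cls_id, txt)
    count += 1
print("checked %d label files" % count)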
- Download the YOLOv5 source code

git clone https://github.com/ultralytics/yolov5.git

Then, in the cloned yolov5 root directory, create a folder `visdronedata` with its subdirectories images/train, images/val, labels/train and labels/val. Copy the pictures from VisDrone's `images` folder into images/train and images/val, and copy the txt files from the `labels` folder generated by the script above into labels/train and labels/val (or use the helper sketch below).
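The copying can of course be done by hand; the sketch below is a hypothetical helper (not part of YOLOv5) that instead uses the train.txt/val.txt name lists written by data_split(), so each image and label lands in its own split. The yolov5_dir path is an assumption to adjust:

import os
import shutil

root_dir = "/home/lhw/Gradute/VirDrone/yolov5/VisDrone2019-DET-train/"
yolov5_dir = "/home/lhw/Gradute/VirDrone/yolov5/"  # cloned yolov5 root (assumed; adjust)
data_dir = yolov5_dir + "visdronedata/"

for split in ("train", "val"):
    os.makedirs(data_dir + "images/" + split, exist_ok=True)
    os.makedirs(data_dir + "labels/" + split, exist_ok=True)
    # name lists written by data_split() above
    with open(root_dir + "train_namelist/%s.txt" % split) as f:
        for name in f.read().split():
            shutil.copy(root_dir + "images/" + name + ".jpg", data_dir + "images/" + split + "/")
            shutil.copy(root_dir + "labels/" + name + ".txt", data_dir + "labels/" + split + "/")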
Then set up the YOLOv5 environment. It should be installed under Python 3.8; with conda:

conda create -n py38 python=3.8 # create a virtual environment
conda activate py38 # activate it; on success the environment name appears in parentheses before the shell prompt
pip install -r requirements.txt # install the required packages, run from the yolov5 root directory
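Before moving on, a two-line check (assuming PyTorch was installed by requirements.txt) confirms the environment is usable and whether a GPU is visible:

import torch  # installed by requirements.txt
print(torch.__version__, torch.cuda.is_available())  # True means a CUDA GPU is available for training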
After the environment is set up, edit data/voc.yaml:

# download command/URL (optional)
# download: bash data/scripts/get_voc.sh

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: visdronedata/images/train/
val: visdronedata/images/val/

# number of classes
nc: 10

# class names
names: ['pedestrian','people','bicycle','car','van','truck','tricycle','awning-tricycle','bus','motor']
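A one-off consistency check (a small sketch, not part of YOLOv5) catches a mismatch between nc and the names list before training starts:

import yaml  # PyYAML, pulled in by yolov5's requirements.txt

with open('data/voc.yaml') as f:
    cfg = yaml.safe_load(f)
assert cfg['nc'] == len(cfg['names']), "nc must equal the number of class names"
print("dataset yaml OK: %d classes" % cfg['nc'])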
Edit models/yolov5l.yaml:

# parameters
nc: 10 # number of classes (the only field that needs changing)
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple

Download the pretrained model yolov5l.pt from the v1.0 release assets; since models/yolov5l.yaml is used here, the matching weight is yolov5l.pt. If you train with a different model config, download the corresponding pretrained model; the mapping is listed on GitHub. Put the downloaded model into the weights folder.
Then run training from the command line. With enough GPU memory you can increase --batch-size, e.g. 1, 8, 16, 32 or 64:

python train.py --data data/voc.yaml --cfg models/yolov5l.yaml --weights weights/yolov5l.pt --batch-size 1

Alternatively, edit the default arguments in train.py and simply run:

python train.py

During training, intermediate files and checkpoints are produced and saved under the runs/ folder. The exp subfolder holds the saved intermediate weights; you can copy one into the yolov5/ root directory for test predictions. Detection results are likewise written to the runs/detect folder:

python detect.py --source file.jpg --weights best.pt --conf 0.25
python detect.py --source file.mp4 --weights best.pt --conf 0.25
(I only let it train for ten-odd minutes before taking it for a test run...)
Copyright notice: this is an original article by CSDN blogger 小灰啾, released under the CC 4.0 BY-SA license; please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/qq_42932308/article/details/115217935