Add comments

data/argoverse_hd.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
# Train command: python train.py --data argoverse_hd.yaml
# Default dataset location is next to /yolov5:
#   /parent_folder
#     /argoverse
#     /yolov5


# download command/URL (optional)
download: bash data/scripts/get_argoverse_hd.sh

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../argoverse/Argoverse-1.1/images/train/  # 39384 images
val: ../argoverse/Argoverse-1.1/images/val/  # 15062 images
test: ../argoverse/Argoverse-1.1/images/test/  # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview

# number of classes
nc: 8

# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ]
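
The dataset YAMLs in this commit all follow the same schema: train/val/test image sources, a class count nc, and a names list. A minimal sanity-check sketch (assuming PyYAML, the same pattern the commented snippet at the end of coco.yaml below uses) that verifies nc agrees with the names list:

import yaml

with open('data/argoverse_hd.yaml') as f:
    d = yaml.load(f, Loader=yaml.FullLoader)  # dict with download/train/val/test/nc/names keys

# nc must match the names list, or training-time class lookups will break
assert d['nc'] == len(d['names']), f"nc={d['nc']} but {len(d['names'])} names listed"
print(d['nc'], 'classes:', d['names'])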

data/coco.yaml (new file, 35 lines)
@@ -0,0 +1,35 @@
# COCO 2017 dataset http://cocodataset.org
# Train command: python train.py --data coco.yaml
# Default dataset location is next to /yolov5:
#   /parent_folder
#     /coco
#     /yolov5


# download command/URL (optional)
download: bash data/scripts/get_coco.sh

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../coco/train2017.txt  # 118287 images
val: ../coco/val2017.txt  # 5000 images
test: ../coco/test-dev2017.txt  # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

# number of classes
nc: 80

# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
         'hair drier', 'toothbrush' ]

# Print classes
# with open('data/coco.yaml') as f:
#     d = yaml.load(f, Loader=yaml.FullLoader)  # dict
#     for i, x in enumerate(d['names']):
#         print(i, x)

data/coco128.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
# COCO 2017 dataset http://cocodataset.org - first 128 training images
# Train command: python train.py --data coco128.yaml
# Default dataset location is next to /yolov5:
#   /parent_folder
#     /coco128
#     /yolov5


# download command/URL (optional)
download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../coco128/images/train2017/  # 128 images
val: ../coco128/images/train2017/  # 128 images

# number of classes
nc: 80

# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
         'hair drier', 'toothbrush' ]

data/hyp.finetune.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
# Hyperparameters for VOC finetuning
# python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials


# Hyperparameter Evolution Results
# Generations: 306
#                  P        R    mAP.5  mAP.5:.95     box      obj      cls
# Metrics:       0.6    0.936    0.896      0.684  0.0115  0.00805  0.00146

lr0: 0.0032
lrf: 0.12
momentum: 0.843
weight_decay: 0.00036
warmup_epochs: 2.0
warmup_momentum: 0.5
warmup_bias_lr: 0.05
box: 0.0296
cls: 0.243
cls_pw: 0.631
obj: 0.301
obj_pw: 0.911
iou_t: 0.2
anchor_t: 2.91
# anchors: 3.63
fl_gamma: 0.0
hsv_h: 0.0138
hsv_s: 0.664
hsv_v: 0.464
degrees: 0.373
translate: 0.245
scale: 0.898
shear: 0.602
perspective: 0.0
flipud: 0.00856
fliplr: 0.5
mosaic: 1.0
mixup: 0.243

data/hyp.scratch.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
# Hyperparameters for COCO training from scratch
# python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials


lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
lrf: 0.2  # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937  # SGD momentum/Adam beta1
weight_decay: 0.0005  # optimizer weight decay 5e-4
warmup_epochs: 3.0  # warmup epochs (fractions ok)
warmup_momentum: 0.8  # warmup initial momentum
warmup_bias_lr: 0.1  # warmup initial bias lr
box: 0.05  # box loss gain
cls: 0.5  # cls loss gain
landmark: 0.005  # landmark loss gain
cls_pw: 1.0  # cls BCELoss positive_weight
obj: 1.0  # obj loss gain (scale with pixels)
obj_pw: 1.0  # obj BCELoss positive_weight
iou_t: 0.20  # IoU training threshold
anchor_t: 4.0  # anchor-multiple threshold
# anchors: 3  # anchors per output layer (0 to ignore)
fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4  # image HSV-Value augmentation (fraction)
degrees: 0.0  # image rotation (+/- deg)
translate: 0.1  # image translation (+/- fraction)
scale: 0.5  # image scale (+/- gain)
shear: 0.5  # image shear (+/- deg)
perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
flipud: 0.0  # image flip up-down (probability)
fliplr: 0.5  # image flip left-right (probability)
mosaic: 0.5  # image mosaic (probability)
mixup: 0.0  # image mixup (probability)
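
Both hyp files are flat key/value YAML that train.py reads into a dict. Note that lrf is a multiplier, not an absolute rate. A small sketch (assuming PyYAML) of loading the file and computing the final OneCycleLR learning rate that the lrf comment above describes:

import yaml

with open('data/hyp.scratch.yaml') as f:
    hyp = yaml.load(f, Loader=yaml.FullLoader)  # flat dict of hyperparameters

# per the lrf comment, the final OneCycleLR learning rate is lr0 * lrf
print('final lr:', hyp['lr0'] * hyp['lrf'])  # 0.01 * 0.2 = 0.002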

data/plateAndCar.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
# CCPD car-and-plate detection dataset (single plate, double plate, car)
# Train command: python train.py --data plateAndCar.yaml

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: /mnt/Gpan/Mydata/pytorchPorject/datasets/ccpd/train_car_plate/train_detect
val: /mnt/Gpan/Mydata/pytorchPorject/datasets/ccpd/train_car_plate/val_detect

# number of classes
nc: 3

# class names
names: [ 'single_plate', 'double_plate', 'car' ]

data/retinaface2yolo.py (new file, 150 lines)
@@ -0,0 +1,150 @@
import os
import os.path
import sys
import torch
import torch.utils.data as data
import cv2
import numpy as np


class WiderFaceDetection(data.Dataset):
    def __init__(self, txt_path, preproc=None):
        self.preproc = preproc
        self.imgs_path = []
        self.words = []
        f = open(txt_path, 'r')
        lines = f.readlines()
        isFirst = True
        labels = []
        for line in lines:
            line = line.rstrip()
            if line.startswith('#'):
                if isFirst is True:
                    isFirst = False
                else:
                    labels_copy = labels.copy()
                    self.words.append(labels_copy)
                    labels.clear()
                path = line[2:]
                path = txt_path.replace('label.txt', 'images/') + path
                self.imgs_path.append(path)
            else:
                line = line.split(' ')
                label = [float(x) for x in line]
                labels.append(label)

        self.words.append(labels)

    def __len__(self):
        return len(self.imgs_path)

    def __getitem__(self, index):
        img = cv2.imread(self.imgs_path[index])
        height, width, _ = img.shape

        labels = self.words[index]
        annotations = np.zeros((0, 15))
        if len(labels) == 0:
            return annotations
        for idx, label in enumerate(labels):
            annotation = np.zeros((1, 15))
            # bbox
            annotation[0, 0] = label[0]  # x1
            annotation[0, 1] = label[1]  # y1
            annotation[0, 2] = label[0] + label[2]  # x2
            annotation[0, 3] = label[1] + label[3]  # y2

            # landmarks
            annotation[0, 4] = label[4]  # l0_x
            annotation[0, 5] = label[5]  # l0_y
            annotation[0, 6] = label[7]  # l1_x
            annotation[0, 7] = label[8]  # l1_y
            annotation[0, 8] = label[10]  # l2_x
            annotation[0, 9] = label[11]  # l2_y
            annotation[0, 10] = label[13]  # l3_x
            annotation[0, 11] = label[14]  # l3_y
            annotation[0, 12] = label[16]  # l4_x
            annotation[0, 13] = label[17]  # l4_y
            if annotation[0, 4] < 0:
                annotation[0, 14] = -1
            else:
                annotation[0, 14] = 1

            annotations = np.append(annotations, annotation, axis=0)
        target = np.array(annotations)
        if self.preproc is not None:
            img, target = self.preproc(img, target)

        return torch.from_numpy(img), target


def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for _, sample in enumerate(batch):
        for _, tup in enumerate(sample):
            if torch.is_tensor(tup):
                imgs.append(tup)
            elif isinstance(tup, type(np.empty(0))):
                annos = torch.from_numpy(tup).float()
                targets.append(annos)

    return (torch.stack(imgs, 0), targets)


save_path = '/ssd_1t/derron/yolov5-face/data/widerface/train'
aa = WiderFaceDetection("/ssd_1t/derron/yolov5-face/data/widerface/widerface/train/label.txt")
for i in range(len(aa.imgs_path)):
    print(i, aa.imgs_path[i])
    img = cv2.imread(aa.imgs_path[i])
    base_img = os.path.basename(aa.imgs_path[i])
    base_txt = os.path.basename(aa.imgs_path[i])[:-4] + ".txt"
    save_img_path = os.path.join(save_path, base_img)
    save_txt_path = os.path.join(save_path, base_txt)
    with open(save_txt_path, "w") as f:
        height, width, _ = img.shape
        labels = aa.words[i]
        annotations = np.zeros((0, 14))
        if len(labels) == 0:
            continue
        for idx, label in enumerate(labels):
            annotation = np.zeros((1, 14))
            # bbox
            label[0] = max(0, label[0])
            label[1] = max(0, label[1])
            label[2] = min(width - 1, label[2])
            label[3] = min(height - 1, label[3])
            annotation[0, 0] = (label[0] + label[2] / 2) / width  # cx
            annotation[0, 1] = (label[1] + label[3] / 2) / height  # cy
            annotation[0, 2] = label[2] / width  # w
            annotation[0, 3] = label[3] / height  # h
            # if (label[2] - label[0]) < 8 or (label[3] - label[1]) < 8:
            #     img[int(label[1]):int(label[3]), int(label[0]):int(label[2])] = 127
            #     continue
            # landmarks
            annotation[0, 4] = label[4] / width  # l0_x
            annotation[0, 5] = label[5] / height  # l0_y
            annotation[0, 6] = label[7] / width  # l1_x
            annotation[0, 7] = label[8] / height  # l1_y
            annotation[0, 8] = label[10] / width  # l2_x
            annotation[0, 9] = label[11] / height  # l2_y
            annotation[0, 10] = label[13] / width  # l3_x
            annotation[0, 11] = label[14] / height  # l3_y
            annotation[0, 12] = label[16] / width  # l4_x
            annotation[0, 13] = label[17] / height  # l4_y
            str_label = "0 "
            for i in range(len(annotation[0])):
                str_label = str_label + " " + str(annotation[0][i])
            str_label = str_label.replace('[', '').replace(']', '')
            str_label = str_label.replace(',', '') + '\n'
            f.write(str_label)
    cv2.imwrite(save_img_path, img)
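
Each label line retinaface2yolo.py emits is a class id followed by a normalized xywh box and five normalized landmark points, fifteen fields in total (missing landmarks come through as negative values). A small parsing sketch, using a made-up line in that format:

def parse_face_label(line):
    vals = [float(v) for v in line.split()]
    cls, box, landmarks = int(vals[0]), vals[1:5], vals[5:15]
    return cls, box, landmarks

# made-up example line: class id, cx cy w h, then five x/y landmark pairs
cls, box, lmk = parse_face_label('0 0.5 0.5 0.2 0.3 0.45 0.4 0.55 0.4 0.5 0.5 0.46 0.6 0.54 0.6')
print(cls, box, lmk)  # 0, a 4-value box, a 10-value landmark list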

data/scripts/get_argoverse_hd.sh (new file, 62 lines)
@@ -0,0 +1,62 @@
#!/bin/bash
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
# Download command: bash data/scripts/get_argoverse_hd.sh
# Train command: python train.py --data argoverse_hd.yaml
# Default dataset location is next to /yolov5:
#   /parent_folder
#     /argoverse
#     /yolov5

# Download/unzip images
d='../argoverse/' # unzip directory
mkdir $d
url=https://argoverse-hd.s3.us-east-2.amazonaws.com/
f=Argoverse-HD-Full.zip
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
wait # finish background tasks

cd ../argoverse/Argoverse-1.1/
ln -s tracking images

cd ../Argoverse-HD/annotations/

python3 - "$@" <<END
import json
from pathlib import Path

annotation_files = ["train.json", "val.json"]
print("Converting annotations to YOLOv5 format...")

for val in annotation_files:
    a = json.load(open(val, "rb"))

    label_dict = {}
    for annot in a['annotations']:
        img_id = annot['image_id']
        img_name = a['images'][img_id]['name']
        img_label_name = img_name[:-3] + "txt"

        obj_class = annot['category_id']
        x_center, y_center, width, height = annot['bbox']
        x_center = (x_center + width / 2) / 1920.  # offset and scale
        y_center = (y_center + height / 2) / 1200.  # offset and scale
        width /= 1920.  # scale
        height /= 1200.  # scale

        img_dir = "./labels/" + a['seq_dirs'][a['images'][annot['image_id']]['sid']]

        Path(img_dir).mkdir(parents=True, exist_ok=True)

        if img_dir + "/" + img_label_name not in label_dict:
            label_dict[img_dir + "/" + img_label_name] = []

        label_dict[img_dir + "/" + img_label_name].append(f"{obj_class} {x_center} {y_center} {width} {height}\n")

    for filename in label_dict:
        with open(filename, "w") as file:
            for string in label_dict[filename]:
                file.write(string)

END

mv ./labels ../../Argoverse-1.1/

data/scripts/get_coco.sh (new file, 27 lines)
@@ -0,0 +1,27 @@
#!/bin/bash
# COCO 2017 dataset http://cocodataset.org
# Download command: bash data/scripts/get_coco.sh
# Train command: python train.py --data coco.yaml
# Default dataset location is next to /yolov5:
#   /parent_folder
#     /coco
#     /yolov5

# Download/unzip labels
d='../' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB
echo 'Downloading' $url$f '...'
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background

# Download/unzip images
d='../coco/images' # unzip directory
url=http://images.cocodataset.org/zips/
f1='train2017.zip' # 19G, 118k images
f2='val2017.zip' # 1G, 5k images
f3='test2017.zip' # 7G, 41k images (optional)
for f in $f1 $f2; do
  echo 'Downloading' $url$f '...'
  curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
done
wait # finish background tasks

data/scripts/get_voc.sh (new file, 139 lines)
@@ -0,0 +1,139 @@
#!/bin/bash
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
# Download command: bash data/scripts/get_voc.sh
# Train command: python train.py --data voc.yaml
# Default dataset location is next to /yolov5:
#   /parent_folder
#     /VOC
#     /yolov5

start=$(date +%s)
mkdir -p ../tmp
cd ../tmp/

# Download/unzip images and labels
d='.' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f1=VOCtrainval_06-Nov-2007.zip # 446MB, 5012 images
f2=VOCtest_06-Nov-2007.zip # 438MB, 4953 images
f3=VOCtrainval_11-May-2012.zip # 1.95GB, 17126 images
for f in $f3 $f2 $f1; do
  echo 'Downloading' $url$f '...'
  curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
done
wait # finish background tasks

end=$(date +%s)
runtime=$((end - start))
echo "Completed in" $runtime "seconds"

echo "Splitting dataset..."
python3 - "$@" <<END
import xml.etree.ElementTree as ET
import pickle
import os
from os import listdir, getcwd
from os.path import join

sets = [('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test')]

classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]


def convert(size, box):
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)


def convert_annotation(year, image_id):
    in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml' % (year, image_id))
    out_file = open('VOCdevkit/VOC%s/labels/%s.txt' % (year, image_id), 'w')
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)

    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        bb = convert((w, h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')


wd = getcwd()

for year, image_set in sets:
    if not os.path.exists('VOCdevkit/VOC%s/labels/' % (year)):
        os.makedirs('VOCdevkit/VOC%s/labels/' % (year))
    image_ids = open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt' % (year, image_set)).read().strip().split()
    list_file = open('%s_%s.txt' % (year, image_set), 'w')
    for image_id in image_ids:
        list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg\n' % (wd, year, image_id))
        convert_annotation(year, image_id)
    list_file.close()

END

cat 2007_train.txt 2007_val.txt 2012_train.txt 2012_val.txt >train.txt
cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt

python3 - "$@" <<END

import shutil
import os

os.system('mkdir ../VOC/')
os.system('mkdir ../VOC/images')
os.system('mkdir ../VOC/images/train')
os.system('mkdir ../VOC/images/val')

os.system('mkdir ../VOC/labels')
os.system('mkdir ../VOC/labels/train')
os.system('mkdir ../VOC/labels/val')

print(os.path.exists('../tmp/train.txt'))
f = open('../tmp/train.txt', 'r')
lines = f.readlines()

for line in lines:
    line = "/".join(line.split('/')[-5:]).strip()
    if (os.path.exists("../" + line)):
        os.system("cp ../" + line + " ../VOC/images/train")

    line = line.replace('JPEGImages', 'labels')
    line = line.replace('jpg', 'txt')
    if (os.path.exists("../" + line)):
        os.system("cp ../" + line + " ../VOC/labels/train")


print(os.path.exists('../tmp/2007_test.txt'))
f = open('../tmp/2007_test.txt', 'r')
lines = f.readlines()

for line in lines:
    line = "/".join(line.split('/')[-5:]).strip()
    if (os.path.exists("../" + line)):
        os.system("cp ../" + line + " ../VOC/images/val")

    line = line.replace('JPEGImages', 'labels')
    line = line.replace('jpg', 'txt')
    if (os.path.exists("../" + line)):
        os.system("cp ../" + line + " ../VOC/labels/val")

END

rm -rf ../tmp # remove temporary directory
echo "VOC download done."
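
The convert() helper inside the heredoc maps VOC's 1-based (xmin, xmax, ymin, ymax) corners to the normalized center-x, center-y, width, height format YOLO expects. A standalone worked example of the same arithmetic, assuming a hypothetical 640x480 image:

def convert(size, box):  # same logic as the heredoc above, condensed
    dw, dh = 1. / size[0], 1. / size[1]
    x = ((box[0] + box[1]) / 2.0 - 1) * dw  # box center, minus 1 for VOC's 1-based pixels
    y = ((box[2] + box[3]) / 2.0 - 1) * dh
    w = (box[1] - box[0]) * dw
    h = (box[3] - box[2]) * dh
    return x, y, w, h

print(convert((640, 480), (100., 300., 120., 360.)))
# -> (0.3109375, 0.4979166666666667, 0.3125, 0.5)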

data/train2yolo.py (new file, 176 lines)
@@ -0,0 +1,176 @@
import os.path
import sys
import torch
import torch.utils.data as data
import cv2
import numpy as np


class WiderFaceDetection(data.Dataset):
    def __init__(self, txt_path, preproc=None):
        self.preproc = preproc
        self.imgs_path = []
        self.words = []
        f = open(txt_path, 'r')
        lines = f.readlines()
        isFirst = True
        labels = []
        for line in lines:
            line = line.rstrip()
            if line.startswith('#'):
                if isFirst is True:
                    isFirst = False
                else:
                    labels_copy = labels.copy()
                    self.words.append(labels_copy)
                    labels.clear()
                path = line[2:]
                path = txt_path.replace('label.txt', 'images/') + path
                self.imgs_path.append(path)
            else:
                line = line.split(' ')
                label = [float(x) for x in line]
                labels.append(label)

        self.words.append(labels)

    def __len__(self):
        return len(self.imgs_path)

    def __getitem__(self, index):
        img = cv2.imread(self.imgs_path[index])
        height, width, _ = img.shape

        labels = self.words[index]
        annotations = np.zeros((0, 15))
        if len(labels) == 0:
            return annotations
        for idx, label in enumerate(labels):
            annotation = np.zeros((1, 15))
            # bbox
            annotation[0, 0] = label[0]  # x1
            annotation[0, 1] = label[1]  # y1
            annotation[0, 2] = label[0] + label[2]  # x2
            annotation[0, 3] = label[1] + label[3]  # y2

            # landmarks
            annotation[0, 4] = label[4]  # l0_x
            annotation[0, 5] = label[5]  # l0_y
            annotation[0, 6] = label[7]  # l1_x
            annotation[0, 7] = label[8]  # l1_y
            annotation[0, 8] = label[10]  # l2_x
            annotation[0, 9] = label[11]  # l2_y
            annotation[0, 10] = label[13]  # l3_x
            annotation[0, 11] = label[14]  # l3_y
            annotation[0, 12] = label[16]  # l4_x
            annotation[0, 13] = label[17]  # l4_y
            if annotation[0, 4] < 0:
                annotation[0, 14] = -1
            else:
                annotation[0, 14] = 1

            annotations = np.append(annotations, annotation, axis=0)
        target = np.array(annotations)
        if self.preproc is not None:
            img, target = self.preproc(img, target)

        return torch.from_numpy(img), target


def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for _, sample in enumerate(batch):
        for _, tup in enumerate(sample):
            if torch.is_tensor(tup):
                imgs.append(tup)
            elif isinstance(tup, type(np.empty(0))):
                annos = torch.from_numpy(tup).float()
                targets.append(annos)

    return torch.stack(imgs, 0), targets


if __name__ == '__main__':
    if len(sys.argv) == 1:
        print('Missing path to WIDERFACE train folder.')
        print('Run command: python3 train2yolo.py /path/to/original/widerface/train [/path/to/save/widerface/train]')
        exit(1)
    elif len(sys.argv) > 3:
        print('Too many arguments were provided.')
        print('Run command: python3 train2yolo.py /path/to/original/widerface/train [/path/to/save/widerface/train]')
        exit(1)
    original_path = sys.argv[1]

    if len(sys.argv) == 2:
        if not os.path.isdir('widerface'):
            os.mkdir('widerface')
        if not os.path.isdir('widerface/train'):
            os.mkdir('widerface/train')

        save_path = 'widerface/train'
    else:
        save_path = sys.argv[2]

    if not os.path.isfile(os.path.join(original_path, 'label.txt')):
        print('Missing label.txt file.')
        exit(1)

    aa = WiderFaceDetection(os.path.join(original_path, 'label.txt'))

    for i in range(len(aa.imgs_path)):
        print(i, aa.imgs_path[i])
        img = cv2.imread(aa.imgs_path[i])
        base_img = os.path.basename(aa.imgs_path[i])
        base_txt = os.path.basename(aa.imgs_path[i])[:-4] + ".txt"
        save_img_path = os.path.join(save_path, base_img)
        save_txt_path = os.path.join(save_path, base_txt)
        with open(save_txt_path, "w") as f:
            height, width, _ = img.shape
            labels = aa.words[i]
            annotations = np.zeros((0, 14))
            if len(labels) == 0:
                continue
            for idx, label in enumerate(labels):
                annotation = np.zeros((1, 14))
                # bbox
                label[0] = max(0, label[0])
                label[1] = max(0, label[1])
                label[2] = min(width - 1, label[2])
                label[3] = min(height - 1, label[3])
                annotation[0, 0] = (label[0] + label[2] / 2) / width  # cx
                annotation[0, 1] = (label[1] + label[3] / 2) / height  # cy
                annotation[0, 2] = label[2] / width  # w
                annotation[0, 3] = label[3] / height  # h
                # if (label[2] - label[0]) < 8 or (label[3] - label[1]) < 8:
                #     img[int(label[1]):int(label[3]), int(label[0]):int(label[2])] = 127
                #     continue
                # landmarks
                annotation[0, 4] = label[4] / width  # l0_x
                annotation[0, 5] = label[5] / height  # l0_y
                annotation[0, 6] = label[7] / width  # l1_x
                annotation[0, 7] = label[8] / height  # l1_y
                annotation[0, 8] = label[10] / width  # l2_x
                annotation[0, 9] = label[11] / height  # l2_y
                annotation[0, 10] = label[13] / width  # l3_x
                annotation[0, 11] = label[14] / height  # l3_y
                annotation[0, 12] = label[16] / width  # l4_x
                annotation[0, 13] = label[17] / height  # l4_y
                str_label = "0 "
                for i in range(len(annotation[0])):
                    str_label = str_label + " " + str(annotation[0][i])
                str_label = str_label.replace('[', '').replace(']', '')
                str_label = str_label.replace(',', '') + '\n'
                f.write(str_label)
        cv2.imwrite(save_img_path, img)

data/val2yolo.py (new file, 88 lines)
@@ -0,0 +1,88 @@
import os
import cv2
import numpy as np
import shutil
import sys
from tqdm import tqdm


def xywh2xxyy(box):
    x1 = box[0]
    y1 = box[1]
    x2 = box[0] + box[2]
    y2 = box[1] + box[3]
    return x1, x2, y1, y2


def convert(size, box):
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return x, y, w, h


def wider2face(root, phase='val', ignore_small=0):
    data = {}
    with open('{}/{}/label.txt'.format(root, phase), 'r') as f:
        lines = f.readlines()
        for line in tqdm(lines):
            line = line.strip()
            if '#' in line:
                path = '{}/{}/images/{}'.format(root, phase, line.split()[-1])
                img = cv2.imread(path)
                height, width, _ = img.shape
                data[path] = list()
            else:
                box = np.array(line.split()[0:4], dtype=np.float32)  # (x1,y1,w,h)
                if box[2] < ignore_small or box[3] < ignore_small:
                    continue
                box = convert((width, height), xywh2xxyy(box))
                label = '0 {} {} {} {} -1 -1 -1 -1 -1 -1 -1 -1 -1 -1'.format(round(box[0], 4), round(box[1], 4),
                                                                             round(box[2], 4), round(box[3], 4))
                data[path].append(label)
    return data


if __name__ == '__main__':
    if len(sys.argv) == 1:
        print('Missing path to WIDERFACE folder.')
        print('Run command: python3 val2yolo.py /path/to/original/widerface [/path/to/save/widerface/val]')
        exit(1)
    elif len(sys.argv) > 3:
        print('Too many arguments were provided.')
        print('Run command: python3 val2yolo.py /path/to/original/widerface [/path/to/save/widerface/val]')
        exit(1)

    root_path = sys.argv[1]
    if not os.path.isfile(os.path.join(root_path, 'val', 'label.txt')):
        print('Missing label.txt file.')
        exit(1)

    if len(sys.argv) == 2:
        if not os.path.isdir('widerface'):
            os.mkdir('widerface')
        if not os.path.isdir('widerface/val'):
            os.mkdir('widerface/val')

        save_path = 'widerface/val'
    else:
        save_path = sys.argv[2]

    datas = wider2face(root_path, phase='val')
    for idx, data in enumerate(datas.keys()):
        pict_name = os.path.basename(data)
        out_img = f'{save_path}/{idx}.jpg'
        out_txt = f'{save_path}/{idx}.txt'
        shutil.copyfile(data, out_img)
        labels = datas[data]
        f = open(out_txt, 'w')
        for label in labels:
            f.write(label + '\n')
        f.close()
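
Because WIDERFACE val annotations carry boxes only, val2yolo.py fills all ten landmark slots with -1, and downstream code can treat the negative values as "no landmark". A sketch of decoding one such line, with a made-up box:

line = '0 0.4621 0.3088 0.0573 0.0833 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1'
vals = line.split()
cls, box = int(vals[0]), [float(v) for v in vals[1:5]]
has_landmarks = float(vals[5]) >= 0  # the -1 sentinel marks absent landmark annotations
print(cls, box, has_landmarks)  # 0 [0.4621, 0.3088, 0.0573, 0.0833] False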

data/val2yolo_for_test.py (new file, 65 lines)
@@ -0,0 +1,65 @@
import os
import cv2
import numpy as np
import shutil
from tqdm import tqdm

root = '/ssd_1t/derron/WiderFace'


def xywh2xxyy(box):
    x1 = box[0]
    y1 = box[1]
    x2 = box[0] + box[2]
    y2 = box[1] + box[3]
    return (x1, x2, y1, y2)


def convert(size, box):
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)


def wider2face(phase='val', ignore_small=0):
    data = {}
    with open('{}/{}/label.txt'.format(root, phase), 'r') as f:
        lines = f.readlines()
        for line in tqdm(lines):
            line = line.strip()
            if '#' in line:
                path = '{}/{}/images/{}'.format(root, phase, os.path.basename(line))
                img = cv2.imread(path)
                height, width, _ = img.shape
                data[path] = list()
            else:
                box = np.array(line.split()[0:4], dtype=np.float32)  # (x1,y1,w,h)
                if box[2] < ignore_small or box[3] < ignore_small:
                    continue
                box = convert((width, height), xywh2xxyy(box))
                label = '0 {} {} {} {} -1 -1 -1 -1 -1 -1 -1 -1 -1 -1'.format(round(box[0], 4), round(box[1], 4),
                                                                             round(box[2], 4), round(box[3], 4))
                data[path].append(label)
    return data


if __name__ == '__main__':
    datas = wider2face('val')
    for idx, data in enumerate(datas.keys()):
        pict_name = os.path.basename(data)
        out_img = 'widerface/val/images/{}'.format(pict_name)
        out_txt = 'widerface/val/labels/{}.txt'.format(os.path.splitext(pict_name)[0])
        shutil.copyfile(data, out_img)
        labels = datas[data]
        f = open(out_txt, 'w')
        for label in labels:
            f.write(label + '\n')
        f.close()

data/voc.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
# Train command: python train.py --data voc.yaml
# Default dataset location is next to /yolov5:
#   /parent_folder
#     /VOC
#     /yolov5


# download command/URL (optional)
download: bash data/scripts/get_voc.sh

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../VOC/images/train/  # 16551 images
val: ../VOC/images/val/  # 4952 images

# number of classes
nc: 20

# class names
names: [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
         'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ]

data/widerface.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
# CCPD license plate detection dataset (single- and double-layer plates)
# Train command: python train.py --data widerface.yaml

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: /mnt/Gpan/Mydata/pytorchPorject/yolov5-face/ccpd/train_detect
val: /mnt/Gpan/Mydata/pytorchPorject/yolov5-face/ccpd/val_detect

# number of classes
nc: 2

# class names
names: [ 'single', 'double' ]